[llvm] 65d3c22 - [RISCV] Merge more rv32/rv64 intrinsic tests that have the same content. NFC
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Wed Jul 31 16:26:18 PDT 2024
Author: Craig Topper
Date: 2024-07-31T16:26:08-07:00
New Revision: 65d3c220a184b11e41d6fc5853d865f3eb92b02e
URL: https://github.com/llvm/llvm-project/commit/65d3c220a184b11e41d6fc5853d865f3eb92b02e
DIFF: https://github.com/llvm/llvm-project/commit/65d3c220a184b11e41d6fc5853d865f3eb92b02e.diff
LOG: [RISCV] Merge more rv32/rv64 intrinsic tests that have the same content. NFC
Added:
    llvm/test/CodeGen/RISCV/rvv/vsadd.ll
    llvm/test/CodeGen/RISCV/rvv/vsaddu.ll
    llvm/test/CodeGen/RISCV/rvv/vsmul.ll
    llvm/test/CodeGen/RISCV/rvv/vssub.ll
    llvm/test/CodeGen/RISCV/rvv/vssubu.ll

Modified:

Removed:
    llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
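
The merged files themselves are not quoted below; the hunk only shows the deleted rv64 copy. As a minimal sketch, assuming the sed-based XLEN substitution used by other merged RVV intrinsic tests, the shared source would replace the hard-coded i64 VL/scalar operand type with an iXLEN placeholder and drive both targets from one file. The iXLEN name and these RUN lines are illustrative, not copied from the new vsadd.ll:

; Hypothetical RUN lines for a single test covering rv32 and rv64:
; RUN: sed 's/iXLEN/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s
; RUN: sed 's/iXLEN/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s

declare <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  iXLEN)

Since the generated CHECK lines are identical for both triples, one copy of the checks serves both targets, which is what makes the rv32/rv64 files mergeable in the first place.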
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll
deleted file mode 100644
index ca56ad2122c1f..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv64.ll
+++ /dev/null
@@ -1,2801 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-
-declare <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vsadd.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vsadd.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vsadd.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vsadd.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vsadd.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i64 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vsadd.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i64 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vsadd.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i64 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i64,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8r.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vsadd.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vsadd.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vsadd.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vsadd.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vsadd.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i64 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vsadd.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i64 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re16.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vsadd.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vsadd.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vsadd.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vsadd.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vsadd.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i64 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re32.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vsadd.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vsadd.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vsadd.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vsadd.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vsadd.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vsadd.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vsadd.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vsadd.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vsadd.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vsadd.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vsadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vsadd.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- <vscale x 64 x i1>,
- i64,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vsadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i8 %2,
- <vscale x 64 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vsadd.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vsadd.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsadd.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vsadd.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vsadd.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vsadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vsadd.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vsadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i16 %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vsadd.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vsadd.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vsadd.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vsadd.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vsadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vsadd.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vsadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vsadd.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsadd.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vsadd.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsadd.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vsadd.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vsadd.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vsadd.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vsadd.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vsadd.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i8 9,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vsadd_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vsadd.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i8 9,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vsadd_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vsadd.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i8 9,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vsadd_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vsadd.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i8 9,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vsadd_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vsadd.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vsadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i8 9,
- <vscale x 16 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vsadd_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vi_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vsadd.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vsadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i8 9,
- <vscale x 32 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vsadd_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vi_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vsadd.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i8 9,
- i64 %1)
-
- ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT: vsadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i8 9,
- <vscale x 64 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vsadd_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vsadd.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i16 9,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vsadd_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vsadd.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i16 9,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vsadd_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vsadd.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i16 9,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vsadd_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vsadd.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vsadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i16 9,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vsadd_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vsadd.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vsadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i16 9,
- <vscale x 16 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vsadd_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vi_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vsadd.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i16 9,
- i64 %1)
-
- ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT: vsadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i16 9,
- <vscale x 32 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vsadd.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 9,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vsadd.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 9,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vsadd.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vsadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 9,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vsadd.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vsadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 9,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vsadd.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 9,
- i64 %1)
-
- ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT: vsadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 9,
- <vscale x 16 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vsadd.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vsadd_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vsadd.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 9,
- <vscale x 1 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vsadd.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vsadd_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vsadd.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 9,
- <vscale x 2 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vsadd.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vsadd_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vsadd.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 9,
- <vscale x 4 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vsadd.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 9,
- i64 %1)
-
- ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vsadd_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vsadd.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 9,
- <vscale x 8 x i1> %2,
- i64 %3, i64 1)
-
- ret <vscale x 8 x i64> %a
-}
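The rename that follows shows the merging idiom: the XLEN-dependent VL and mask-policy operands become an iXLen placeholder, and sed in the RUN lines rewrites that placeholder to i32 or i64 before llc parses the file, so one source serves both targets. A minimal sketch of the pattern is below; it is illustrative only (the committed vsadd.ll carries the full set of test bodies merged from the rv32/rv64 copies):

; NOTE: illustrative sketch of the iXLen/sed idiom, not the committed file.
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

; iXLen is not a real IR type; sed replaces it textually per RUN line.
declare <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
  <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)

Because the substitution is purely textual, check lines that are identical on both targets stay under the shared CHECK prefix, while the RV32/RV64 prefixes remain available for any output that differs by XLEN.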
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd.ll
similarity index 83%
rename from llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vsadd.ll
index c2586e4bc2d84..a108d98c1731b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd.ll
@@ -1,14 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
declare <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i32);
+ iXLen)
-define <vscale x 1 x i8> @intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
@@ -19,7 +21,7 @@ entry:
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
@@ -29,10 +31,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i8> @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -44,7 +46,7 @@ entry:
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
@@ -53,9 +55,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i32);
+ iXLen)
-define <vscale x 2 x i8> @intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
@@ -66,7 +68,7 @@ entry:
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
@@ -76,10 +78,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i8> @intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -91,7 +93,7 @@ entry:
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
@@ -100,9 +102,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i32);
+ iXLen)
-define <vscale x 4 x i8> @intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
@@ -113,7 +115,7 @@ entry:
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
@@ -123,10 +125,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i8> @intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -138,7 +140,7 @@ entry:
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
@@ -147,9 +149,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i32);
+ iXLen)
-define <vscale x 8 x i8> @intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
@@ -160,7 +162,7 @@ entry:
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
@@ -170,10 +172,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i8> @intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -185,7 +187,7 @@ entry:
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
@@ -194,9 +196,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i32);
+ iXLen)
-define <vscale x 16 x i8> @intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
@@ -207,7 +209,7 @@ entry:
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
@@ -217,10 +219,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 16 x i8> @intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -232,7 +234,7 @@ entry:
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
@@ -241,9 +243,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i32);
+ iXLen)
-define <vscale x 32 x i8> @intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
@@ -254,7 +256,7 @@ entry:
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
@@ -264,10 +266,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 32 x i8> @intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -279,7 +281,7 @@ entry:
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
@@ -288,9 +290,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.nxv64i8(
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
- i32);
+ iXLen)
-define <vscale x 64 x i8> @intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
@@ -301,7 +303,7 @@ entry:
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
@@ -311,10 +313,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.nxv64i8(
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 64 x i8> @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8r.v v24, (a0)
@@ -327,7 +329,7 @@ entry:
<vscale x 64 x i8> %1,
<vscale x 64 x i8> %2,
<vscale x 64 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
@@ -336,9 +338,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i32);
+ iXLen)
-define <vscale x 1 x i16> @intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
@@ -349,7 +351,7 @@ entry:
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
@@ -359,10 +361,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i16> @intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -374,7 +376,7 @@ entry:
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
@@ -383,9 +385,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i32);
+ iXLen)
-define <vscale x 2 x i16> @intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
@@ -396,7 +398,7 @@ entry:
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
@@ -406,10 +408,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i16> @intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -421,7 +423,7 @@ entry:
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
@@ -430,9 +432,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i32);
+ iXLen)
-define <vscale x 4 x i16> @intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
@@ -443,7 +445,7 @@ entry:
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
@@ -453,10 +455,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i16> @intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -468,7 +470,7 @@ entry:
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
@@ -477,9 +479,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i32);
+ iXLen)
-define <vscale x 8 x i16> @intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
@@ -490,7 +492,7 @@ entry:
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
@@ -500,10 +502,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i16> @intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -515,7 +517,7 @@ entry:
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
@@ -524,9 +526,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i32);
+ iXLen)
-define <vscale x 16 x i16> @intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
@@ -537,7 +539,7 @@ entry:
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
@@ -547,10 +549,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 16 x i16> @intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -562,7 +564,7 @@ entry:
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
@@ -571,9 +573,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.nxv32i16(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
- i32);
+ iXLen)
-define <vscale x 32 x i16> @intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
@@ -584,7 +586,7 @@ entry:
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
@@ -594,10 +596,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.nxv32i16(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 32 x i16> @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re16.v v24, (a0)
@@ -610,7 +612,7 @@ entry:
<vscale x 32 x i16> %1,
<vscale x 32 x i16> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
@@ -619,9 +621,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i32);
+ iXLen)
-define <vscale x 1 x i32> @intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
@@ -632,7 +634,7 @@ entry:
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
@@ -642,10 +644,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i32> @intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -657,7 +659,7 @@ entry:
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
@@ -666,9 +668,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i32);
+ iXLen)
-define <vscale x 2 x i32> @intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
@@ -679,7 +681,7 @@ entry:
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
@@ -689,10 +691,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i32> @intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -704,7 +706,7 @@ entry:
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
@@ -713,9 +715,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i32);
+ iXLen)
-define <vscale x 4 x i32> @intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
@@ -726,7 +728,7 @@ entry:
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
@@ -736,10 +738,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i32> @intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -751,7 +753,7 @@ entry:
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
@@ -760,9 +762,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i32);
+ iXLen)
-define <vscale x 8 x i32> @intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
@@ -773,7 +775,7 @@ entry:
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
@@ -783,10 +785,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i32> @intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -798,7 +800,7 @@ entry:
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
@@ -807,9 +809,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.nxv16i32(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
- i32);
+ iXLen)
-define <vscale x 16 x i32> @intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
@@ -820,7 +822,7 @@ entry:
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
@@ -830,10 +832,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.nxv16i32(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 16 x i32> @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re32.v v24, (a0)
@@ -846,7 +848,7 @@ entry:
<vscale x 16 x i32> %1,
<vscale x 16 x i32> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
@@ -855,9 +857,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i32);
+ iXLen)
-define <vscale x 1 x i64> @intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
@@ -868,7 +870,7 @@ entry:
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
@@ -878,10 +880,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i64> @intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -893,7 +895,7 @@ entry:
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
@@ -902,9 +904,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i32);
+ iXLen)
-define <vscale x 2 x i64> @intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
@@ -915,7 +917,7 @@ entry:
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
@@ -925,10 +927,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i64> @intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -940,7 +942,7 @@ entry:
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
@@ -949,9 +951,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i32);
+ iXLen)
-define <vscale x 4 x i64> @intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
@@ -962,7 +964,7 @@ entry:
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
@@ -972,10 +974,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i64> @intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
@@ -987,7 +989,7 @@ entry:
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
@@ -996,9 +998,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
- i32);
+ iXLen)
-define <vscale x 8 x i64> @intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
@@ -1009,7 +1011,7 @@ entry:
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
@@ -1019,10 +1021,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i64> @intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
@@ -1035,7 +1037,7 @@ entry:
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
@@ -1044,9 +1046,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vsadd.nxv1i8.i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
- i32);
+ iXLen)
-define <vscale x 1 x i8> @intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -1057,7 +1059,7 @@ entry:
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
@@ -1067,10 +1069,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i8> @intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1082,7 +1084,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
@@ -1091,9 +1093,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vsadd.nxv2i8.i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
- i32);
+ iXLen)
-define <vscale x 2 x i8> @intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -1104,7 +1106,7 @@ entry:
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
@@ -1114,10 +1116,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vsadd.mask.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i8> @intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1129,7 +1131,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
@@ -1138,9 +1140,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vsadd.nxv4i8.i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
- i32);
+ iXLen)
-define <vscale x 4 x i8> @intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -1151,7 +1153,7 @@ entry:
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
@@ -1161,10 +1163,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vsadd.mask.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i8> @intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1176,7 +1178,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
@@ -1185,9 +1187,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vsadd.nxv8i8.i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
- i32);
+ iXLen)
-define <vscale x 8 x i8> @intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -1198,7 +1200,7 @@ entry:
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
@@ -1208,10 +1210,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vsadd.mask.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i8> @intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1223,7 +1225,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
@@ -1232,9 +1234,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vsadd.nxv16i8.i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
- i32);
+ iXLen)
-define <vscale x 16 x i8> @intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -1245,7 +1247,7 @@ entry:
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
@@ -1255,10 +1257,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vsadd.mask.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 16 x i8> @intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1270,7 +1272,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
@@ -1279,9 +1281,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vsadd.nxv32i8.i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
- i32);
+ iXLen)
-define <vscale x 32 x i8> @intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -1292,7 +1294,7 @@ entry:
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
@@ -1302,10 +1304,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vsadd.mask.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 32 x i8> @intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1317,7 +1319,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
@@ -1326,9 +1328,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vsadd.nxv64i8.i8(
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
- i32);
+ iXLen)
-define <vscale x 64 x i8> @intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -1339,7 +1341,7 @@ entry:
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
@@ -1349,10 +1351,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vsadd.mask.nxv64i8.i8(
<vscale x 64 x i8>,
i8,
<vscale x 64 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 64 x i8> @intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
@@ -1364,7 +1366,7 @@ entry:
<vscale x 64 x i8> %1,
i8 %2,
<vscale x 64 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
@@ -1373,9 +1375,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vsadd.nxv1i16.i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
- i32);
+ iXLen)
-define <vscale x 1 x i16> @intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
@@ -1386,7 +1388,7 @@ entry:
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
@@ -1396,10 +1398,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vsadd.mask.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i16> @intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1411,7 +1413,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
@@ -1420,9 +1422,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vsadd.nxv2i16.i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
- i32);
+ iXLen)
-define <vscale x 2 x i16> @intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
@@ -1433,7 +1435,7 @@ entry:
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
@@ -1443,10 +1445,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vsadd.mask.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i16> @intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1458,7 +1460,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
@@ -1467,9 +1469,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vsadd.nxv4i16.i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
- i32);
+ iXLen)
-define <vscale x 4 x i16> @intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
@@ -1480,7 +1482,7 @@ entry:
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
@@ -1490,10 +1492,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vsadd.mask.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i16> @intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1505,7 +1507,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
@@ -1514,9 +1516,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vsadd.nxv8i16.i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
- i32);
+ iXLen)
-define <vscale x 8 x i16> @intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
@@ -1527,7 +1529,7 @@ entry:
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
@@ -1537,10 +1539,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vsadd.mask.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i16> @intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1552,7 +1554,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
@@ -1561,9 +1563,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vsadd.nxv16i16.i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
- i32);
+ iXLen)
-define <vscale x 16 x i16> @intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
@@ -1574,7 +1576,7 @@ entry:
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
@@ -1584,10 +1586,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vsadd.mask.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 16 x i16> @intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1599,7 +1601,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
@@ -1608,9 +1610,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vsadd.nxv32i16.i16(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
- i32);
+ iXLen)
-define <vscale x 32 x i16> @intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
@@ -1621,7 +1623,7 @@ entry:
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
@@ -1631,10 +1633,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vsadd.mask.nxv32i16.i16(
<vscale x 32 x i16>,
i16,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 32 x i16> @intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
@@ -1646,7 +1648,7 @@ entry:
<vscale x 32 x i16> %1,
i16 %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
@@ -1655,9 +1657,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vsadd.nxv1i32.i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
- i32);
+ iXLen)
-define <vscale x 1 x i32> @intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
@@ -1668,7 +1670,7 @@ entry:
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
@@ -1678,10 +1680,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vsadd.mask.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i32> @intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1693,7 +1695,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
@@ -1702,9 +1704,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vsadd.nxv2i32.i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
- i32);
+ iXLen)
-define <vscale x 2 x i32> @intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
@@ -1715,7 +1717,7 @@ entry:
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
@@ -1725,10 +1727,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vsadd.mask.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i32> @intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1740,7 +1742,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
@@ -1749,9 +1751,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vsadd.nxv4i32.i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
- i32);
+ iXLen)
-define <vscale x 4 x i32> @intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
@@ -1762,7 +1764,7 @@ entry:
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
@@ -1772,10 +1774,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vsadd.mask.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i32> @intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1787,7 +1789,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
@@ -1796,9 +1798,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vsadd.nxv8i32.i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
- i32);
+ iXLen)
-define <vscale x 8 x i32> @intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
@@ -1809,7 +1811,7 @@ entry:
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
@@ -1819,10 +1821,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vsadd.mask.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i32> @intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1834,7 +1836,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
@@ -1843,9 +1845,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vsadd.nxv16i32.i32(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
- i32);
+ iXLen)
-define <vscale x 16 x i32> @intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
@@ -1856,7 +1858,7 @@ entry:
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
@@ -1866,10 +1868,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vsadd.mask.nxv16i32.i32(
<vscale x 16 x i32>,
i32,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 16 x i32> @intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
@@ -1881,7 +1883,7 @@ entry:
<vscale x 16 x i32> %1,
i32 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
@@ -1890,26 +1892,32 @@ declare <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vsadd.vv v8, v8, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vsadd.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vsadd.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsadd.nxv1i64.i64(
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
@@ -1919,28 +1927,34 @@ declare <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsadd.vv v8, v9, v10, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsadd.vv v8, v9, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsadd_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsadd.vx v8, v9, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsadd.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
@@ -1949,26 +1963,32 @@ declare <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsadd.vv v8, v8, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsadd.vv v8, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsadd_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vsadd.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsadd.nxv2i64.i64(
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
@@ -1978,28 +1998,34 @@ declare <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsadd.vv v8, v10, v12, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsadd.vv v8, v10, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsadd_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsadd.vx v8, v10, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsadd.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
@@ -2008,26 +2034,32 @@ declare <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsadd.vv v8, v8, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsadd.vv v8, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsadd_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vsadd.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsadd.nxv4i64.i64(
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
@@ -2037,28 +2069,34 @@ declare <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsadd.vv v8, v12, v16, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsadd.vv v8, v12, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsadd_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsadd.vx v8, v12, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsadd.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
@@ -2067,26 +2105,32 @@ declare <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsadd.vv v8, v8, v16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsadd.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsadd_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vsadd.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsadd.nxv8i64.i64(
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
@@ -2096,33 +2140,39 @@ declare <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vsadd.vv v8, v16, v24, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vlse64.v v24, (a0), zero
+; RV32-NEXT: vsadd.vv v8, v16, v24, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsadd_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsadd.vx v8, v16, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsadd.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
-define <vscale x 1 x i8> @intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
@@ -2133,12 +2183,12 @@ entry:
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i8> %a
}
-define <vscale x 1 x i8> @intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -2150,12 +2200,12 @@ entry:
<vscale x 1 x i8> %1,
i8 9,
<vscale x 1 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 1 x i8> %a
}
-define <vscale x 2 x i8> @intrinsic_vsadd_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsadd_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
@@ -2166,12 +2216,12 @@ entry:
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i8> %a
}
-define <vscale x 2 x i8> @intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -2183,12 +2233,12 @@ entry:
<vscale x 2 x i8> %1,
i8 9,
<vscale x 2 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 2 x i8> %a
}
-define <vscale x 4 x i8> @intrinsic_vsadd_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsadd_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
@@ -2199,12 +2249,12 @@ entry:
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i8> %a
}
-define <vscale x 4 x i8> @intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -2216,12 +2266,12 @@ entry:
<vscale x 4 x i8> %1,
i8 9,
<vscale x 4 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 4 x i8> %a
}
-define <vscale x 8 x i8> @intrinsic_vsadd_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsadd_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
@@ -2232,12 +2282,12 @@ entry:
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i8> %a
}
-define <vscale x 8 x i8> @intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2249,12 +2299,12 @@ entry:
<vscale x 8 x i8> %1,
i8 9,
<vscale x 8 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 8 x i8> %a
}
-define <vscale x 16 x i8> @intrinsic_vsadd_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsadd_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
@@ -2265,12 +2315,12 @@ entry:
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 16 x i8> %a
}
-define <vscale x 16 x i8> @intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2282,12 +2332,12 @@ entry:
<vscale x 16 x i8> %1,
i8 9,
<vscale x 16 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 16 x i8> %a
}
-define <vscale x 32 x i8> @intrinsic_vsadd_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsadd_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
@@ -2298,12 +2348,12 @@ entry:
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 32 x i8> %a
}
-define <vscale x 32 x i8> @intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2315,12 +2365,12 @@ entry:
<vscale x 32 x i8> %1,
i8 9,
<vscale x 32 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 32 x i8> %a
}
-define <vscale x 64 x i8> @intrinsic_vsadd_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsadd_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
@@ -2331,12 +2381,12 @@ entry:
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 64 x i8> %a
}
-define <vscale x 64 x i8> @intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
@@ -2348,12 +2398,12 @@ entry:
<vscale x 64 x i8> %1,
i8 9,
<vscale x 64 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 64 x i8> %a
}
-define <vscale x 1 x i16> @intrinsic_vsadd_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsadd_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
@@ -2364,12 +2414,12 @@ entry:
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i16> %a
}
-define <vscale x 1 x i16> @intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2381,12 +2431,12 @@ entry:
<vscale x 1 x i16> %1,
i16 9,
<vscale x 1 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 1 x i16> %a
}
-define <vscale x 2 x i16> @intrinsic_vsadd_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsadd_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
@@ -2397,12 +2447,12 @@ entry:
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i16> %a
}
-define <vscale x 2 x i16> @intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2414,12 +2464,12 @@ entry:
<vscale x 2 x i16> %1,
i16 9,
<vscale x 2 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 2 x i16> %a
}
-define <vscale x 4 x i16> @intrinsic_vsadd_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsadd_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
@@ -2430,12 +2480,12 @@ entry:
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i16> %a
}
-define <vscale x 4 x i16> @intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2447,12 +2497,12 @@ entry:
<vscale x 4 x i16> %1,
i16 9,
<vscale x 4 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 4 x i16> %a
}
-define <vscale x 8 x i16> @intrinsic_vsadd_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsadd_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
@@ -2463,12 +2513,12 @@ entry:
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i16> %a
}
-define <vscale x 8 x i16> @intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2480,12 +2530,12 @@ entry:
<vscale x 8 x i16> %1,
i16 9,
<vscale x 8 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 8 x i16> %a
}
-define <vscale x 16 x i16> @intrinsic_vsadd_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsadd_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
@@ -2496,12 +2546,12 @@ entry:
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 16 x i16> %a
}
-define <vscale x 16 x i16> @intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2513,12 +2563,12 @@ entry:
<vscale x 16 x i16> %1,
i16 9,
<vscale x 16 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 16 x i16> %a
}
-define <vscale x 32 x i16> @intrinsic_vsadd_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsadd_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
@@ -2529,12 +2579,12 @@ entry:
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 32 x i16> %a
}
-define <vscale x 32 x i16> @intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
@@ -2546,12 +2596,12 @@ entry:
<vscale x 32 x i16> %1,
i16 9,
<vscale x 32 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 32 x i16> %a
}
-define <vscale x 1 x i32> @intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
@@ -2562,12 +2612,12 @@ entry:
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i32> %a
}
-define <vscale x 1 x i32> @intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2579,12 +2629,12 @@ entry:
<vscale x 1 x i32> %1,
i32 9,
<vscale x 1 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 1 x i32> %a
}
-define <vscale x 2 x i32> @intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
@@ -2595,12 +2645,12 @@ entry:
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i32> %a
}
-define <vscale x 2 x i32> @intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2612,12 +2662,12 @@ entry:
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 2 x i32> %a
}
-define <vscale x 4 x i32> @intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
@@ -2628,12 +2678,12 @@ entry:
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i32> %a
}
-define <vscale x 4 x i32> @intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2645,12 +2695,12 @@ entry:
<vscale x 4 x i32> %1,
i32 9,
<vscale x 4 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 4 x i32> %a
}
-define <vscale x 8 x i32> @intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
@@ -2661,12 +2711,12 @@ entry:
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i32> %a
}
-define <vscale x 8 x i32> @intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2678,12 +2728,12 @@ entry:
<vscale x 8 x i32> %1,
i32 9,
<vscale x 8 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 8 x i32> %a
}
-define <vscale x 16 x i32> @intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
@@ -2694,12 +2744,12 @@ entry:
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 16 x i32> %a
}
-define <vscale x 16 x i32> @intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
@@ -2711,12 +2761,12 @@ entry:
<vscale x 16 x i32> %1,
i32 9,
<vscale x 16 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 16 x i32> %a
}
-define <vscale x 1 x i64> @intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
@@ -2727,12 +2777,12 @@ entry:
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 1 x i64> %a
}
-define <vscale x 1 x i64> @intrinsic_vsadd_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsadd_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2744,12 +2794,12 @@ entry:
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 1 x i64> %a
}
-define <vscale x 2 x i64> @intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
@@ -2760,12 +2810,12 @@ entry:
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 2 x i64> %a
}
-define <vscale x 2 x i64> @intrinsic_vsadd_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsadd_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2777,12 +2827,12 @@ entry:
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 2 x i64> %a
}
-define <vscale x 4 x i64> @intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
@@ -2793,12 +2843,12 @@ entry:
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 4 x i64> %a
}
-define <vscale x 4 x i64> @intrinsic_vsadd_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsadd_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
@@ -2810,12 +2860,12 @@ entry:
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 4 x i64> %a
}
-define <vscale x 8 x i64> @intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsadd_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
@@ -2826,12 +2876,12 @@ entry:
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
- i32 %1)
+ iXLen %1)
ret <vscale x 8 x i64> %a
}
-define <vscale x 8 x i64> @intrinsic_vsadd_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsadd_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
@@ -2843,7 +2893,7 @@ entry:
<vscale x 8 x i64> %1,
i64 9,
<vscale x 8 x i1> %2,
- i32 %3, i32 1)
+ iXLen %3, iXLen 1)
ret <vscale x 8 x i64> %a
}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
deleted file mode 100644
index b5fa9a921d46c..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv32.ll
+++ /dev/null
@@ -1,2849 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-
-declare <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vsaddu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i32 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vsaddu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i32 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vsaddu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i32 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vsaddu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i32 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vsaddu.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i32 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vsaddu.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i32 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vsaddu.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i32 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i32,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8r.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vsaddu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i32 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vsaddu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i32 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vsaddu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i32 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vsaddu.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i32 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vsaddu.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i32 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vsaddu.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i32 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re16.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vsaddu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vsaddu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vsaddu.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vsaddu.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vsaddu.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re32.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vsaddu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i32 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vsaddu.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i32 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vsaddu.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i32 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vsaddu.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i32 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vsaddu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vsaddu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vsaddu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vsaddu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vsaddu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vsaddu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vsaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vsaddu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- <vscale x 64 x i1>,
- i32,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vsaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i8 %2,
- <vscale x 64 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vsaddu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vsaddu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsaddu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vsaddu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vsaddu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vsaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vsaddu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vsaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i16 %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vsaddu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vsaddu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vsaddu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vsaddu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vsaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vsaddu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vsaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vsaddu.vv v8, v8, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsaddu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vsaddu.vv v8, v8, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsaddu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vsaddu.vv v8, v8, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsaddu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vsaddu.vv v8, v8, v16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vsaddu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vsaddu_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vsaddu.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 1 x i8> @intrinsic_vsaddu_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i8 9,
- <vscale x 1 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vsaddu_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vsaddu.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 2 x i8> @intrinsic_vsaddu_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i8 9,
- <vscale x 2 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vsaddu_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vsaddu.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 4 x i8> @intrinsic_vsaddu_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i8 9,
- <vscale x 4 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vsaddu_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vsaddu.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 8 x i8> @intrinsic_vsaddu_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i8 9,
- <vscale x 8 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vsaddu_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vi_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vsaddu.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 16 x i8> @intrinsic_vsaddu_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vsaddu.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i8 9,
- <vscale x 16 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vsaddu_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vi_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vsaddu.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 32 x i8> @intrinsic_vsaddu_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vsaddu.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i8 9,
- <vscale x 32 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vsaddu_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vi_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vsaddu.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i8 9,
- i32 %1)
-
- ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT: vsaddu.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i8 9,
- <vscale x 64 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vsaddu_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vsaddu.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i16 9,
- i32 %1)
-
- ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 1 x i16> @intrinsic_vsaddu_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i16 9,
- <vscale x 1 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vsaddu_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vsaddu.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i16 9,
- i32 %1)
-
- ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 2 x i16> @intrinsic_vsaddu_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i16 9,
- <vscale x 2 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vsaddu_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vsaddu.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i16 9,
- i32 %1)
-
- ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 4 x i16> @intrinsic_vsaddu_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i16 9,
- <vscale x 4 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vsaddu_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vsaddu.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i16 9,
- i32 %1)
-
- ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 8 x i16> @intrinsic_vsaddu_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vsaddu.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i16 9,
- <vscale x 8 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vsaddu_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vi_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vsaddu.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i16 9,
- i32 %1)
-
- ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 16 x i16> @intrinsic_vsaddu_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vsaddu.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i16 9,
- <vscale x 16 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vsaddu_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vi_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vsaddu.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i16 9,
- i32 %1)
-
- ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 32 x i16> @intrinsic_vsaddu_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT: vsaddu.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i16 9,
- <vscale x 32 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vsaddu_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vsaddu.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 1 x i32> @intrinsic_vsaddu_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 9,
- <vscale x 1 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vsaddu_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vsaddu.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 2 x i32> @intrinsic_vsaddu_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 9,
- <vscale x 2 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vsaddu_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vsaddu.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 4 x i32> @intrinsic_vsaddu_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vsaddu.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 9,
- <vscale x 4 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vsaddu_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vsaddu.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 8 x i32> @intrinsic_vsaddu_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vsaddu.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 9,
- <vscale x 8 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vsaddu_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vi_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vsaddu.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 9,
- i32 %1)
-
- ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 16 x i32> @intrinsic_vsaddu_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT: vsaddu.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 9,
- <vscale x 16 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vsaddu_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vsaddu.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 9,
- i32 %1)
-
- ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 1 x i64> @intrinsic_vsaddu_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vsaddu.vi v8, v9, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 9,
- <vscale x 1 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vsaddu_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vsaddu.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 9,
- i32 %1)
-
- ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 2 x i64> @intrinsic_vsaddu_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vsaddu.vi v8, v10, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 9,
- <vscale x 2 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vsaddu_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vsaddu.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 9,
- i32 %1)
-
- ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 4 x i64> @intrinsic_vsaddu_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vsaddu.vi v8, v12, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 9,
- <vscale x 4 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vsaddu_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vsaddu.vi v8, v8, 9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 9,
- i32 %1)
-
- ret <vscale x 8 x i64> %a
-}
-
-define <vscale x 8 x i64> @intrinsic_vsaddu_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vsaddu.vi v8, v16, 9, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 9,
- <vscale x 8 x i1> %2,
- i32 %3, i32 1)
-
- ret <vscale x 8 x i64> %a
-}
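
For orientation before the rename diff that follows: the merged vsaddu.ll is written once against an iXLen placeholder and instantiated per target by the sed invocations in its RUN lines. Below is a minimal sketch of that pattern; the function name @example_vsaddu_vv is invented for illustration, and the CHECK block is modeled on the nxv1i8 vv case shown in this patch.

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

declare <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
  <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)

; The VL operand is spelled iXLen; sed rewrites it to i32 or i64 before llc
; ever parses the file, so a single source covers both rv32 and rv64, and the
; RV32/RV64 prefixes remain available for any checks that differ by XLEN.
define <vscale x 1 x i8> @example_vsaddu_vv(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: example_vsaddu_vv:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsaddu.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    iXLen %2)
  ret <vscale x 1 x i8> %a
}
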
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu.ll
similarity index 83%
rename from llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vsaddu.ll
index 077e45f6408d9..57a89d6fe7d23 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu.ll
@@ -1,14 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
declare <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i64);
+ iXLen)
-define <vscale x 1 x i8> @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
@@ -19,7 +21,7 @@ entry:
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
@@ -29,10 +31,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i8> @intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -44,7 +46,7 @@ entry:
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
@@ -53,9 +55,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i64);
+ iXLen)
-define <vscale x 2 x i8> @intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
@@ -66,7 +68,7 @@ entry:
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
@@ -76,10 +78,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i8> @intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -91,7 +93,7 @@ entry:
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
@@ -100,9 +102,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i64);
+ iXLen)
-define <vscale x 4 x i8> @intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
@@ -113,7 +115,7 @@ entry:
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
@@ -123,10 +125,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i8> @intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -138,7 +140,7 @@ entry:
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
@@ -147,9 +149,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i64);
+ iXLen)
-define <vscale x 8 x i8> @intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
@@ -160,7 +162,7 @@ entry:
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
@@ -170,10 +172,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i8> @intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -185,7 +187,7 @@ entry:
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
@@ -194,9 +196,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i64);
+ iXLen)
-define <vscale x 16 x i8> @intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
@@ -207,7 +209,7 @@ entry:
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
@@ -217,10 +219,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 16 x i8> @intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -232,7 +234,7 @@ entry:
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
@@ -241,9 +243,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i64);
+ iXLen)
-define <vscale x 32 x i8> @intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
@@ -254,7 +256,7 @@ entry:
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
@@ -264,10 +266,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 32 x i8> @intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -279,7 +281,7 @@ entry:
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
@@ -288,9 +290,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.nxv64i8(
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
- i64);
+ iXLen)
-define <vscale x 64 x i8> @intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
@@ -301,7 +303,7 @@ entry:
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
@@ -311,10 +313,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.nxv64i8(
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8r.v v24, (a0)
@@ -327,7 +329,7 @@ entry:
<vscale x 64 x i8> %1,
<vscale x 64 x i8> %2,
<vscale x 64 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
@@ -336,9 +338,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i64);
+ iXLen)
-define <vscale x 1 x i16> @intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
@@ -349,7 +351,7 @@ entry:
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
@@ -359,10 +361,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i16> @intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -374,7 +376,7 @@ entry:
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
@@ -383,9 +385,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i64);
+ iXLen)
-define <vscale x 2 x i16> @intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
@@ -396,7 +398,7 @@ entry:
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
@@ -406,10 +408,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i16> @intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -421,7 +423,7 @@ entry:
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
@@ -430,9 +432,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i64);
+ iXLen)
-define <vscale x 4 x i16> @intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
@@ -443,7 +445,7 @@ entry:
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
@@ -453,10 +455,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i16> @intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -468,7 +470,7 @@ entry:
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
@@ -477,9 +479,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i64);
+ iXLen)
-define <vscale x 8 x i16> @intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
@@ -490,7 +492,7 @@ entry:
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
@@ -500,10 +502,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i16> @intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -515,7 +517,7 @@ entry:
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
@@ -524,9 +526,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i64);
+ iXLen)
-define <vscale x 16 x i16> @intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
@@ -537,7 +539,7 @@ entry:
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
@@ -547,10 +549,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 16 x i16> @intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -562,7 +564,7 @@ entry:
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
@@ -571,9 +573,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.nxv32i16(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
- i64);
+ iXLen)
-define <vscale x 32 x i16> @intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
@@ -584,7 +586,7 @@ entry:
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
@@ -594,10 +596,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.nxv32i16(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 32 x i16> @intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re16.v v24, (a0)
@@ -610,7 +612,7 @@ entry:
<vscale x 32 x i16> %1,
<vscale x 32 x i16> %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
@@ -619,9 +621,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i64);
+ iXLen)
-define <vscale x 1 x i32> @intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
@@ -632,7 +634,7 @@ entry:
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
@@ -642,10 +644,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i32> @intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -657,7 +659,7 @@ entry:
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
@@ -666,9 +668,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i64);
+ iXLen)
-define <vscale x 2 x i32> @intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
@@ -679,7 +681,7 @@ entry:
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
@@ -689,10 +691,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i32> @intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -704,7 +706,7 @@ entry:
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
@@ -713,9 +715,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i64);
+ iXLen)
-define <vscale x 4 x i32> @intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
@@ -726,7 +728,7 @@ entry:
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
@@ -736,10 +738,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i32> @intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -751,7 +753,7 @@ entry:
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
@@ -760,9 +762,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i64);
+ iXLen)
-define <vscale x 8 x i32> @intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
@@ -773,7 +775,7 @@ entry:
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
@@ -783,10 +785,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i32> @intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -798,7 +800,7 @@ entry:
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
@@ -807,9 +809,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.nxv16i32(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
- i64);
+ iXLen)
-define <vscale x 16 x i32> @intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
@@ -820,7 +822,7 @@ entry:
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
@@ -830,10 +832,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.nxv16i32(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 16 x i32> @intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re32.v v24, (a0)
@@ -846,7 +848,7 @@ entry:
<vscale x 16 x i32> %1,
<vscale x 16 x i32> %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
@@ -855,9 +857,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i64);
+ iXLen)
-define <vscale x 1 x i64> @intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
@@ -868,7 +870,7 @@ entry:
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
@@ -878,10 +880,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i64> @intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -893,7 +895,7 @@ entry:
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
@@ -902,9 +904,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i64);
+ iXLen)
-define <vscale x 2 x i64> @intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
@@ -915,7 +917,7 @@ entry:
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
@@ -925,10 +927,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i64> @intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -940,7 +942,7 @@ entry:
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
@@ -949,9 +951,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i64);
+ iXLen)
-define <vscale x 4 x i64> @intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
@@ -962,7 +964,7 @@ entry:
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
@@ -972,10 +974,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i64> @intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
@@ -987,7 +989,7 @@ entry:
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
@@ -996,9 +998,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
- i64);
+ iXLen)
-define <vscale x 8 x i64> @intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
@@ -1009,7 +1011,7 @@ entry:
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
@@ -1019,10 +1021,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i64> @intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
@@ -1035,7 +1037,7 @@ entry:
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
@@ -1044,9 +1046,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vsaddu.nxv1i8.i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
- i64);
+ iXLen)
-define <vscale x 1 x i8> @intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -1057,7 +1059,7 @@ entry:
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
@@ -1067,10 +1069,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vsaddu.mask.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i8> @intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1082,7 +1084,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
@@ -1091,9 +1093,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vsaddu.nxv2i8.i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
- i64);
+ iXLen)
-define <vscale x 2 x i8> @intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -1104,7 +1106,7 @@ entry:
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
@@ -1114,10 +1116,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vsaddu.mask.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i8> @intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1129,7 +1131,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
@@ -1138,9 +1140,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vsaddu.nxv4i8.i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
- i64);
+ iXLen)
-define <vscale x 4 x i8> @intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -1151,7 +1153,7 @@ entry:
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
@@ -1161,10 +1163,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vsaddu.mask.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i8> @intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1176,7 +1178,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
@@ -1185,9 +1187,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vsaddu.nxv8i8.i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
- i64);
+ iXLen)
-define <vscale x 8 x i8> @intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -1198,7 +1200,7 @@ entry:
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
@@ -1208,10 +1210,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vsaddu.mask.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i8> @intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1223,7 +1225,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
@@ -1232,9 +1234,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vsaddu.nxv16i8.i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
- i64);
+ iXLen)
-define <vscale x 16 x i8> @intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -1245,7 +1247,7 @@ entry:
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
@@ -1255,10 +1257,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vsaddu.mask.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 16 x i8> @intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1270,7 +1272,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
@@ -1279,9 +1281,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vsaddu.nxv32i8.i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
- i64);
+ iXLen)
-define <vscale x 32 x i8> @intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -1292,7 +1294,7 @@ entry:
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
@@ -1302,10 +1304,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vsaddu.mask.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 32 x i8> @intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1317,7 +1319,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
@@ -1326,9 +1328,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vsaddu.nxv64i8.i8(
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
- i64);
+ iXLen)
-define <vscale x 64 x i8> @intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -1339,7 +1341,7 @@ entry:
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
@@ -1349,10 +1351,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vsaddu.mask.nxv64i8.i8(
<vscale x 64 x i8>,
i8,
<vscale x 64 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
@@ -1364,7 +1366,7 @@ entry:
<vscale x 64 x i8> %1,
i8 %2,
<vscale x 64 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
@@ -1373,9 +1375,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vsaddu.nxv1i16.i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
- i64);
+ iXLen)
-define <vscale x 1 x i16> @intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
@@ -1386,7 +1388,7 @@ entry:
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
@@ -1396,10 +1398,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vsaddu.mask.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i16> @intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1411,7 +1413,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
@@ -1420,9 +1422,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vsaddu.nxv2i16.i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
- i64);
+ iXLen)
-define <vscale x 2 x i16> @intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
@@ -1433,7 +1435,7 @@ entry:
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
@@ -1443,10 +1445,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vsaddu.mask.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i16> @intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1458,7 +1460,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
@@ -1467,9 +1469,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vsaddu.nxv4i16.i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
- i64);
+ iXLen)
-define <vscale x 4 x i16> @intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
@@ -1480,7 +1482,7 @@ entry:
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
@@ -1490,10 +1492,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vsaddu.mask.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i16> @intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1505,7 +1507,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
@@ -1514,9 +1516,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vsaddu.nxv8i16.i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
- i64);
+ iXLen)
-define <vscale x 8 x i16> @intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
@@ -1527,7 +1529,7 @@ entry:
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
@@ -1537,10 +1539,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vsaddu.mask.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i16> @intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1552,7 +1554,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
@@ -1561,9 +1563,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vsaddu.nxv16i16.i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
- i64);
+ iXLen)
-define <vscale x 16 x i16> @intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
@@ -1574,7 +1576,7 @@ entry:
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
@@ -1584,10 +1586,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vsaddu.mask.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 16 x i16> @intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1599,7 +1601,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
@@ -1608,9 +1610,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vsaddu.nxv32i16.i16(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
- i64);
+ iXLen)
-define <vscale x 32 x i16> @intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
@@ -1621,7 +1623,7 @@ entry:
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
@@ -1631,10 +1633,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vsaddu.mask.nxv32i16.i16(
<vscale x 32 x i16>,
i16,
<vscale x 32 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 32 x i16> @intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
@@ -1646,7 +1648,7 @@ entry:
<vscale x 32 x i16> %1,
i16 %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
@@ -1655,9 +1657,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vsaddu.nxv1i32.i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
- i64);
+ iXLen)
-define <vscale x 1 x i32> @intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
@@ -1668,7 +1670,7 @@ entry:
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
@@ -1678,10 +1680,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vsaddu.mask.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i32> @intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1693,7 +1695,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
@@ -1702,9 +1704,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vsaddu.nxv2i32.i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
- i64);
+ iXLen)
-define <vscale x 2 x i32> @intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
@@ -1715,7 +1717,7 @@ entry:
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
@@ -1725,10 +1727,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vsaddu.mask.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i32> @intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1740,7 +1742,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
@@ -1749,9 +1751,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vsaddu.nxv4i32.i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
- i64);
+ iXLen)
-define <vscale x 4 x i32> @intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
@@ -1762,7 +1764,7 @@ entry:
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
@@ -1772,10 +1774,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vsaddu.mask.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i32> @intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1787,7 +1789,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
@@ -1796,9 +1798,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vsaddu.nxv8i32.i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
- i64);
+ iXLen)
-define <vscale x 8 x i32> @intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
@@ -1809,7 +1811,7 @@ entry:
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
@@ -1819,10 +1821,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vsaddu.mask.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i32> @intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1834,7 +1836,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
@@ -1843,9 +1845,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vsaddu.nxv16i32.i32(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
- i64);
+ iXLen)
-define <vscale x 16 x i32> @intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
@@ -1856,7 +1858,7 @@ entry:
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
@@ -1866,10 +1868,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vsaddu.mask.nxv16i32.i32(
<vscale x 16 x i32>,
i32,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 16 x i32> @intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
@@ -1881,7 +1883,7 @@ entry:
<vscale x 16 x i32> %1,
i32 %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
@@ -1890,20 +1892,32 @@ declare <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vsaddu.vx v8, v8, a0
-; CHECK-NEXT: ret
+ iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vsaddu.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsaddu_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vsaddu.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.nxv1i64.i64(
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
@@ -1913,22 +1927,34 @@ declare <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsaddu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsaddu.vv v8, v9, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsaddu_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsaddu.vx v8, v9, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsaddu.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
@@ -1937,20 +1963,32 @@ declare <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vsaddu.vx v8, v8, a0
-; CHECK-NEXT: ret
+ iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vsaddu.vv v8, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsaddu_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vsaddu.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.nxv2i64.i64(
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
@@ -1960,22 +1998,34 @@ declare <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsaddu.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsaddu.vv v8, v10, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsaddu_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsaddu.vx v8, v10, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsaddu.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
@@ -1984,20 +2034,32 @@ declare <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vsaddu.vx v8, v8, a0
-; CHECK-NEXT: ret
+ iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vsaddu.vv v8, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsaddu_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vsaddu.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.nxv4i64.i64(
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
@@ -2007,22 +2069,34 @@ declare <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vsaddu.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsaddu.vv v8, v12, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsaddu_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsaddu.vx v8, v12, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsaddu.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
@@ -2031,20 +2105,32 @@ declare <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vsaddu.vx v8, v8, a0
-; CHECK-NEXT: ret
+ iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vsaddu.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsaddu_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vsaddu.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.nxv8i64.i64(
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
@@ -2054,27 +2140,39 @@ declare <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vsaddu.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vlse64.v v24, (a0), zero
+; RV32-NEXT: vsaddu.vv v8, v16, v24, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsaddu_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsaddu.vx v8, v16, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsaddu.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
-define <vscale x 1 x i8> @intrinsic_vsaddu_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsaddu_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
@@ -2085,12 +2183,12 @@ entry:
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 1 x i8> %a
}
-define <vscale x 1 x i8> @intrinsic_vsaddu_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsaddu_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -2102,12 +2200,12 @@ entry:
<vscale x 1 x i8> %1,
i8 9,
<vscale x 1 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 1 x i8> %a
}
-define <vscale x 2 x i8> @intrinsic_vsaddu_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsaddu_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
@@ -2118,12 +2216,12 @@ entry:
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 2 x i8> %a
}
-define <vscale x 2 x i8> @intrinsic_vsaddu_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsaddu_mask_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -2135,12 +2233,12 @@ entry:
<vscale x 2 x i8> %1,
i8 9,
<vscale x 2 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 2 x i8> %a
}
-define <vscale x 4 x i8> @intrinsic_vsaddu_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsaddu_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
@@ -2151,12 +2249,12 @@ entry:
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 4 x i8> %a
}
-define <vscale x 4 x i8> @intrinsic_vsaddu_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsaddu_mask_vi_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -2168,12 +2266,12 @@ entry:
<vscale x 4 x i8> %1,
i8 9,
<vscale x 4 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 4 x i8> %a
}
-define <vscale x 8 x i8> @intrinsic_vsaddu_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsaddu_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
@@ -2184,12 +2282,12 @@ entry:
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 8 x i8> %a
}
-define <vscale x 8 x i8> @intrinsic_vsaddu_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsaddu_mask_vi_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -2201,12 +2299,12 @@ entry:
<vscale x 8 x i8> %1,
i8 9,
<vscale x 8 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 8 x i8> %a
}
-define <vscale x 16 x i8> @intrinsic_vsaddu_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsaddu_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
@@ -2217,12 +2315,12 @@ entry:
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 16 x i8> %a
}
-define <vscale x 16 x i8> @intrinsic_vsaddu_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsaddu_mask_vi_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -2234,12 +2332,12 @@ entry:
<vscale x 16 x i8> %1,
i8 9,
<vscale x 16 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 16 x i8> %a
}
-define <vscale x 32 x i8> @intrinsic_vsaddu_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsaddu_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
@@ -2250,12 +2348,12 @@ entry:
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 32 x i8> %a
}
-define <vscale x 32 x i8> @intrinsic_vsaddu_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsaddu_mask_vi_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -2267,12 +2365,12 @@ entry:
<vscale x 32 x i8> %1,
i8 9,
<vscale x 32 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 32 x i8> %a
}
-define <vscale x 64 x i8> @intrinsic_vsaddu_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsaddu_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
@@ -2283,12 +2381,12 @@ entry:
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 64 x i8> %a
}
-define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsaddu_mask_vi_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
@@ -2300,12 +2398,12 @@ entry:
<vscale x 64 x i8> %1,
i8 9,
<vscale x 64 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 64 x i8> %a
}
-define <vscale x 1 x i16> @intrinsic_vsaddu_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsaddu_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
@@ -2316,12 +2414,12 @@ entry:
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 1 x i16> %a
}
-define <vscale x 1 x i16> @intrinsic_vsaddu_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsaddu_mask_vi_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -2333,12 +2431,12 @@ entry:
<vscale x 1 x i16> %1,
i16 9,
<vscale x 1 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 1 x i16> %a
}
-define <vscale x 2 x i16> @intrinsic_vsaddu_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsaddu_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
@@ -2349,12 +2447,12 @@ entry:
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 2 x i16> %a
}
-define <vscale x 2 x i16> @intrinsic_vsaddu_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsaddu_mask_vi_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -2366,12 +2464,12 @@ entry:
<vscale x 2 x i16> %1,
i16 9,
<vscale x 2 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 2 x i16> %a
}
-define <vscale x 4 x i16> @intrinsic_vsaddu_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsaddu_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
@@ -2382,12 +2480,12 @@ entry:
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 4 x i16> %a
}
-define <vscale x 4 x i16> @intrinsic_vsaddu_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsaddu_mask_vi_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -2399,12 +2497,12 @@ entry:
<vscale x 4 x i16> %1,
i16 9,
<vscale x 4 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 4 x i16> %a
}
-define <vscale x 8 x i16> @intrinsic_vsaddu_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsaddu_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
@@ -2415,12 +2513,12 @@ entry:
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 8 x i16> %a
}
-define <vscale x 8 x i16> @intrinsic_vsaddu_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsaddu_mask_vi_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -2432,12 +2530,12 @@ entry:
<vscale x 8 x i16> %1,
i16 9,
<vscale x 8 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 8 x i16> %a
}
-define <vscale x 16 x i16> @intrinsic_vsaddu_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsaddu_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
@@ -2448,12 +2546,12 @@ entry:
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 16 x i16> %a
}
-define <vscale x 16 x i16> @intrinsic_vsaddu_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsaddu_mask_vi_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -2465,12 +2563,12 @@ entry:
<vscale x 16 x i16> %1,
i16 9,
<vscale x 16 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 16 x i16> %a
}
-define <vscale x 32 x i16> @intrinsic_vsaddu_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsaddu_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
@@ -2481,12 +2579,12 @@ entry:
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 32 x i16> %a
}
-define <vscale x 32 x i16> @intrinsic_vsaddu_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsaddu_mask_vi_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
@@ -2498,12 +2596,12 @@ entry:
<vscale x 32 x i16> %1,
i16 9,
<vscale x 32 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 32 x i16> %a
}
-define <vscale x 1 x i32> @intrinsic_vsaddu_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsaddu_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
@@ -2514,12 +2612,12 @@ entry:
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 1 x i32> %a
}
-define <vscale x 1 x i32> @intrinsic_vsaddu_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsaddu_mask_vi_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -2531,12 +2629,12 @@ entry:
<vscale x 1 x i32> %1,
i32 9,
<vscale x 1 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 1 x i32> %a
}
-define <vscale x 2 x i32> @intrinsic_vsaddu_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsaddu_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
@@ -2547,12 +2645,12 @@ entry:
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 2 x i32> %a
}
-define <vscale x 2 x i32> @intrinsic_vsaddu_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsaddu_mask_vi_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -2564,12 +2662,12 @@ entry:
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 2 x i32> %a
}
-define <vscale x 4 x i32> @intrinsic_vsaddu_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsaddu_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
@@ -2580,12 +2678,12 @@ entry:
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 4 x i32> %a
}
-define <vscale x 4 x i32> @intrinsic_vsaddu_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsaddu_mask_vi_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -2597,12 +2695,12 @@ entry:
<vscale x 4 x i32> %1,
i32 9,
<vscale x 4 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 4 x i32> %a
}
-define <vscale x 8 x i32> @intrinsic_vsaddu_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsaddu_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
@@ -2613,12 +2711,12 @@ entry:
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 8 x i32> %a
}
-define <vscale x 8 x i32> @intrinsic_vsaddu_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsaddu_mask_vi_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -2630,12 +2728,12 @@ entry:
<vscale x 8 x i32> %1,
i32 9,
<vscale x 8 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 8 x i32> %a
}
-define <vscale x 16 x i32> @intrinsic_vsaddu_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsaddu_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
@@ -2646,12 +2744,12 @@ entry:
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 16 x i32> %a
}
-define <vscale x 16 x i32> @intrinsic_vsaddu_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsaddu_mask_vi_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
@@ -2663,12 +2761,12 @@ entry:
<vscale x 16 x i32> %1,
i32 9,
<vscale x 16 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 16 x i32> %a
}
-define <vscale x 1 x i64> @intrinsic_vsaddu_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsaddu_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
@@ -2679,12 +2777,12 @@ entry:
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 1 x i64> %a
}
-define <vscale x 1 x i64> @intrinsic_vsaddu_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsaddu_mask_vi_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv1i64_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -2696,12 +2794,12 @@ entry:
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 1 x i64> %a
}
-define <vscale x 2 x i64> @intrinsic_vsaddu_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsaddu_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
@@ -2712,12 +2810,12 @@ entry:
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 2 x i64> %a
}
-define <vscale x 2 x i64> @intrinsic_vsaddu_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsaddu_mask_vi_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv2i64_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -2729,12 +2827,12 @@ entry:
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 2 x i64> %a
}
-define <vscale x 4 x i64> @intrinsic_vsaddu_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsaddu_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
@@ -2745,12 +2843,12 @@ entry:
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 4 x i64> %a
}
-define <vscale x 4 x i64> @intrinsic_vsaddu_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsaddu_mask_vi_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv4i64_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
@@ -2762,12 +2860,12 @@ entry:
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 4 x i64> %a
}
-define <vscale x 8 x i64> @intrinsic_vsaddu_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsaddu_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
@@ -2778,12 +2876,12 @@ entry:
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 9,
- i64 %1)
+ iXLen %1)
ret <vscale x 8 x i64> %a
}
-define <vscale x 8 x i64> @intrinsic_vsaddu_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsaddu_mask_vi_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsaddu_mask_vi_nxv8i64_nxv8i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
@@ -2795,7 +2893,7 @@ entry:
<vscale x 8 x i64> %1,
i64 9,
<vscale x 8 x i1> %2,
- i64 %3, i64 1)
+ iXLen %3, iXLen 1)
ret <vscale x 8 x i64> %a
}
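For readers comparing the deleted per-target files with the merged tests added by this commit, the merged versions are written against the placeholder type iXLen and drive both targets from a single source. The exact RUN lines live in the added files; as a minimal sketch, assuming the sed-based substitution commonly used for merged RVV intrinsic tests (the exact flags in the new files may differ), they typically look like:

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64

Codegen that is identical on both targets is matched under the shared CHECK prefix, while the i64 scalar-operand cases above split into RV32/RV64 blocks because riscv32 has to pass the 64-bit scalar through the stack and splat it with a zero-stride vlse64.v rather than using the .vx form.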
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
deleted file mode 100644
index e7d8ae635f75c..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv32.ll
+++ /dev/null
@@ -1,2166 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-; RUN: not --crash llc -mtriple=riscv32 -mattr=+zve64d 2>&1 \
-; RUN: < %s | FileCheck %s --check-prefixes=ZVE64D
-
-; ZVE64D: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vsmul
-
-declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i32, i32);
-
-define <vscale x 1 x i8> @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vsmul.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i32 0, i32 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32, i32, i32);
-
-define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i32, i32);
-
-define <vscale x 2 x i8> @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vsmul.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i32 0, i32 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32, i32, i32);
-
-define <vscale x 2 x i8> @intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i32, i32);
-
-define <vscale x 4 x i8> @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vsmul.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i32 0, i32 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32, i32, i32);
-
-define <vscale x 4 x i8> @intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i32, i32);
-
-define <vscale x 8 x i8> @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vsmul.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i32 0, i32 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32, i32, i32);
-
-define <vscale x 8 x i8> @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i32, i32);
-
-define <vscale x 16 x i8> @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vsmul.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i32 0, i32 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32, i32, i32);
-
-define <vscale x 16 x i8> @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i32, i32);
-
-define <vscale x 32 x i8> @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vsmul.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i32 0, i32 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i32, i32, i32);
-
-define <vscale x 32 x i8> @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i32, i32);
-
-define <vscale x 64 x i8> @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vsmul.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i32 0, i32 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i32, i32, i32);
-
-define <vscale x 64 x i8> @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8r.v v24, (a0)
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i32, i32);
-
-define <vscale x 1 x i16> @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vsmul.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i32 0, i32 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32, i32, i32);
-
-define <vscale x 1 x i16> @intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i32, i32);
-
-define <vscale x 2 x i16> @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vsmul.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i32 0, i32 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32, i32, i32);
-
-define <vscale x 2 x i16> @intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i32, i32);
-
-define <vscale x 4 x i16> @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vsmul.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i32 0, i32 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32, i32, i32);
-
-define <vscale x 4 x i16> @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i32, i32);
-
-define <vscale x 8 x i16> @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vsmul.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i32 0, i32 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32, i32, i32);
-
-define <vscale x 8 x i16> @intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i32, i32);
-
-define <vscale x 16 x i16> @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vsmul.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i32 0, i32 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32, i32, i32);
-
-define <vscale x 16 x i16> @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i32, i32);
-
-define <vscale x 32 x i16> @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vsmul.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i32 0, i32 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i32, i32, i32);
-
-define <vscale x 32 x i16> @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re16.v v24, (a0)
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32, i32);
-
-define <vscale x 1 x i32> @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vsmul.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 0, i32 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32, i32, i32);
-
-define <vscale x 1 x i32> @intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32, i32);
-
-define <vscale x 2 x i32> @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vsmul.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 0, i32 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32, i32, i32);
-
-define <vscale x 2 x i32> @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32, i32);
-
-define <vscale x 4 x i32> @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vsmul.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 0, i32 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32, i32, i32);
-
-define <vscale x 4 x i32> @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32, i32);
-
-define <vscale x 8 x i32> @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vsmul.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 0, i32 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32, i32, i32);
-
-define <vscale x 8 x i32> @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32, i32);
-
-define <vscale x 16 x i32> @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vsmul.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 0, i32 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i32, i32, i32);
-
-define <vscale x 16 x i32> @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re32.v v24, (a0)
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i32, i32);
-
-define <vscale x 1 x i64> @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vsmul.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i32 0, i32 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i32, i32, i32);
-
-define <vscale x 1 x i64> @intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i32, i32);
-
-define <vscale x 2 x i64> @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vsmul.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i32 0, i32 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i32, i32, i32);
-
-define <vscale x 2 x i64> @intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i32, i32);
-
-define <vscale x 4 x i64> @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vsmul.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i32 0, i32 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i32, i32, i32);
-
-define <vscale x 4 x i64> @intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i32, i32);
-
-define <vscale x 8 x i64> @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vsmul.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i32 0, i32 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i32, i32, i32);
-
-define <vscale x 8 x i64> @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- i32, i32);
-
-define <vscale x 1 x i8> @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vsmul.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i8 %1,
- i32 0, i32 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i32, i32, i32);
-
-define <vscale x 1 x i8> @intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- i32, i32);
-
-define <vscale x 2 x i8> @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vsmul.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i8 %1,
- i32 0, i32 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i32, i32, i32);
-
-define <vscale x 2 x i8> @intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- i32, i32);
-
-define <vscale x 4 x i8> @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vsmul.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i8 %1,
- i32 0, i32 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i32, i32, i32);
-
-define <vscale x 4 x i8> @intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- i32, i32);
-
-define <vscale x 8 x i8> @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vsmul.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i8 %1,
- i32 0, i32 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i32, i32, i32);
-
-define <vscale x 8 x i8> @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- i32, i32);
-
-define <vscale x 16 x i8> @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vsmul.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i8 %1,
- i32 0, i32 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i32, i32, i32);
-
-define <vscale x 16 x i8> @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- i32, i32);
-
-define <vscale x 32 x i8> @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vsmul.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i8 %1,
- i32 0, i32 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i32, i32, i32);
-
-define <vscale x 32 x i8> @intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- i32, i32);
-
-define <vscale x 64 x i8> @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vsmul.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i8 %1,
- i32 0, i32 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- <vscale x 64 x i1>,
- i32, i32, i32);
-
-define <vscale x 64 x i8> @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i8 %2,
- <vscale x 64 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- i32, i32);
-
-define <vscale x 1 x i16> @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vsmul.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i16 %1,
- i32 0, i32 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i32, i32, i32);
-
-define <vscale x 1 x i16> @intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- i32, i32);
-
-define <vscale x 2 x i16> @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vsmul.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i16 %1,
- i32 0, i32 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i32, i32, i32);
-
-define <vscale x 2 x i16> @intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- i32, i32);
-
-define <vscale x 4 x i16> @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vsmul.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i16 %1,
- i32 0, i32 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i32, i32, i32);
-
-define <vscale x 4 x i16> @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- i32, i32);
-
-define <vscale x 8 x i16> @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vsmul.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i16 %1,
- i32 0, i32 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i32, i32, i32);
-
-define <vscale x 8 x i16> @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- i32, i32);
-
-define <vscale x 16 x i16> @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vsmul.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i16 %1,
- i32 0, i32 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i32, i32, i32);
-
-define <vscale x 16 x i16> @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- i32, i32);
-
-define <vscale x 32 x i16> @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vsmul.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i16 %1,
- i32 0, i32 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- <vscale x 32 x i1>,
- i32, i32, i32);
-
-define <vscale x 32 x i16> @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i16 %2,
- <vscale x 32 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32, i32, i32);
-
-define <vscale x 1 x i32> @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vsmul.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 %1,
- i32 0, i32 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i32, i32, i32);
-
-define <vscale x 1 x i32> @intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32, i32, i32);
-
-define <vscale x 2 x i32> @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vsmul.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 %1,
- i32 0, i32 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i32, i32, i32);
-
-define <vscale x 2 x i32> @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32, i32, i32);
-
-define <vscale x 4 x i32> @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vsmul.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 %1,
- i32 0, i32 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i32, i32, i32);
-
-define <vscale x 4 x i32> @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32, i32, i32);
-
-define <vscale x 8 x i32> @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vsmul.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 %1,
- i32 0, i32 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i32, i32, i32);
-
-define <vscale x 8 x i32> @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32, i32, i32);
-
-define <vscale x 16 x i32> @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vsmul.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 %1,
- i32 0, i32 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- <vscale x 16 x i1>,
- i32, i32, i32);
-
-define <vscale x 16 x i32> @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2,
- <vscale x 16 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- i32, i32);
-
-define <vscale x 1 x i64> @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsmul.vv v8, v8, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 %1,
- i32 0, i32 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i32, i32, i32);
-
-define <vscale x 1 x i64> @intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsmul.vv v8, v9, v10, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- i32, i32);
-
-define <vscale x 2 x i64> @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsmul.vv v8, v8, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 %1,
- i32 0, i32 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i32, i32, i32);
-
-define <vscale x 2 x i64> @intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsmul.vv v8, v10, v12, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- i32, i32);
-
-define <vscale x 4 x i64> @intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsmul.vv v8, v8, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 %1,
- i32 0, i32 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i32, i32, i32);
-
-define <vscale x 4 x i64> @intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsmul.vv v8, v12, v16, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- i32, i32);
-
-define <vscale x 8 x i64> @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsmul.vv v8, v8, v16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 %1,
- i32 0, i32 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- <vscale x 8 x i1>,
- i32, i32, i32);
-
-define <vscale x 8 x i64> @intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsmul.vv v8, v16, v24, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2,
- <vscale x 8 x i1> %3,
- i32 0, i32 %4, i32 1)
-
- ret <vscale x 8 x i64> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsmul.ll
similarity index 79%
rename from llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vsmul.ll
index 66bc5c9103a48..bc53bce889ddb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsmul-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsmul.ll
@@ -1,8 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-; RUN: not --crash llc -mtriple=riscv64 -mattr=+zve64d 2>&1 \
-; RUN: < %s | FileCheck %s --check-prefixes=ZVE64D
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: sed 's/iXLen/i64/g' %s | not --crash llc -mtriple=riscv64 \
+; RUN: -mattr=+zve64d 2>&1 | FileCheck %s --check-prefixes=ZVE64D
; ZVE64D: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vsmul
@@ -10,9 +12,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 1 x i8> @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -24,7 +26,7 @@ entry:
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 1 x i8> %a
}
@@ -34,9 +36,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -49,7 +51,7 @@ entry:
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
@@ -58,9 +60,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 2 x i8> @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -72,7 +74,7 @@ entry:
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 2 x i8> %a
}
@@ -82,9 +84,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 2 x i8> @intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -97,7 +99,7 @@ entry:
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
@@ -106,9 +108,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 4 x i8> @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -120,7 +122,7 @@ entry:
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 4 x i8> %a
}
@@ -130,9 +132,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 4 x i8> @intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -145,7 +147,7 @@ entry:
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
@@ -154,9 +156,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 8 x i8> @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -168,7 +170,7 @@ entry:
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 8 x i8> %a
}
@@ -178,9 +180,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 8 x i8> @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -193,7 +195,7 @@ entry:
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
@@ -202,9 +204,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 16 x i8> @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -216,7 +218,7 @@ entry:
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 16 x i8> %a
}
@@ -226,9 +228,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 16 x i8> @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -241,7 +243,7 @@ entry:
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
@@ -250,9 +252,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 32 x i8> @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -264,7 +266,7 @@ entry:
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 32 x i8> %a
}
@@ -274,9 +276,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 32 x i8> @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -289,7 +291,7 @@ entry:
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
@@ -298,9 +300,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.nxv64i8(
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 64 x i8> @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -312,7 +314,7 @@ entry:
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 64 x i8> %a
}
@@ -322,9 +324,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.nxv64i8(
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 64 x i8> @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8r.v v24, (a0)
@@ -338,7 +340,7 @@ entry:
<vscale x 64 x i8> %1,
<vscale x 64 x i8> %2,
<vscale x 64 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
@@ -347,9 +349,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 1 x i16> @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -361,7 +363,7 @@ entry:
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 1 x i16> %a
}
@@ -371,9 +373,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 1 x i16> @intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -386,7 +388,7 @@ entry:
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
@@ -395,9 +397,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 2 x i16> @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -409,7 +411,7 @@ entry:
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 2 x i16> %a
}
@@ -419,9 +421,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 2 x i16> @intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -434,7 +436,7 @@ entry:
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
@@ -443,9 +445,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 4 x i16> @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -457,7 +459,7 @@ entry:
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 4 x i16> %a
}
@@ -467,9 +469,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 4 x i16> @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -482,7 +484,7 @@ entry:
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
@@ -491,9 +493,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 8 x i16> @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -505,7 +507,7 @@ entry:
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 8 x i16> %a
}
@@ -515,9 +517,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 8 x i16> @intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -530,7 +532,7 @@ entry:
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
@@ -539,9 +541,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 16 x i16> @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -553,7 +555,7 @@ entry:
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 16 x i16> %a
}
@@ -563,9 +565,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 16 x i16> @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -578,7 +580,7 @@ entry:
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
@@ -587,9 +589,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.nxv32i16(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 32 x i16> @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -601,7 +603,7 @@ entry:
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 32 x i16> %a
}
@@ -611,9 +613,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.nxv32i16(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 32 x i16> @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re16.v v24, (a0)
@@ -627,7 +629,7 @@ entry:
<vscale x 32 x i16> %1,
<vscale x 32 x i16> %2,
<vscale x 32 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
@@ -636,9 +638,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 1 x i32> @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -650,7 +652,7 @@ entry:
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 1 x i32> %a
}
@@ -660,9 +662,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 1 x i32> @intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -675,7 +677,7 @@ entry:
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
@@ -684,9 +686,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 2 x i32> @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -698,7 +700,7 @@ entry:
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 2 x i32> %a
}
@@ -708,9 +710,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 2 x i32> @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -723,7 +725,7 @@ entry:
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
@@ -732,9 +734,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 4 x i32> @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -746,7 +748,7 @@ entry:
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 4 x i32> %a
}
@@ -756,9 +758,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 4 x i32> @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -771,7 +773,7 @@ entry:
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
@@ -780,9 +782,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 8 x i32> @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -794,7 +796,7 @@ entry:
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 8 x i32> %a
}
@@ -804,9 +806,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 8 x i32> @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -819,7 +821,7 @@ entry:
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
@@ -828,9 +830,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.nxv16i32(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 16 x i32> @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -842,7 +844,7 @@ entry:
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 16 x i32> %a
}
@@ -852,9 +854,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.nxv16i32(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 16 x i32> @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re32.v v24, (a0)
@@ -868,7 +870,7 @@ entry:
<vscale x 16 x i32> %1,
<vscale x 16 x i32> %2,
<vscale x 16 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
@@ -877,9 +879,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 1 x i64> @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -891,7 +893,7 @@ entry:
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 1 x i64> %a
}
@@ -901,9 +903,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 1 x i64> @intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -916,7 +918,7 @@ entry:
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
@@ -925,9 +927,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 2 x i64> @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -939,7 +941,7 @@ entry:
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 2 x i64> %a
}
@@ -949,9 +951,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 2 x i64> @intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -964,7 +966,7 @@ entry:
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
@@ -973,9 +975,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 4 x i64> @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -987,7 +989,7 @@ entry:
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 4 x i64> %a
}
@@ -997,9 +999,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 4 x i64> @intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1012,7 +1014,7 @@ entry:
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
@@ -1021,9 +1023,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 8 x i64> @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1035,7 +1037,7 @@ entry:
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 8 x i64> %a
}
@@ -1045,9 +1047,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 8 x i64> @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
@@ -1061,7 +1063,7 @@ entry:
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
@@ -1070,9 +1072,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.nxv1i8.i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 1 x i8> @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1084,7 +1086,7 @@ entry:
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 1 x i8> %a
}
@@ -1094,9 +1096,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 1 x i8> @intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1109,7 +1111,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
@@ -1118,9 +1120,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vsmul.nxv2i8.i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 2 x i8> @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1132,7 +1134,7 @@ entry:
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 2 x i8> %a
}
@@ -1142,9 +1144,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vsmul.mask.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 2 x i8> @intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1157,7 +1159,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
@@ -1166,9 +1168,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vsmul.nxv4i8.i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 4 x i8> @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1180,7 +1182,7 @@ entry:
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 4 x i8> %a
}
@@ -1190,9 +1192,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vsmul.mask.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 4 x i8> @intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1205,7 +1207,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
@@ -1214,9 +1216,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vsmul.nxv8i8.i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 8 x i8> @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1228,7 +1230,7 @@ entry:
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 8 x i8> %a
}
@@ -1238,9 +1240,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vsmul.mask.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 8 x i8> @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1253,7 +1255,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
@@ -1262,9 +1264,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vsmul.nxv16i8.i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 16 x i8> @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1276,7 +1278,7 @@ entry:
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 16 x i8> %a
}
@@ -1286,9 +1288,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vsmul.mask.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 16 x i8> @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1301,7 +1303,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
@@ -1310,9 +1312,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vsmul.nxv32i8.i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 32 x i8> @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1324,7 +1326,7 @@ entry:
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 32 x i8> %a
}
@@ -1334,9 +1336,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vsmul.mask.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 32 x i8> @intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1349,7 +1351,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
@@ -1358,9 +1360,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vsmul.nxv64i8.i8(
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 64 x i8> @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1372,7 +1374,7 @@ entry:
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 64 x i8> %a
}
@@ -1382,9 +1384,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vsmul.mask.nxv64i8.i8(
<vscale x 64 x i8>,
i8,
<vscale x 64 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 64 x i8> @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1397,7 +1399,7 @@ entry:
<vscale x 64 x i8> %1,
i8 %2,
<vscale x 64 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
@@ -1406,9 +1408,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vsmul.nxv1i16.i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 1 x i16> @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1420,7 +1422,7 @@ entry:
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 1 x i16> %a
}
@@ -1430,9 +1432,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vsmul.mask.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 1 x i16> @intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1445,7 +1447,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
@@ -1454,9 +1456,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vsmul.nxv2i16.i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 2 x i16> @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1468,7 +1470,7 @@ entry:
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 2 x i16> %a
}
@@ -1478,9 +1480,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vsmul.mask.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 2 x i16> @intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1493,7 +1495,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
@@ -1502,9 +1504,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vsmul.nxv4i16.i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 4 x i16> @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1516,7 +1518,7 @@ entry:
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 4 x i16> %a
}
@@ -1526,9 +1528,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vsmul.mask.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 4 x i16> @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1541,7 +1543,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
@@ -1550,9 +1552,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vsmul.nxv8i16.i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 8 x i16> @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1564,7 +1566,7 @@ entry:
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 8 x i16> %a
}
@@ -1574,9 +1576,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vsmul.mask.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 8 x i16> @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1589,7 +1591,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
@@ -1598,9 +1600,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vsmul.nxv16i16.i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 16 x i16> @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1612,7 +1614,7 @@ entry:
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 16 x i16> %a
}
@@ -1622,9 +1624,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vsmul.mask.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 16 x i16> @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1637,7 +1639,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
@@ -1646,9 +1648,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vsmul.nxv32i16.i16(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 32 x i16> @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1660,7 +1662,7 @@ entry:
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 32 x i16> %a
}
@@ -1670,9 +1672,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vsmul.mask.nxv32i16.i16(
<vscale x 32 x i16>,
i16,
<vscale x 32 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 32 x i16> @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1685,7 +1687,7 @@ entry:
<vscale x 32 x i16> %1,
i16 %2,
<vscale x 32 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
@@ -1694,9 +1696,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vsmul.nxv1i32.i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 1 x i32> @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1708,7 +1710,7 @@ entry:
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 1 x i32> %a
}
@@ -1718,9 +1720,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vsmul.mask.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 1 x i32> @intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1733,7 +1735,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
@@ -1742,9 +1744,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vsmul.nxv2i32.i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 2 x i32> @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1756,7 +1758,7 @@ entry:
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 2 x i32> %a
}
@@ -1766,9 +1768,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vsmul.mask.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 2 x i32> @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1781,7 +1783,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
@@ -1790,9 +1792,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vsmul.nxv4i32.i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 4 x i32> @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1804,7 +1806,7 @@ entry:
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 4 x i32> %a
}
@@ -1814,9 +1816,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vsmul.mask.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 4 x i32> @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1829,7 +1831,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
@@ -1838,9 +1840,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vsmul.nxv8i32.i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 8 x i32> @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1852,7 +1854,7 @@ entry:
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 8 x i32> %a
}
@@ -1862,9 +1864,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vsmul.mask.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 8 x i32> @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1877,7 +1879,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
@@ -1886,9 +1888,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vsmul.nxv16i32.i32(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
- i64, i64);
+ iXLen, iXLen)
-define <vscale x 16 x i32> @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vsmul_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1900,7 +1902,7 @@ entry:
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 16 x i32> %a
}
@@ -1910,9 +1912,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vsmul.mask.nxv16i32.i32(
<vscale x 16 x i32>,
i32,
<vscale x 16 x i1>,
- i64, i64, i64);
+ iXLen, iXLen, iXLen)
-define <vscale x 16 x i32> @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrwi vxrm, 0
@@ -1925,7 +1927,7 @@ entry:
<vscale x 16 x i32> %1,
i32 %2,
<vscale x 16 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
@@ -1933,21 +1935,35 @@ entry:
declare <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i64, i64, i64);
-
-define <vscale x 1 x i64> @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vsmul.vx v8, v8, a0
-; CHECK-NEXT: ret
+ i64,
+ iXLen, iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: csrwi vxrm, 0
+; RV32-NEXT: vsmul.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: csrwi vxrm, 0
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vsmul.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64(
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 1 x i64> %a
}
@@ -1957,22 +1973,35 @@ declare <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i64, i64, i64);
-
-define <vscale x 1 x i64> @intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vsmul.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen, iXLen, iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: csrwi vxrm, 0
+; RV32-NEXT: vsmul.vv v8, v9, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsmul_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: csrwi vxrm, 0
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsmul.vx v8, v9, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
@@ -1980,21 +2009,35 @@ entry:
declare <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i64, i64, i64);
-
-define <vscale x 2 x i64> @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vsmul.vx v8, v8, a0
-; CHECK-NEXT: ret
+ i64,
+ iXLen, iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: csrwi vxrm, 0
+; RV32-NEXT: vsmul.vv v8, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsmul_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: csrwi vxrm, 0
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vsmul.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64(
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 2 x i64> %a
}
@@ -2004,22 +2047,35 @@ declare <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i64, i64, i64);
-
-define <vscale x 2 x i64> @intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vsmul.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen, iXLen, iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: csrwi vxrm, 0
+; RV32-NEXT: vsmul.vv v8, v10, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsmul_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: csrwi vxrm, 0
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsmul.vx v8, v10, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
@@ -2027,21 +2083,35 @@ entry:
declare <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i64, i64, i64);
-
-define <vscale x 4 x i64> @intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vsmul.vx v8, v8, a0
-; CHECK-NEXT: ret
+ i64,
+ iXLen, iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: csrwi vxrm, 0
+; RV32-NEXT: vsmul.vv v8, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsmul_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: csrwi vxrm, 0
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vsmul.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64(
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 4 x i64> %a
}
@@ -2051,22 +2121,35 @@ declare <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i64, i64, i64);
-
-define <vscale x 4 x i64> @intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vsmul.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen, iXLen, iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: csrwi vxrm, 0
+; RV32-NEXT: vsmul.vv v8, v12, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsmul_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: csrwi vxrm, 0
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsmul.vx v8, v12, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
@@ -2074,21 +2157,35 @@ entry:
declare <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
- i64, i64, i64);
-
-define <vscale x 8 x i64> @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vsmul.vx v8, v8, a0
-; CHECK-NEXT: ret
+ i64,
+ iXLen, iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: csrwi vxrm, 0
+; RV32-NEXT: vsmul.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsmul_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: csrwi vxrm, 0
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vsmul.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64(
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
- i64 0, i64 %2)
+ iXLen 0, iXLen %2)
ret <vscale x 8 x i64> %a
}
@@ -2098,22 +2195,35 @@ declare <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
- i64, i64, i64);
-
-define <vscale x 8 x i64> @intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrwi vxrm, 0
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vsmul.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen, iXLen, iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vlse64.v v24, (a0), zero
+; RV32-NEXT: csrwi vxrm, 0
+; RV32-NEXT: vsmul.vv v8, v16, v24, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vsmul_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: csrwi vxrm, 0
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsmul.vx v8, v16, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
- i64 0, i64 %4, i64 1)
+ iXLen 0, iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
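
(Editorial note on the merge mechanics, not part of the diff: the rv32 and rv64 copies of these tests differed only in the type used for the VL, rounding-mode and policy operands, so the merged files spell that type as iXLen and let the test harness substitute the concrete type per target. The RUN lines of the new vsmul.ll are not shown in this excerpt; a typical setup for such XLEN-parameterized tests looks like the sketch below, where the exact check prefixes are an assumption, though they match the CHECK/RV32/RV64 labels visible in the hunks above. Only the vx cases with a genuine i64 scalar keep split RV32/RV64 bodies, since rv32 must spill the 64-bit scalar to the stack and broadcast it with vlse64.v before a vector-vector vsmul, as the hunks above show.)

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64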
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll
deleted file mode 100644
index 3928e6fbd1f75..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv64.ll
+++ /dev/null
@@ -1,2075 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-
-declare <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vssub.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vssub.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vssub.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vssub.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vssub.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i64 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vssub.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i64 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vssub.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i64 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i64,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8r.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vssub.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vssub.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vssub.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vssub.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vssub.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i64 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vssub.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i64 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re16.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vssub.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vssub.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vssub.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vssub.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vssub.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i64 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re32.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vssub.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vssub.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vssub.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vssub.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vssub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vssub.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i8> @intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vssub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vssub.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i8> @intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vssub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vssub.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i8> @intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vssub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vssub.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i8> @intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vssub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vssub.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i8> @intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vssub.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vssub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vssub.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i8> @intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vssub.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vssub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vssub.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i8 %1,
- i64 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- <vscale x 64 x i1>,
- i64,
- i64);
-
-define <vscale x 64 x i8> @intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vssub.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i8 %2,
- <vscale x 64 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vssub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vssub.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i16> @intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vssub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vssub.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i16> @intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vssub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vssub.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i16> @intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vssub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vssub.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i16> @intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vssub.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vssub_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vssub.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i16> @intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vssub.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vssub_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vssub.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i16 %1,
- i64 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- <vscale x 32 x i1>,
- i64,
- i64);
-
-define <vscale x 32 x i16> @intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vssub.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i16 %2,
- <vscale x 32 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vssub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vssub.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i32> @intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vssub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vssub.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i32> @intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vssub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vssub.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i32> @intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vssub.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vssub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vssub.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i32> @intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vssub.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vssub_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vssub.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 %1,
- i64 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- <vscale x 16 x i1>,
- i64,
- i64);
-
-define <vscale x 16 x i32> @intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vssub.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2,
- <vscale x 16 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vssub.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vssub.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vssub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vssub.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vssub.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vssub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vssub.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vssub.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vssub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vssub.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 %1,
- i64 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- <vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vssub.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2,
- <vscale x 8 x i1> %3,
- i64 %4, i64 1)
-
- ret <vscale x 8 x i64> %a
-}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssub.ll
similarity index 80%
rename from llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll
rename to llvm/test/CodeGen/RISCV/rvv/vssub.ll
index f3ba5daafb683..50fca5e832af5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub.ll
@@ -1,14 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
declare <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i32);
+ iXLen)
-define <vscale x 1 x i8> @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
@@ -19,7 +21,7 @@ entry:
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
@@ -29,10 +31,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i8> @intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -44,7 +46,7 @@ entry:
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
@@ -53,9 +55,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i32);
+ iXLen)
-define <vscale x 2 x i8> @intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
@@ -66,7 +68,7 @@ entry:
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
@@ -76,10 +78,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i8> @intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -91,7 +93,7 @@ entry:
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
@@ -100,9 +102,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i32);
+ iXLen)
-define <vscale x 4 x i8> @intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
@@ -113,7 +115,7 @@ entry:
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
@@ -123,10 +125,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i8> @intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -138,7 +140,7 @@ entry:
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
@@ -147,9 +149,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i32);
+ iXLen)
-define <vscale x 8 x i8> @intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
@@ -160,7 +162,7 @@ entry:
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
@@ -170,10 +172,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i8> @intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -185,7 +187,7 @@ entry:
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
@@ -194,9 +196,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i32);
+ iXLen)
-define <vscale x 16 x i8> @intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
@@ -207,7 +209,7 @@ entry:
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
@@ -217,10 +219,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 16 x i8> @intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -232,7 +234,7 @@ entry:
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
@@ -241,9 +243,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i32);
+ iXLen)
-define <vscale x 32 x i8> @intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
@@ -254,7 +256,7 @@ entry:
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
@@ -264,10 +266,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 32 x i8> @intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -279,7 +281,7 @@ entry:
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
@@ -288,9 +290,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.nxv64i8(
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
- i32);
+ iXLen)
-define <vscale x 64 x i8> @intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
@@ -301,7 +303,7 @@ entry:
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
@@ -311,10 +313,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.nxv64i8(
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 64 x i8> @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8r.v v24, (a0)
@@ -327,7 +329,7 @@ entry:
<vscale x 64 x i8> %1,
<vscale x 64 x i8> %2,
<vscale x 64 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
@@ -336,9 +338,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i32);
+ iXLen)
-define <vscale x 1 x i16> @intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
@@ -349,7 +351,7 @@ entry:
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
@@ -359,10 +361,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i16> @intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -374,7 +376,7 @@ entry:
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
@@ -383,9 +385,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i32);
+ iXLen)
-define <vscale x 2 x i16> @intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
@@ -396,7 +398,7 @@ entry:
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
@@ -406,10 +408,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i16> @intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -421,7 +423,7 @@ entry:
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
@@ -430,9 +432,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i32);
+ iXLen)
-define <vscale x 4 x i16> @intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
@@ -443,7 +445,7 @@ entry:
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
@@ -453,10 +455,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i16> @intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -468,7 +470,7 @@ entry:
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
@@ -477,9 +479,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i32);
+ iXLen)
-define <vscale x 8 x i16> @intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
@@ -490,7 +492,7 @@ entry:
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
@@ -500,10 +502,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i16> @intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -515,7 +517,7 @@ entry:
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
@@ -524,9 +526,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i32);
+ iXLen)
-define <vscale x 16 x i16> @intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
@@ -537,7 +539,7 @@ entry:
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
@@ -547,10 +549,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 16 x i16> @intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -562,7 +564,7 @@ entry:
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
@@ -571,9 +573,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.nxv32i16(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
- i32);
+ iXLen)
-define <vscale x 32 x i16> @intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
@@ -584,7 +586,7 @@ entry:
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
@@ -594,10 +596,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.nxv32i16(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 32 x i16> @intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re16.v v24, (a0)
@@ -610,7 +612,7 @@ entry:
<vscale x 32 x i16> %1,
<vscale x 32 x i16> %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
@@ -619,9 +621,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i32);
+ iXLen)
-define <vscale x 1 x i32> @intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
@@ -632,7 +634,7 @@ entry:
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
@@ -642,10 +644,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i32> @intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -657,7 +659,7 @@ entry:
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
@@ -666,9 +668,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i32);
+ iXLen)
-define <vscale x 2 x i32> @intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
@@ -679,7 +681,7 @@ entry:
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
@@ -689,10 +691,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i32> @intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -704,7 +706,7 @@ entry:
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
@@ -713,9 +715,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i32);
+ iXLen)
-define <vscale x 4 x i32> @intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
@@ -726,7 +728,7 @@ entry:
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
@@ -736,10 +738,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i32> @intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -751,7 +753,7 @@ entry:
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
@@ -760,9 +762,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i32);
+ iXLen)
-define <vscale x 8 x i32> @intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
@@ -773,7 +775,7 @@ entry:
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
@@ -783,10 +785,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i32> @intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -798,7 +800,7 @@ entry:
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
@@ -807,9 +809,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.nxv16i32(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
- i32);
+ iXLen)
-define <vscale x 16 x i32> @intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
@@ -820,7 +822,7 @@ entry:
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
@@ -830,10 +832,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.nxv16i32(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 16 x i32> @intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re32.v v24, (a0)
@@ -846,7 +848,7 @@ entry:
<vscale x 16 x i32> %1,
<vscale x 16 x i32> %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
@@ -855,9 +857,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i32);
+ iXLen)
-define <vscale x 1 x i64> @intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
@@ -868,7 +870,7 @@ entry:
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
@@ -878,10 +880,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i64> @intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -893,7 +895,7 @@ entry:
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
@@ -902,9 +904,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i32);
+ iXLen)
-define <vscale x 2 x i64> @intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
@@ -915,7 +917,7 @@ entry:
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
@@ -925,10 +927,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i64> @intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -940,7 +942,7 @@ entry:
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
@@ -949,9 +951,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i32);
+ iXLen)
-define <vscale x 4 x i64> @intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
@@ -962,7 +964,7 @@ entry:
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
@@ -972,10 +974,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i64> @intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
@@ -987,7 +989,7 @@ entry:
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
@@ -996,9 +998,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
- i32);
+ iXLen)
-define <vscale x 8 x i64> @intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
@@ -1009,7 +1011,7 @@ entry:
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
@@ -1019,10 +1021,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i64> @intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
@@ -1035,7 +1037,7 @@ entry:
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
@@ -1044,9 +1046,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vssub.nxv1i8.i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
- i32);
+ iXLen)
-define <vscale x 1 x i8> @intrinsic_vssub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vssub_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -1057,7 +1059,7 @@ entry:
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
@@ -1067,10 +1069,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vssub.mask.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i8> @intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1082,7 +1084,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
@@ -1091,9 +1093,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vssub.nxv2i8.i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
- i32);
+ iXLen)
-define <vscale x 2 x i8> @intrinsic_vssub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vssub_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -1104,7 +1106,7 @@ entry:
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
@@ -1114,10 +1116,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vssub.mask.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i8> @intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1129,7 +1131,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
@@ -1138,9 +1140,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vssub.nxv4i8.i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
- i32);
+ iXLen)
-define <vscale x 4 x i8> @intrinsic_vssub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vssub_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -1151,7 +1153,7 @@ entry:
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
@@ -1161,10 +1163,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vssub.mask.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i8> @intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1176,7 +1178,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
@@ -1185,9 +1187,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vssub.nxv8i8.i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
- i32);
+ iXLen)
-define <vscale x 8 x i8> @intrinsic_vssub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vssub_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -1198,7 +1200,7 @@ entry:
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
@@ -1208,10 +1210,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vssub.mask.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i8> @intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1223,7 +1225,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
@@ -1232,9 +1234,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vssub.nxv16i8.i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
- i32);
+ iXLen)
-define <vscale x 16 x i8> @intrinsic_vssub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vssub_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -1245,7 +1247,7 @@ entry:
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
@@ -1255,10 +1257,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vssub.mask.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 16 x i8> @intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1270,7 +1272,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
@@ -1279,9 +1281,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vssub.nxv32i8.i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
- i32);
+ iXLen)
-define <vscale x 32 x i8> @intrinsic_vssub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vssub_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -1292,7 +1294,7 @@ entry:
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
@@ -1302,10 +1304,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vssub.mask.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 32 x i8> @intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1317,7 +1319,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
@@ -1326,9 +1328,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vssub.nxv64i8.i8(
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
- i32);
+ iXLen)
-define <vscale x 64 x i8> @intrinsic_vssub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vssub_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -1339,7 +1341,7 @@ entry:
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
@@ -1349,10 +1351,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vssub.mask.nxv64i8.i8(
<vscale x 64 x i8>,
i8,
<vscale x 64 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 64 x i8> @intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
@@ -1364,7 +1366,7 @@ entry:
<vscale x 64 x i8> %1,
i8 %2,
<vscale x 64 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
@@ -1373,9 +1375,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vssub.nxv1i16.i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
- i32);
+ iXLen)
-define <vscale x 1 x i16> @intrinsic_vssub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vssub_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
@@ -1386,7 +1388,7 @@ entry:
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
@@ -1396,10 +1398,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vssub.mask.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i16> @intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1411,7 +1413,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
@@ -1420,9 +1422,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vssub.nxv2i16.i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
- i32);
+ iXLen)
-define <vscale x 2 x i16> @intrinsic_vssub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vssub_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
@@ -1433,7 +1435,7 @@ entry:
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
@@ -1443,10 +1445,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vssub.mask.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i16> @intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1458,7 +1460,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
@@ -1467,9 +1469,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vssub.nxv4i16.i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
- i32);
+ iXLen)
-define <vscale x 4 x i16> @intrinsic_vssub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vssub_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
@@ -1480,7 +1482,7 @@ entry:
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
@@ -1490,10 +1492,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vssub.mask.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i16> @intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1505,7 +1507,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
@@ -1514,9 +1516,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vssub.nxv8i16.i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
- i32);
+ iXLen)
-define <vscale x 8 x i16> @intrinsic_vssub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vssub_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
@@ -1527,7 +1529,7 @@ entry:
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
@@ -1537,10 +1539,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vssub.mask.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i16> @intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1552,7 +1554,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
@@ -1561,9 +1563,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vssub.nxv16i16.i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
- i32);
+ iXLen)
-define <vscale x 16 x i16> @intrinsic_vssub_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vssub_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
@@ -1574,7 +1576,7 @@ entry:
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
@@ -1584,10 +1586,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vssub.mask.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 16 x i16> @intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1599,7 +1601,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
@@ -1608,9 +1610,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vssub.nxv32i16.i16(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
- i32);
+ iXLen)
-define <vscale x 32 x i16> @intrinsic_vssub_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vssub_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
@@ -1621,7 +1623,7 @@ entry:
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
@@ -1631,10 +1633,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vssub.mask.nxv32i16.i16(
<vscale x 32 x i16>,
i16,
<vscale x 32 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 32 x i16> @intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
@@ -1646,7 +1648,7 @@ entry:
<vscale x 32 x i16> %1,
i16 %2,
<vscale x 32 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
@@ -1655,9 +1657,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vssub.nxv1i32.i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
- i32);
+ iXLen)
-define <vscale x 1 x i32> @intrinsic_vssub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vssub_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
@@ -1668,7 +1670,7 @@ entry:
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
@@ -1678,10 +1680,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vssub.mask.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i32> @intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1693,7 +1695,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
@@ -1702,9 +1704,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vssub.nxv2i32.i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
- i32);
+ iXLen)
-define <vscale x 2 x i32> @intrinsic_vssub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vssub_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
@@ -1715,7 +1717,7 @@ entry:
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
@@ -1725,10 +1727,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vssub.mask.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i32> @intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1740,7 +1742,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
@@ -1749,9 +1751,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vssub.nxv4i32.i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
- i32);
+ iXLen)
-define <vscale x 4 x i32> @intrinsic_vssub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vssub_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
@@ -1762,7 +1764,7 @@ entry:
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
@@ -1772,10 +1774,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vssub.mask.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i32> @intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1787,7 +1789,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
@@ -1796,9 +1798,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vssub.nxv8i32.i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
- i32);
+ iXLen)
-define <vscale x 8 x i32> @intrinsic_vssub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vssub_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
@@ -1809,7 +1811,7 @@ entry:
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
@@ -1819,10 +1821,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vssub.mask.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i32> @intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1834,7 +1836,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
@@ -1843,9 +1845,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vssub.nxv16i32.i32(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
- i32);
+ iXLen)
-define <vscale x 16 x i32> @intrinsic_vssub_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vssub_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssub_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
@@ -1856,7 +1858,7 @@ entry:
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
@@ -1866,10 +1868,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vssub.mask.nxv16i32.i32(
<vscale x 16 x i32>,
i32,
<vscale x 16 x i1>,
- i32,
- i32);
+ iXLen,
+ iXLen)
-define <vscale x 16 x i32> @intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
@@ -1881,7 +1883,7 @@ entry:
<vscale x 16 x i32> %1,
i32 %2,
<vscale x 16 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
@@ -1890,26 +1892,32 @@ declare <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vssub.vv v8, v8, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vssub.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vssub_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vssub.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssub.nxv1i64.i64(
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
@@ -1919,28 +1927,34 @@ declare <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vssub.vv v8, v9, v10, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vssub.vv v8, v9, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vssub_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vssub.vx v8, v9, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssub.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
@@ -1949,26 +1963,32 @@ declare <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vssub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vssub.vv v8, v8, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vssub_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vssub_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vssub.vv v8, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vssub_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vssub.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssub.nxv2i64.i64(
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
@@ -1978,28 +1998,34 @@ declare <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vssub.vv v8, v10, v12, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vssub.vv v8, v10, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vssub_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vssub.vx v8, v10, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssub.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
@@ -2008,26 +2034,32 @@ declare <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vssub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vssub.vv v8, v8, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vssub_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vssub_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vssub.vv v8, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vssub_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vssub.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssub.nxv4i64.i64(
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
@@ -2037,28 +2069,34 @@ declare <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vssub.vv v8, v12, v16, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vssub.vv v8, v12, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vssub_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vssub.vx v8, v12, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssub.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
@@ -2067,26 +2105,32 @@ declare <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vssub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssub_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vssub.vv v8, v8, v16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vssub_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vssub_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vssub.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vssub_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vssub.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssub.nxv8i64.i64(
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
- i32 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
@@ -2096,28 +2140,34 @@ declare <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vssub.vv v8, v16, v24, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vlse64.v v24, (a0), zero
+; RV32-NEXT: vssub.vv v8, v16, v24, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vssub_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vssub.vx v8, v16, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssub.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
- i32 %4, i32 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
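
(Editorial note, not part of the diff: the merged vssub.ll above replaces the VL/policy operands' concrete i32/i64 type with the iXLen placeholder so a single file can drive both triples; the i64 scalar-splat cases gain separate RV32/RV64 check lines because RV64 passes the i64 scalar in a GPR and uses vssub.vx, while RV32 must splat it through the stack with vlse64.v and fall back to vssub.vv. The RUN lines of the merged file are not visible in this hunk; the usual pattern for these merged rvv intrinsic tests is sketched below, and the exact prefixes here are an assumption inferred from the RV32/RV64 labels in the diff.)

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64
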
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll
deleted file mode 100644
index 5e7147158d0f1..0000000000000
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv32.ll
+++ /dev/null
@@ -1,2123 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
-
-declare <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vssubu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i32 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- <vscale x 1 x i8> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vssubu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.nxv2i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i32 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- <vscale x 2 x i8> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vssubu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.nxv4i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i32 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- <vscale x 4 x i8> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vssubu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.nxv8i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i32 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- <vscale x 8 x i8> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vssubu.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.nxv16i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i32 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vssubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- <vscale x 16 x i8> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vssubu.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.nxv32i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i32 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- <vscale x 32 x i8> %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vssubu.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.nxv64i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i32 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- <vscale x 64 x i1>,
- i32,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8r.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- <vscale x 64 x i8> %2,
- <vscale x 64 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT: vssubu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.nxv1i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i32 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- <vscale x 1 x i16> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT: vssubu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i32 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- <vscale x 2 x i16> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT: vssubu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i32 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- <vscale x 4 x i16> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT: vssubu.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i32 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vssubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- <vscale x 8 x i16> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vssubu.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i32 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- <vscale x 16 x i16> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT: vssubu.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i32 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re16.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- <vscale x 32 x i16> %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT: vssubu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- <vscale x 1 x i32> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT: vssubu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.nxv2i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- <vscale x 2 x i32> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT: vssubu.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.nxv4i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vssubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- <vscale x 4 x i32> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT: vssubu.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.nxv8i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- <vscale x 8 x i32> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vssubu.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.nxv16i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re32.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- <vscale x 16 x i32> %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; CHECK-NEXT: vssubu.vv v8, v8, v9
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i32 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- <vscale x 1 x i64> %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; CHECK-NEXT: vssubu.vv v8, v8, v10
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i32 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vssubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- <vscale x 2 x i64> %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; CHECK-NEXT: vssubu.vv v8, v8, v12
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i32 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- <vscale x 4 x i64> %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vssubu.vv v8, v8, v16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i32 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- <vscale x 8 x i64> %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
-; CHECK-NEXT: vssubu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8(
- <vscale x 1 x i8> undef,
- <vscale x 1 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.i8(
- <vscale x 1 x i8>,
- <vscale x 1 x i8>,
- i8,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i8> @intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.i8(
- <vscale x 1 x i8> %0,
- <vscale x 1 x i8> %1,
- i8 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT: vssubu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.i8(
- <vscale x 2 x i8> undef,
- <vscale x 2 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.i8(
- <vscale x 2 x i8>,
- <vscale x 2 x i8>,
- i8,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i8> @intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.i8(
- <vscale x 2 x i8> %0,
- <vscale x 2 x i8> %1,
- i8 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vssubu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.i8(
- <vscale x 4 x i8> undef,
- <vscale x 4 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.i8(
- <vscale x 4 x i8>,
- <vscale x 4 x i8>,
- i8,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i8> @intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.i8(
- <vscale x 4 x i8> %0,
- <vscale x 4 x i8> %1,
- i8 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT: vssubu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8(
- <vscale x 8 x i8> undef,
- <vscale x 8 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.i8(
- <vscale x 8 x i8>,
- <vscale x 8 x i8>,
- i8,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i8> @intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.i8(
- <vscale x 8 x i8> %0,
- <vscale x 8 x i8> %1,
- i8 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vssubu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vssubu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.i8(
- <vscale x 16 x i8> undef,
- <vscale x 16 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.i8(
- <vscale x 16 x i8>,
- <vscale x 16 x i8>,
- i8,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i8> @intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT: vssubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.i8(
- <vscale x 16 x i8> %0,
- <vscale x 16 x i8> %1,
- i8 %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vssubu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
-; CHECK-NEXT: vssubu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.i8(
- <vscale x 32 x i8> undef,
- <vscale x 32 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.i8(
- <vscale x 32 x i8>,
- <vscale x 32 x i8>,
- i8,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i8> @intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
-; CHECK-NEXT: vssubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.i8(
- <vscale x 32 x i8> %0,
- <vscale x 32 x i8> %1,
- i8 %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vssubu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
-; CHECK-NEXT: vssubu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.i8(
- <vscale x 64 x i8> undef,
- <vscale x 64 x i8> %0,
- i8 %1,
- i32 %2)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.i8(
- <vscale x 64 x i8>,
- <vscale x 64 x i8>,
- i8,
- <vscale x 64 x i1>,
- i32,
- i32);
-
-define <vscale x 64 x i8> @intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
-; CHECK-NEXT: vssubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.i8(
- <vscale x 64 x i8> %0,
- <vscale x 64 x i8> %1,
- i8 %2,
- <vscale x 64 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 64 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT: vssubu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16(
- <vscale x 1 x i16> undef,
- <vscale x 1 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.i16(
- <vscale x 1 x i16>,
- <vscale x 1 x i16>,
- i16,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i16> @intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.i16(
- <vscale x 1 x i16> %0,
- <vscale x 1 x i16> %1,
- i16 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vssubu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT: vssubu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16(
- <vscale x 2 x i16> undef,
- <vscale x 2 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.i16(
- <vscale x 2 x i16>,
- <vscale x 2 x i16>,
- i16,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i16> @intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.i16(
- <vscale x 2 x i16> %0,
- <vscale x 2 x i16> %1,
- i16 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vssubu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT: vssubu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16(
- <vscale x 4 x i16> undef,
- <vscale x 4 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.i16(
- <vscale x 4 x i16>,
- <vscale x 4 x i16>,
- i16,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i16> @intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.i16(
- <vscale x 4 x i16> %0,
- <vscale x 4 x i16> %1,
- i16 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vssubu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT: vssubu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16(
- <vscale x 8 x i16> undef,
- <vscale x 8 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.i16(
- <vscale x 8 x i16>,
- <vscale x 8 x i16>,
- i16,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i16> @intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT: vssubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.i16(
- <vscale x 8 x i16> %0,
- <vscale x 8 x i16> %1,
- i16 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vssubu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vssubu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16(
- <vscale x 16 x i16> undef,
- <vscale x 16 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.i16(
- <vscale x 16 x i16>,
- <vscale x 16 x i16>,
- i16,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i16> @intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
-; CHECK-NEXT: vssubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.i16(
- <vscale x 16 x i16> %0,
- <vscale x 16 x i16> %1,
- i16 %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vssubu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
-; CHECK-NEXT: vssubu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16(
- <vscale x 32 x i16> undef,
- <vscale x 32 x i16> %0,
- i16 %1,
- i32 %2)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.i16(
- <vscale x 32 x i16>,
- <vscale x 32 x i16>,
- i16,
- <vscale x 32 x i1>,
- i32,
- i32);
-
-define <vscale x 32 x i16> @intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
-; CHECK-NEXT: vssubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.i16(
- <vscale x 32 x i16> %0,
- <vscale x 32 x i16> %1,
- i16 %2,
- <vscale x 32 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 32 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vssubu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT: vssubu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32(
- <vscale x 1 x i32> undef,
- <vscale x 1 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.i32(
- <vscale x 1 x i32>,
- <vscale x 1 x i32>,
- i32,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i32> @intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.i32(
- <vscale x 1 x i32> %0,
- <vscale x 1 x i32> %1,
- i32 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vssubu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vssubu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32(
- <vscale x 2 x i32> undef,
- <vscale x 2 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.i32(
- <vscale x 2 x i32>,
- <vscale x 2 x i32>,
- i32,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i32> @intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.i32(
- <vscale x 2 x i32> %0,
- <vscale x 2 x i32> %1,
- i32 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vssubu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT: vssubu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32(
- <vscale x 4 x i32> undef,
- <vscale x 4 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.i32(
- <vscale x 4 x i32>,
- <vscale x 4 x i32>,
- i32,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i32> @intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT: vssubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.i32(
- <vscale x 4 x i32> %0,
- <vscale x 4 x i32> %1,
- i32 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vssubu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vssubu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32(
- <vscale x 8 x i32> undef,
- <vscale x 8 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.i32(
- <vscale x 8 x i32>,
- <vscale x 8 x i32>,
- i32,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i32> @intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT: vssubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.i32(
- <vscale x 8 x i32> %0,
- <vscale x 8 x i32> %1,
- i32 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vssubu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: vssubu.vx v8, v8, a0
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32(
- <vscale x 16 x i32> undef,
- <vscale x 16 x i32> %0,
- i32 %1,
- i32 %2)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.i32(
- <vscale x 16 x i32>,
- <vscale x 16 x i32>,
- i32,
- <vscale x 16 x i1>,
- i32,
- i32);
-
-define <vscale x 16 x i32> @intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
-; CHECK-NEXT: vssubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.i32(
- <vscale x 16 x i32> %0,
- <vscale x 16 x i32> %1,
- i32 %2,
- <vscale x 16 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 16 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
-; CHECK-NEXT: vlse64.v v9, (a0), zero
-; CHECK-NEXT: vssubu.vv v8, v8, v9
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64(
- <vscale x 1 x i64> undef,
- <vscale x 1 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.i64(
- <vscale x 1 x i64>,
- <vscale x 1 x i64>,
- i64,
- <vscale x 1 x i1>,
- i32,
- i32);
-
-define <vscale x 1 x i64> @intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vssubu.vv v8, v9, v10, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.i64(
- <vscale x 1 x i64> %0,
- <vscale x 1 x i64> %1,
- i64 %2,
- <vscale x 1 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vssubu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
-; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vssubu.vv v8, v8, v10
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64(
- <vscale x 2 x i64> undef,
- <vscale x 2 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.i64(
- <vscale x 2 x i64>,
- <vscale x 2 x i64>,
- i64,
- <vscale x 2 x i1>,
- i32,
- i32);
-
-define <vscale x 2 x i64> @intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vssubu.vv v8, v10, v12, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.i64(
- <vscale x 2 x i64> %0,
- <vscale x 2 x i64> %1,
- i64 %2,
- <vscale x 2 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vssubu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
-; CHECK-NEXT: vlse64.v v12, (a0), zero
-; CHECK-NEXT: vssubu.vv v8, v8, v12
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64(
- <vscale x 4 x i64> undef,
- <vscale x 4 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.i64(
- <vscale x 4 x i64>,
- <vscale x 4 x i64>,
- i64,
- <vscale x 4 x i1>,
- i32,
- i32);
-
-define <vscale x 4 x i64> @intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vssubu.vv v8, v12, v16, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.i64(
- <vscale x 4 x i64> %0,
- <vscale x 4 x i64> %1,
- i64 %2,
- <vscale x 4 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vlse64.v v16, (a0), zero
-; CHECK-NEXT: vssubu.vv v8, v8, v16
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64(
- <vscale x 8 x i64> undef,
- <vscale x 8 x i64> %0,
- i64 %1,
- i32 %2)
-
- ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.i64(
- <vscale x 8 x i64>,
- <vscale x 8 x i64>,
- i64,
- <vscale x 8 x i1>,
- i32,
- i32);
-
-define <vscale x 8 x i64> @intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: sw a1, 12(sp)
-; CHECK-NEXT: sw a0, 8(sp)
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
-; CHECK-NEXT: vlse64.v v24, (a0), zero
-; CHECK-NEXT: vssubu.vv v8, v16, v24, v0.t
-; CHECK-NEXT: addi sp, sp, 16
-; CHECK-NEXT: ret
-entry:
- %a = call <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.i64(
- <vscale x 8 x i64> %0,
- <vscale x 8 x i64> %1,
- i64 %2,
- <vscale x 8 x i1> %3,
- i32 %4, i32 1)
-
- ret <vscale x 8 x i64> %a
-}
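
The removal above drops the copy of these tests that hard-codes i32 for the VL and policy operands; the hunks that follow rename the rv64 file to vssubu.ll and parameterize it so a single file serves both targets. The mechanism is visible in the new RUN lines: the source spells every scalar VL/policy operand as the placeholder type iXLen, and each RUN line rewrites that placeholder with sed to i32 or i64 before piping the result to llc. A minimal sketch of the pattern follows; the function name @sketch and its single CHECK line are illustrative only, while the RUN pipeline and the intrinsic signature mirror the merged file below.

; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s

declare <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
  <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)

; The placeholder never reaches llc: after sed runs, iXLen is a concrete integer
; type, so the same body compiles for both targets and is checked with shared
; CHECK lines. @sketch is a hypothetical name used only for this illustration.
define <vscale x 1 x i8> @sketch(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: sketch:
; CHECK: vssubu.vv v8, v8, v9
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef, <vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2)
  ret <vscale x 1 x i8> %a
}
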
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu.ll
similarity index 80%
rename from llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
rename to llvm/test/CodeGen/RISCV/rvv/vssubu.ll
index 71b623c555a5f..db1b4ce34e9b3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu.ll
@@ -1,14 +1,16 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
-; RUN: < %s | FileCheck %s
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
+; RUN: -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
declare <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
- i64);
+ iXLen)
-define <vscale x 1 x i8> @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
@@ -19,7 +21,7 @@ entry:
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
@@ -29,10 +31,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i8> @intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
@@ -44,7 +46,7 @@ entry:
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
@@ -53,9 +55,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
- i64);
+ iXLen)
-define <vscale x 2 x i8> @intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
@@ -66,7 +68,7 @@ entry:
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
@@ -76,10 +78,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i8> @intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i8_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
@@ -91,7 +93,7 @@ entry:
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
@@ -100,9 +102,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
- i64);
+ iXLen)
-define <vscale x 4 x i8> @intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
@@ -113,7 +115,7 @@ entry:
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
@@ -123,10 +125,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i8> @intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i8_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
@@ -138,7 +140,7 @@ entry:
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
@@ -147,9 +149,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
- i64);
+ iXLen)
-define <vscale x 8 x i8> @intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
@@ -160,7 +162,7 @@ entry:
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
@@ -170,10 +172,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i8> @intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
@@ -185,7 +187,7 @@ entry:
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
@@ -194,9 +196,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
- i64);
+ iXLen)
-define <vscale x 16 x i8> @intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
@@ -207,7 +209,7 @@ entry:
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
@@ -217,10 +219,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 16 x i8> @intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i8_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
@@ -232,7 +234,7 @@ entry:
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
@@ -241,9 +243,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
- i64);
+ iXLen)
-define <vscale x 32 x i8> @intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
@@ -254,7 +256,7 @@ entry:
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
@@ -264,10 +266,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 32 x i8> @intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i8_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
@@ -279,7 +281,7 @@ entry:
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
@@ -288,9 +290,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.nxv64i8(
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
- i64);
+ iXLen)
-define <vscale x 64 x i8> @intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
@@ -301,7 +303,7 @@ entry:
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
<vscale x 64 x i8> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
@@ -311,10 +313,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.nxv64i8(
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 64 x i8> @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i8> %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv64i8_nxv64i8_nxv64i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8r.v v24, (a0)
@@ -327,7 +329,7 @@ entry:
<vscale x 64 x i8> %1,
<vscale x 64 x i8> %2,
<vscale x 64 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
@@ -336,9 +338,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
- i64);
+ iXLen)
-define <vscale x 1 x i16> @intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
@@ -349,7 +351,7 @@ entry:
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
@@ -359,10 +361,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i16> @intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i16_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
@@ -374,7 +376,7 @@ entry:
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
@@ -383,9 +385,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
- i64);
+ iXLen)
-define <vscale x 2 x i16> @intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
@@ -396,7 +398,7 @@ entry:
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
@@ -406,10 +408,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i16> @intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i16_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
@@ -421,7 +423,7 @@ entry:
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
@@ -430,9 +432,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
- i64);
+ iXLen)
-define <vscale x 4 x i16> @intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
@@ -443,7 +445,7 @@ entry:
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
@@ -453,10 +455,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i16> @intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
@@ -468,7 +470,7 @@ entry:
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
@@ -477,9 +479,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
- i64);
+ iXLen)
-define <vscale x 8 x i16> @intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
@@ -490,7 +492,7 @@ entry:
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
@@ -500,10 +502,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i16> @intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i16_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
@@ -515,7 +517,7 @@ entry:
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
@@ -524,9 +526,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
- i64);
+ iXLen)
-define <vscale x 16 x i16> @intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
@@ -537,7 +539,7 @@ entry:
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
@@ -547,10 +549,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 16 x i16> @intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i16_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
@@ -562,7 +564,7 @@ entry:
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
@@ -571,9 +573,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.nxv32i16(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i16>,
- i64);
+ iXLen)
-define <vscale x 32 x i16> @intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
@@ -584,7 +586,7 @@ entry:
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
@@ -594,10 +596,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.nxv32i16(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 32 x i16> @intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i16> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv32i16_nxv32i16_nxv32i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re16.v v24, (a0)
@@ -610,7 +612,7 @@ entry:
<vscale x 32 x i16> %1,
<vscale x 32 x i16> %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
@@ -619,9 +621,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
- i64);
+ iXLen)
-define <vscale x 1 x i32> @intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
@@ -632,7 +634,7 @@ entry:
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
@@ -642,10 +644,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i32> @intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i32_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
@@ -657,7 +659,7 @@ entry:
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
@@ -666,9 +668,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
- i64);
+ iXLen)
-define <vscale x 2 x i32> @intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
@@ -679,7 +681,7 @@ entry:
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
@@ -689,10 +691,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i32> @intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
@@ -704,7 +706,7 @@ entry:
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
@@ -713,9 +715,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
- i64);
+ iXLen)
-define <vscale x 4 x i32> @intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
@@ -726,7 +728,7 @@ entry:
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
@@ -736,10 +738,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i32> @intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i32_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
@@ -751,7 +753,7 @@ entry:
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
@@ -760,9 +762,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
- i64);
+ iXLen)
-define <vscale x 8 x i32> @intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
@@ -773,7 +775,7 @@ entry:
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
@@ -783,10 +785,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i32> @intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i32_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
@@ -798,7 +800,7 @@ entry:
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
@@ -807,9 +809,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.nxv16i32(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i32>,
- i64);
+ iXLen)
-define <vscale x 16 x i32> @intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
@@ -820,7 +822,7 @@ entry:
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
@@ -830,10 +832,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.nxv16i32(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 16 x i32> @intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i32> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv16i32_nxv16i32_nxv16i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re32.v v24, (a0)
@@ -846,7 +848,7 @@ entry:
<vscale x 16 x i32> %1,
<vscale x 16 x i32> %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
@@ -855,9 +857,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
- i64);
+ iXLen)
-define <vscale x 1 x i64> @intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
@@ -868,7 +870,7 @@ entry:
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
@@ -878,10 +880,10 @@ declare <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i64> @intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
@@ -893,7 +895,7 @@ entry:
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
@@ -902,9 +904,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
- i64);
+ iXLen)
-define <vscale x 2 x i64> @intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
@@ -915,7 +917,7 @@ entry:
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
@@ -925,10 +927,10 @@ declare <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i64> @intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv2i64_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
@@ -940,7 +942,7 @@ entry:
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
@@ -949,9 +951,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
- i64);
+ iXLen)
-define <vscale x 4 x i64> @intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
@@ -962,7 +964,7 @@ entry:
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
@@ -972,10 +974,10 @@ declare <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i64> @intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv4i64_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
@@ -987,7 +989,7 @@ entry:
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
@@ -996,9 +998,9 @@ declare <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i64>,
- i64);
+ iXLen)
-define <vscale x 8 x i64> @intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
@@ -1009,7 +1011,7 @@ entry:
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
@@ -1019,10 +1021,10 @@ declare <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.nxv8i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i64> @intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vv_nxv8i64_nxv8i64_nxv8i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
@@ -1035,7 +1037,7 @@ entry:
<vscale x 8 x i64> %1,
<vscale x 8 x i64> %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
@@ -1044,9 +1046,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vssubu.nxv1i8.i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i8,
- i64);
+ iXLen)
-define <vscale x 1 x i8> @intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -1057,7 +1059,7 @@ entry:
<vscale x 1 x i8> undef,
<vscale x 1 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i8> %a
}
@@ -1067,10 +1069,10 @@ declare <vscale x 1 x i8> @llvm.riscv.vssubu.mask.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i8> @intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
@@ -1082,7 +1084,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i8> %a
}
@@ -1091,9 +1093,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vssubu.nxv2i8.i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i8,
- i64);
+ iXLen)
-define <vscale x 2 x i8> @intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -1104,7 +1106,7 @@ entry:
<vscale x 2 x i8> undef,
<vscale x 2 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i8> %a
}
@@ -1114,10 +1116,10 @@ declare <vscale x 2 x i8> @llvm.riscv.vssubu.mask.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i8> @intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
@@ -1129,7 +1131,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i8> %a
}
@@ -1138,9 +1140,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vssubu.nxv4i8.i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i8,
- i64);
+ iXLen)
-define <vscale x 4 x i8> @intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -1151,7 +1153,7 @@ entry:
<vscale x 4 x i8> undef,
<vscale x 4 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i8> %a
}
@@ -1161,10 +1163,10 @@ declare <vscale x 4 x i8> @llvm.riscv.vssubu.mask.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i8> @intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i8_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
@@ -1176,7 +1178,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i8> %a
}
@@ -1185,9 +1187,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vssubu.nxv8i8.i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i8,
- i64);
+ iXLen)
-define <vscale x 8 x i8> @intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -1198,7 +1200,7 @@ entry:
<vscale x 8 x i8> undef,
<vscale x 8 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i8> %a
}
@@ -1208,10 +1210,10 @@ declare <vscale x 8 x i8> @llvm.riscv.vssubu.mask.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i8> @intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i8_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
@@ -1223,7 +1225,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i8> %a
}
@@ -1232,9 +1234,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vssubu.nxv16i8.i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i8,
- i64);
+ iXLen)
-define <vscale x 16 x i8> @intrinsic_vssubu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vssubu_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -1245,7 +1247,7 @@ entry:
<vscale x 16 x i8> undef,
<vscale x 16 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i8> %a
}
@@ -1255,10 +1257,10 @@ declare <vscale x 16 x i8> @llvm.riscv.vssubu.mask.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 16 x i8> @intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i8_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
@@ -1270,7 +1272,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i8> %a
}
@@ -1279,9 +1281,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vssubu.nxv32i8.i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i8,
- i64);
+ iXLen)
-define <vscale x 32 x i8> @intrinsic_vssubu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vssubu_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -1292,7 +1294,7 @@ entry:
<vscale x 32 x i8> undef,
<vscale x 32 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i8> %a
}
@@ -1302,10 +1304,10 @@ declare <vscale x 32 x i8> @llvm.riscv.vssubu.mask.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 32 x i8> @intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i8_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
@@ -1317,7 +1319,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i8> %a
}
@@ -1326,9 +1328,9 @@ declare <vscale x 64 x i8> @llvm.riscv.vssubu.nxv64i8.i8(
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i8,
- i64);
+ iXLen)
-define <vscale x 64 x i8> @intrinsic_vssubu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vssubu_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
@@ -1339,7 +1341,7 @@ entry:
<vscale x 64 x i8> undef,
<vscale x 64 x i8> %0,
i8 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 64 x i8> %a
}
@@ -1349,10 +1351,10 @@ declare <vscale x 64 x i8> @llvm.riscv.vssubu.mask.nxv64i8.i8(
<vscale x 64 x i8>,
i8,
<vscale x 64 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 64 x i8> @intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i8 %2, <vscale x 64 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv64i8_nxv64i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
@@ -1364,7 +1366,7 @@ entry:
<vscale x 64 x i8> %1,
i8 %2,
<vscale x 64 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 64 x i8> %a
}
@@ -1373,9 +1375,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vssubu.nxv1i16.i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i16,
- i64);
+ iXLen)
-define <vscale x 1 x i16> @intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
@@ -1386,7 +1388,7 @@ entry:
<vscale x 1 x i16> undef,
<vscale x 1 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i16> %a
}
@@ -1396,10 +1398,10 @@ declare <vscale x 1 x i16> @llvm.riscv.vssubu.mask.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i16> @intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i16_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
@@ -1411,7 +1413,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i16> %a
}
@@ -1420,9 +1422,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vssubu.nxv2i16.i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i16,
- i64);
+ iXLen)
-define <vscale x 2 x i16> @intrinsic_vssubu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vssubu_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
@@ -1433,7 +1435,7 @@ entry:
<vscale x 2 x i16> undef,
<vscale x 2 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i16> %a
}
@@ -1443,10 +1445,10 @@ declare <vscale x 2 x i16> @llvm.riscv.vssubu.mask.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i16> @intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i16_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
@@ -1458,7 +1460,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i16> %a
}
@@ -1467,9 +1469,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vssubu.nxv4i16.i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i16,
- i64);
+ iXLen)
-define <vscale x 4 x i16> @intrinsic_vssubu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vssubu_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
@@ -1480,7 +1482,7 @@ entry:
<vscale x 4 x i16> undef,
<vscale x 4 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i16> %a
}
@@ -1490,10 +1492,10 @@ declare <vscale x 4 x i16> @llvm.riscv.vssubu.mask.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i16> @intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i16_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
@@ -1505,7 +1507,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i16> %a
}
@@ -1514,9 +1516,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vssubu.nxv8i16.i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i16,
- i64);
+ iXLen)
-define <vscale x 8 x i16> @intrinsic_vssubu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vssubu_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
@@ -1527,7 +1529,7 @@ entry:
<vscale x 8 x i16> undef,
<vscale x 8 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i16> %a
}
@@ -1537,10 +1539,10 @@ declare <vscale x 8 x i16> @llvm.riscv.vssubu.mask.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i16> @intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i16_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
@@ -1552,7 +1554,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i16> %a
}
@@ -1561,9 +1563,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vssubu.nxv16i16.i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i16,
- i64);
+ iXLen)
-define <vscale x 16 x i16> @intrinsic_vssubu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vssubu_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
@@ -1574,7 +1576,7 @@ entry:
<vscale x 16 x i16> undef,
<vscale x 16 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i16> %a
}
@@ -1584,10 +1586,10 @@ declare <vscale x 16 x i16> @llvm.riscv.vssubu.mask.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 16 x i16> @intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i16_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
@@ -1599,7 +1601,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i16> %a
}
@@ -1608,9 +1610,9 @@ declare <vscale x 32 x i16> @llvm.riscv.vssubu.nxv32i16.i16(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i16,
- i64);
+ iXLen)
-define <vscale x 32 x i16> @intrinsic_vssubu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vssubu_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
@@ -1621,7 +1623,7 @@ entry:
<vscale x 32 x i16> undef,
<vscale x 32 x i16> %0,
i16 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 32 x i16> %a
}
@@ -1631,10 +1633,10 @@ declare <vscale x 32 x i16> @llvm.riscv.vssubu.mask.nxv32i16.i16(
<vscale x 32 x i16>,
i16,
<vscale x 32 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 32 x i16> @intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i16 %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv32i16_nxv32i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
@@ -1646,7 +1648,7 @@ entry:
<vscale x 32 x i16> %1,
i16 %2,
<vscale x 32 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 32 x i16> %a
}
@@ -1655,9 +1657,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vssubu.nxv1i32.i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
- i64);
+ iXLen)
-define <vscale x 1 x i32> @intrinsic_vssubu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vssubu_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
@@ -1668,7 +1670,7 @@ entry:
<vscale x 1 x i32> undef,
<vscale x 1 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i32> %a
}
@@ -1678,10 +1680,10 @@ declare <vscale x 1 x i32> @llvm.riscv.vssubu.mask.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 1 x i32> @intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i32_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
@@ -1693,7 +1695,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i32> %a
}
@@ -1702,9 +1704,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vssubu.nxv2i32.i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
- i64);
+ iXLen)
-define <vscale x 2 x i32> @intrinsic_vssubu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vssubu_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
@@ -1715,7 +1717,7 @@ entry:
<vscale x 2 x i32> undef,
<vscale x 2 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i32> %a
}
@@ -1725,10 +1727,10 @@ declare <vscale x 2 x i32> @llvm.riscv.vssubu.mask.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 2 x i32> @intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i32_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
@@ -1740,7 +1742,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i32> %a
}
@@ -1749,9 +1751,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vssubu.nxv4i32.i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
- i64);
+ iXLen)
-define <vscale x 4 x i32> @intrinsic_vssubu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vssubu_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
@@ -1762,7 +1764,7 @@ entry:
<vscale x 4 x i32> undef,
<vscale x 4 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i32> %a
}
@@ -1772,10 +1774,10 @@ declare <vscale x 4 x i32> @llvm.riscv.vssubu.mask.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 4 x i32> @intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i32_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
@@ -1787,7 +1789,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i32> %a
}
@@ -1796,9 +1798,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vssubu.nxv8i32.i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
- i64);
+ iXLen)
-define <vscale x 8 x i32> @intrinsic_vssubu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vssubu_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
@@ -1809,7 +1811,7 @@ entry:
<vscale x 8 x i32> undef,
<vscale x 8 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i32> %a
}
@@ -1819,10 +1821,10 @@ declare <vscale x 8 x i32> @llvm.riscv.vssubu.mask.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 8 x i32> @intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i32_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
@@ -1834,7 +1836,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i32> %a
}
@@ -1843,9 +1845,9 @@ declare <vscale x 16 x i32> @llvm.riscv.vssubu.nxv16i32.i32(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i32,
- i64);
+ iXLen)
-define <vscale x 16 x i32> @intrinsic_vssubu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vssubu_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vssubu_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
@@ -1856,7 +1858,7 @@ entry:
<vscale x 16 x i32> undef,
<vscale x 16 x i32> %0,
i32 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 16 x i32> %a
}
@@ -1866,10 +1868,10 @@ declare <vscale x 16 x i32> @llvm.riscv.vssubu.mask.nxv16i32.i32(
<vscale x 16 x i32>,
i32,
<vscale x 16 x i1>,
- i64,
- i64);
+ iXLen,
+ iXLen)
-define <vscale x 16 x i32> @intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv16i32_nxv16i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
@@ -1881,7 +1883,7 @@ entry:
<vscale x 16 x i32> %1,
i32 %2,
<vscale x 16 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 16 x i32> %a
}
@@ -1890,20 +1892,32 @@ declare <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT: vssubu.vx v8, v8, a0
-; CHECK-NEXT: ret
+ iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v9, (a0), zero
+; RV32-NEXT: vssubu.vv v8, v8, v9
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT: vssubu.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssubu.nxv1i64.i64(
<vscale x 1 x i64> undef,
<vscale x 1 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 1 x i64> %a
}
@@ -1913,22 +1927,34 @@ declare <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
- i64,
- i64);
-
-define <vscale x 1 x i64> @intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT: vssubu.vx v8, v9, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen)
+
+define <vscale x 1 x i64> @intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vssubu.vv v8, v9, v10, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vssubu_mask_vx_nxv1i64_nxv1i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vssubu.vx v8, v9, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vssubu.mask.nxv1i64.i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 1 x i64> %a
}
@@ -1937,20 +1963,32 @@ declare <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vssubu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
-; CHECK-NEXT: vssubu.vx v8, v8, a0
-; CHECK-NEXT: ret
+ iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vssubu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vssubu_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v10, (a0), zero
+; RV32-NEXT: vssubu.vv v8, v8, v10
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vssubu_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
+; RV64-NEXT: vssubu.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssubu.nxv2i64.i64(
<vscale x 2 x i64> undef,
<vscale x 2 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 2 x i64> %a
}
@@ -1960,22 +1998,34 @@ declare <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
- i64,
- i64);
-
-define <vscale x 2 x i64> @intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT: vssubu.vx v8, v10, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen)
+
+define <vscale x 2 x i64> @intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vssubu.vv v8, v10, v12, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vssubu_mask_vx_nxv2i64_nxv2i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vssubu.vx v8, v10, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vssubu.mask.nxv2i64.i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 2 x i64> %a
}
@@ -1984,20 +2034,32 @@ declare <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vssubu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
-; CHECK-NEXT: vssubu.vx v8, v8, a0
-; CHECK-NEXT: ret
+ iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vssubu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vssubu_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a0), zero
+; RV32-NEXT: vssubu.vv v8, v8, v12
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vssubu_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
+; RV64-NEXT: vssubu.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssubu.nxv4i64.i64(
<vscale x 4 x i64> undef,
<vscale x 4 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 4 x i64> %a
}
@@ -2007,22 +2069,34 @@ declare <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
- i64,
- i64);
-
-define <vscale x 4 x i64> @intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vssubu.vx v8, v12, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen)
+
+define <vscale x 4 x i64> @intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vssubu.vv v8, v12, v16, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vssubu_mask_vx_nxv4i64_nxv4i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vssubu.vx v8, v12, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vssubu.mask.nxv4i64.i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 4 x i64> %a
}
@@ -2031,20 +2105,32 @@ declare <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64(
<vscale x 8 x i64>,
<vscale x 8 x i64>,
i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vssubu.vx v8, v8, a0
-; CHECK-NEXT: ret
+ iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
+; RV32-LABEL: intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a0), zero
+; RV32-NEXT: vssubu.vv v8, v8, v16
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vssubu_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT: vssubu.vx v8, v8, a0
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssubu.nxv8i64.i64(
<vscale x 8 x i64> undef,
<vscale x 8 x i64> %0,
i64 %1,
- i64 %2)
+ iXLen %2)
ret <vscale x 8 x i64> %a
}
@@ -2054,22 +2140,34 @@ declare <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.i64(
<vscale x 8 x i64>,
i64,
<vscale x 8 x i1>,
- i64,
- i64);
-
-define <vscale x 8 x i64> @intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
-; CHECK-LABEL: intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vssubu.vx v8, v16, a0, v0.t
-; CHECK-NEXT: ret
+ iXLen,
+ iXLen)
+
+define <vscale x 8 x i64> @intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; RV32-LABEL: intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64:
+; RV32: # %bb.0: # %entry
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vlse64.v v24, (a0), zero
+; RV32-NEXT: vssubu.vv v8, v16, v24, v0.t
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: intrinsic_vssubu_mask_vx_nxv8i64_nxv8i64_i64:
+; RV64: # %bb.0: # %entry
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vssubu.vx v8, v16, a0, v0.t
+; RV64-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vssubu.mask.nxv8i64.i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64> %1,
i64 %2,
<vscale x 8 x i1> %3,
- i64 %4, i64 1)
+ iXLen %4, iXLen 1)
ret <vscale x 8 x i64> %a
}
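
For context on the iXLen placeholder used throughout the hunks above: the merged files resolve it with sed in their RUN lines, so one test body drives both riscv32 and riscv64. The header of the new file is not part of this hunk, so the lines below are an illustration following the pattern of the other already-merged rvv tests, not a quote from this patch:

; Illustrative RUN lines (assumed; see the merged file's header for the authoritative version)
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN:   | FileCheck %s --check-prefixes=CHECK,RV64

Functions whose codegen is identical on both targets keep the shared CHECK prefix; the i64-scalar .vx cases above switch to RV32/RV64 prefixes because rv32 must splat the 64-bit scalar through the stack with vlse64.v before using vssubu.vv.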