[llvm-branch-commits] [llvm] 6cab3f8 - [RISCV] Use update_llc_test_checks.py to regenerate check lines in vleff-rv32.ll and vleff-rv64.ll.
Craig Topper via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Wed Jan 20 18:58:45 PST 2021
Author: Craig Topper
Date: 2021-01-20T18:51:02-08:00
New Revision: 6cab3f88ee4dbc59c8c5abb70490fea3f3f6d46c
URL: https://github.com/llvm/llvm-project/commit/6cab3f88ee4dbc59c8c5abb70490fea3f3f6d46c
DIFF: https://github.com/llvm/llvm-project/commit/6cab3f88ee4dbc59c8c5abb70490fea3f3f6d46c.diff
LOG: [RISCV] Use update_llc_test_checks.py to regenerate check lines in vleff-rv32.ll and vleff-rv64.ll.
This should minimize the diff in a future patch.
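For reference, check lines like these can be regenerated with the update script, roughly as follows (a sketch; the --llc-binary path assumes a local build directory and is not taken from this commit):

  $ llvm/utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
      llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll \
      llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll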
Added:
Modified:
llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll
llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll
index 04987997b8e5..684f4356a3d0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll
@@ -1,14 +1,17 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh,+f,+d -verify-machineinstrs \
-; RUN: --riscv-no-aliases < %s | FileCheck %s
+; RUN: < %s | FileCheck %s
declare <vscale x 1 x i32> @llvm.riscv.vleff.nxv1i32(
<vscale x 1 x i32>*,
i32);
define <vscale x 1 x i32> @intrinsic_vleff_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vle32ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 1 x i32> @llvm.riscv.vleff.nxv1i32(
<vscale x 1 x i32>* %0,
i32 %1)
@@ -23,10 +26,12 @@ declare <vscale x 1 x i32> @llvm.riscv.vleff.mask.nxv1i32(
i32);
define <vscale x 1 x i32> @intrinsic_vleff_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vle32ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 1 x i32> @llvm.riscv.vleff.mask.nxv1i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i32>* %1,
@@ -41,10 +46,12 @@ declare <vscale x 2 x i32> @llvm.riscv.vleff.nxv2i32(
i32);
define <vscale x 2 x i32> @intrinsic_vleff_v_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vle32ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 2 x i32> @llvm.riscv.vleff.nxv2i32(
<vscale x 2 x i32>* %0,
i32 %1)
@@ -59,10 +66,12 @@ declare <vscale x 2 x i32> @llvm.riscv.vleff.mask.nxv2i32(
i32);
define <vscale x 2 x i32> @intrinsic_vleff_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vle32ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vleff.mask.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32>* %1,
@@ -77,10 +86,12 @@ declare <vscale x 4 x i32> @llvm.riscv.vleff.nxv4i32(
i32);
define <vscale x 4 x i32> @intrinsic_vleff_v_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vle32ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 4 x i32> @llvm.riscv.vleff.nxv4i32(
<vscale x 4 x i32>* %0,
i32 %1)
@@ -95,10 +106,12 @@ declare <vscale x 4 x i32> @llvm.riscv.vleff.mask.nxv4i32(
i32);
define <vscale x 4 x i32> @intrinsic_vleff_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vle32ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 4 x i32> @llvm.riscv.vleff.mask.nxv4i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i32>* %1,
@@ -113,10 +126,12 @@ declare <vscale x 8 x i32> @llvm.riscv.vleff.nxv8i32(
i32);
define <vscale x 8 x i32> @intrinsic_vleff_v_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vle32ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 8 x i32> @llvm.riscv.vleff.nxv8i32(
<vscale x 8 x i32>* %0,
i32 %1)
@@ -131,10 +146,12 @@ declare <vscale x 8 x i32> @llvm.riscv.vleff.mask.nxv8i32(
i32);
define <vscale x 8 x i32> @intrinsic_vleff_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vle32ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 8 x i32> @llvm.riscv.vleff.mask.nxv8i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i32>* %1,
@@ -149,10 +166,12 @@ declare <vscale x 16 x i32> @llvm.riscv.vleff.nxv16i32(
i32);
define <vscale x 16 x i32> @intrinsic_vleff_v_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vle32ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 16 x i32> @llvm.riscv.vleff.nxv16i32(
<vscale x 16 x i32>* %0,
i32 %1)
@@ -167,10 +186,12 @@ declare <vscale x 16 x i32> @llvm.riscv.vleff.mask.nxv16i32(
i32);
define <vscale x 16 x i32> @intrinsic_vleff_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vle32ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 16 x i32> @llvm.riscv.vleff.mask.nxv16i32(
<vscale x 16 x i32> %0,
<vscale x 16 x i32>* %1,
@@ -185,10 +206,12 @@ declare <vscale x 1 x float> @llvm.riscv.vleff.nxv1f32(
i32);
define <vscale x 1 x float> @intrinsic_vleff_v_nxv1f32_nxv1f32(<vscale x 1 x float>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vle32ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 1 x float> @llvm.riscv.vleff.nxv1f32(
<vscale x 1 x float>* %0,
i32 %1)
@@ -203,10 +226,12 @@ declare <vscale x 1 x float> @llvm.riscv.vleff.mask.nxv1f32(
i32);
define <vscale x 1 x float> @intrinsic_vleff_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vle32ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vleff.mask.nxv1f32(
<vscale x 1 x float> %0,
<vscale x 1 x float>* %1,
@@ -221,10 +246,12 @@ declare <vscale x 2 x float> @llvm.riscv.vleff.nxv2f32(
i32);
define <vscale x 2 x float> @intrinsic_vleff_v_nxv2f32_nxv2f32(<vscale x 2 x float>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vle32ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 2 x float> @llvm.riscv.vleff.nxv2f32(
<vscale x 2 x float>* %0,
i32 %1)
@@ -239,10 +266,12 @@ declare <vscale x 2 x float> @llvm.riscv.vleff.mask.nxv2f32(
i32);
define <vscale x 2 x float> @intrinsic_vleff_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vle32ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vleff.mask.nxv2f32(
<vscale x 2 x float> %0,
<vscale x 2 x float>* %1,
@@ -257,10 +286,12 @@ declare <vscale x 4 x float> @llvm.riscv.vleff.nxv4f32(
i32);
define <vscale x 4 x float> @intrinsic_vleff_v_nxv4f32_nxv4f32(<vscale x 4 x float>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vle32ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 4 x float> @llvm.riscv.vleff.nxv4f32(
<vscale x 4 x float>* %0,
i32 %1)
@@ -275,10 +306,12 @@ declare <vscale x 4 x float> @llvm.riscv.vleff.mask.nxv4f32(
i32);
define <vscale x 4 x float> @intrinsic_vleff_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vle32ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vleff.mask.nxv4f32(
<vscale x 4 x float> %0,
<vscale x 4 x float>* %1,
@@ -293,10 +326,12 @@ declare <vscale x 8 x float> @llvm.riscv.vleff.nxv8f32(
i32);
define <vscale x 8 x float> @intrinsic_vleff_v_nxv8f32_nxv8f32(<vscale x 8 x float>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vle32ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 8 x float> @llvm.riscv.vleff.nxv8f32(
<vscale x 8 x float>* %0,
i32 %1)
@@ -311,10 +346,12 @@ declare <vscale x 8 x float> @llvm.riscv.vleff.mask.nxv8f32(
i32);
define <vscale x 8 x float> @intrinsic_vleff_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vle32ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vleff.mask.nxv8f32(
<vscale x 8 x float> %0,
<vscale x 8 x float>* %1,
@@ -329,10 +366,12 @@ declare <vscale x 16 x float> @llvm.riscv.vleff.nxv16f32(
i32);
define <vscale x 16 x float> @intrinsic_vleff_v_nxv16f32_nxv16f32(<vscale x 16 x float>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vle32ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 16 x float> @llvm.riscv.vleff.nxv16f32(
<vscale x 16 x float>* %0,
i32 %1)
@@ -347,10 +386,12 @@ declare <vscale x 16 x float> @llvm.riscv.vleff.mask.nxv16f32(
i32);
define <vscale x 16 x float> @intrinsic_vleff_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vle32ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vleff.mask.nxv16f32(
<vscale x 16 x float> %0,
<vscale x 16 x float>* %1,
@@ -365,10 +406,12 @@ declare <vscale x 1 x i16> @llvm.riscv.vleff.nxv1i16(
i32);
define <vscale x 1 x i16> @intrinsic_vleff_v_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vle16ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 1 x i16> @llvm.riscv.vleff.nxv1i16(
<vscale x 1 x i16>* %0,
i32 %1)
@@ -383,10 +426,12 @@ declare <vscale x 1 x i16> @llvm.riscv.vleff.mask.nxv1i16(
i32);
define <vscale x 1 x i16> @intrinsic_vleff_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 1 x i16> @llvm.riscv.vleff.mask.nxv1i16(
<vscale x 1 x i16> %0,
<vscale x 1 x i16>* %1,
@@ -401,10 +446,12 @@ declare <vscale x 2 x i16> @llvm.riscv.vleff.nxv2i16(
i32);
define <vscale x 2 x i16> @intrinsic_vleff_v_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vle16ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 2 x i16> @llvm.riscv.vleff.nxv2i16(
<vscale x 2 x i16>* %0,
i32 %1)
@@ -419,10 +466,12 @@ declare <vscale x 2 x i16> @llvm.riscv.vleff.mask.nxv2i16(
i32);
define <vscale x 2 x i16> @intrinsic_vleff_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 2 x i16> @llvm.riscv.vleff.mask.nxv2i16(
<vscale x 2 x i16> %0,
<vscale x 2 x i16>* %1,
@@ -437,10 +486,12 @@ declare <vscale x 4 x i16> @llvm.riscv.vleff.nxv4i16(
i32);
define <vscale x 4 x i16> @intrinsic_vleff_v_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vle16ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 4 x i16> @llvm.riscv.vleff.nxv4i16(
<vscale x 4 x i16>* %0,
i32 %1)
@@ -455,10 +506,12 @@ declare <vscale x 4 x i16> @llvm.riscv.vleff.mask.nxv4i16(
i32);
define <vscale x 4 x i16> @intrinsic_vleff_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vleff.mask.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16>* %1,
@@ -473,10 +526,12 @@ declare <vscale x 8 x i16> @llvm.riscv.vleff.nxv8i16(
i32);
define <vscale x 8 x i16> @intrinsic_vleff_v_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vle16ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 8 x i16> @llvm.riscv.vleff.nxv8i16(
<vscale x 8 x i16>* %0,
i32 %1)
@@ -491,10 +546,12 @@ declare <vscale x 8 x i16> @llvm.riscv.vleff.mask.nxv8i16(
i32);
define <vscale x 8 x i16> @intrinsic_vleff_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 8 x i16> @llvm.riscv.vleff.mask.nxv8i16(
<vscale x 8 x i16> %0,
<vscale x 8 x i16>* %1,
@@ -509,10 +566,12 @@ declare <vscale x 16 x i16> @llvm.riscv.vleff.nxv16i16(
i32);
define <vscale x 16 x i16> @intrinsic_vleff_v_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vle16ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 16 x i16> @llvm.riscv.vleff.nxv16i16(
<vscale x 16 x i16>* %0,
i32 %1)
@@ -527,10 +586,12 @@ declare <vscale x 16 x i16> @llvm.riscv.vleff.mask.nxv16i16(
i32);
define <vscale x 16 x i16> @intrinsic_vleff_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT: vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 16 x i16> @llvm.riscv.vleff.mask.nxv16i16(
<vscale x 16 x i16> %0,
<vscale x 16 x i16>* %1,
@@ -545,10 +606,12 @@ declare <vscale x 32 x i16> @llvm.riscv.vleff.nxv32i16(
i32);
define <vscale x 32 x i16> @intrinsic_vleff_v_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vle16ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 32 x i16> @llvm.riscv.vleff.nxv32i16(
<vscale x 32 x i16>* %0,
i32 %1)
@@ -563,10 +626,12 @@ declare <vscale x 32 x i16> @llvm.riscv.vleff.mask.nxv32i16(
i32);
define <vscale x 32 x i16> @intrinsic_vleff_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,tu,mu
+; CHECK-NEXT: vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 32 x i16> @llvm.riscv.vleff.mask.nxv32i16(
<vscale x 32 x i16> %0,
<vscale x 32 x i16>* %1,
@@ -581,10 +646,12 @@ declare <vscale x 1 x half> @llvm.riscv.vleff.nxv1f16(
i32);
define <vscale x 1 x half> @intrinsic_vleff_v_nxv1f16_nxv1f16(<vscale x 1 x half>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vle16ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv1f16_nxv1f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 1 x half> @llvm.riscv.vleff.nxv1f16(
<vscale x 1 x half>* %0,
i32 %1)
@@ -599,10 +666,12 @@ declare <vscale x 1 x half> @llvm.riscv.vleff.mask.nxv1f16(
i32);
define <vscale x 1 x half> @intrinsic_vleff_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f16_nxv1f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 1 x half> @llvm.riscv.vleff.mask.nxv1f16(
<vscale x 1 x half> %0,
<vscale x 1 x half>* %1,
@@ -617,10 +686,12 @@ declare <vscale x 2 x half> @llvm.riscv.vleff.nxv2f16(
i32);
define <vscale x 2 x half> @intrinsic_vleff_v_nxv2f16_nxv2f16(<vscale x 2 x half>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vle16ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv2f16_nxv2f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 2 x half> @llvm.riscv.vleff.nxv2f16(
<vscale x 2 x half>* %0,
i32 %1)
@@ -635,10 +706,12 @@ declare <vscale x 2 x half> @llvm.riscv.vleff.mask.nxv2f16(
i32);
define <vscale x 2 x half> @intrinsic_vleff_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f16_nxv2f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 2 x half> @llvm.riscv.vleff.mask.nxv2f16(
<vscale x 2 x half> %0,
<vscale x 2 x half>* %1,
@@ -653,10 +726,12 @@ declare <vscale x 4 x half> @llvm.riscv.vleff.nxv4f16(
i32);
define <vscale x 4 x half> @intrinsic_vleff_v_nxv4f16_nxv4f16(<vscale x 4 x half>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vle16ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv4f16_nxv4f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 4 x half> @llvm.riscv.vleff.nxv4f16(
<vscale x 4 x half>* %0,
i32 %1)
@@ -671,10 +746,12 @@ declare <vscale x 4 x half> @llvm.riscv.vleff.mask.nxv4f16(
i32);
define <vscale x 4 x half> @intrinsic_vleff_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f16_nxv4f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 4 x half> @llvm.riscv.vleff.mask.nxv4f16(
<vscale x 4 x half> %0,
<vscale x 4 x half>* %1,
@@ -689,10 +766,12 @@ declare <vscale x 8 x half> @llvm.riscv.vleff.nxv8f16(
i32);
define <vscale x 8 x half> @intrinsic_vleff_v_nxv8f16_nxv8f16(<vscale x 8 x half>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vle16ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv8f16_nxv8f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 8 x half> @llvm.riscv.vleff.nxv8f16(
<vscale x 8 x half>* %0,
i32 %1)
@@ -707,10 +786,12 @@ declare <vscale x 8 x half> @llvm.riscv.vleff.mask.nxv8f16(
i32);
define <vscale x 8 x half> @intrinsic_vleff_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f16_nxv8f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 8 x half> @llvm.riscv.vleff.mask.nxv8f16(
<vscale x 8 x half> %0,
<vscale x 8 x half>* %1,
@@ -725,10 +806,12 @@ declare <vscale x 16 x half> @llvm.riscv.vleff.nxv16f16(
i32);
define <vscale x 16 x half> @intrinsic_vleff_v_nxv16f16_nxv16f16(<vscale x 16 x half>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vle16ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv16f16_nxv16f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 16 x half> @llvm.riscv.vleff.nxv16f16(
<vscale x 16 x half>* %0,
i32 %1)
@@ -743,10 +826,12 @@ declare <vscale x 16 x half> @llvm.riscv.vleff.mask.nxv16f16(
i32);
define <vscale x 16 x half> @intrinsic_vleff_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT: vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f16_nxv16f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 16 x half> @llvm.riscv.vleff.mask.nxv16f16(
<vscale x 16 x half> %0,
<vscale x 16 x half>* %1,
@@ -761,10 +846,12 @@ declare <vscale x 32 x half> @llvm.riscv.vleff.nxv32f16(
i32);
define <vscale x 32 x half> @intrinsic_vleff_v_nxv32f16_nxv32f16(<vscale x 32 x half>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv32f16_nxv32f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vle16ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv32f16_nxv32f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 32 x half> @llvm.riscv.vleff.nxv32f16(
<vscale x 32 x half>* %0,
i32 %1)
@@ -779,10 +866,12 @@ declare <vscale x 32 x half> @llvm.riscv.vleff.mask.nxv32f16(
i32);
define <vscale x 32 x half> @intrinsic_vleff_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32f16_nxv32f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,tu,mu
+; CHECK-NEXT: vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32f16_nxv32f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 32 x half> @llvm.riscv.vleff.mask.nxv32f16(
<vscale x 32 x half> %0,
<vscale x 32 x half>* %1,
@@ -797,10 +886,12 @@ declare <vscale x 1 x i8> @llvm.riscv.vleff.nxv1i8(
i32);
define <vscale x 1 x i8> @intrinsic_vleff_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vle8ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 1 x i8> @llvm.riscv.vleff.nxv1i8(
<vscale x 1 x i8>* %0,
i32 %1)
@@ -815,10 +906,12 @@ declare <vscale x 1 x i8> @llvm.riscv.vleff.mask.nxv1i8(
i32);
define <vscale x 1 x i8> @intrinsic_vleff_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vle8ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 1 x i8> @llvm.riscv.vleff.mask.nxv1i8(
<vscale x 1 x i8> %0,
<vscale x 1 x i8>* %1,
@@ -833,10 +926,12 @@ declare <vscale x 2 x i8> @llvm.riscv.vleff.nxv2i8(
i32);
define <vscale x 2 x i8> @intrinsic_vleff_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vle8ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 2 x i8> @llvm.riscv.vleff.nxv2i8(
<vscale x 2 x i8>* %0,
i32 %1)
@@ -851,10 +946,12 @@ declare <vscale x 2 x i8> @llvm.riscv.vleff.mask.nxv2i8(
i32);
define <vscale x 2 x i8> @intrinsic_vleff_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vle8ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 2 x i8> @llvm.riscv.vleff.mask.nxv2i8(
<vscale x 2 x i8> %0,
<vscale x 2 x i8>* %1,
@@ -869,10 +966,12 @@ declare <vscale x 4 x i8> @llvm.riscv.vleff.nxv4i8(
i32);
define <vscale x 4 x i8> @intrinsic_vleff_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vle8ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 4 x i8> @llvm.riscv.vleff.nxv4i8(
<vscale x 4 x i8>* %0,
i32 %1)
@@ -887,10 +986,12 @@ declare <vscale x 4 x i8> @llvm.riscv.vleff.mask.nxv4i8(
i32);
define <vscale x 4 x i8> @intrinsic_vleff_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vle8ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 4 x i8> @llvm.riscv.vleff.mask.nxv4i8(
<vscale x 4 x i8> %0,
<vscale x 4 x i8>* %1,
@@ -905,10 +1006,12 @@ declare <vscale x 8 x i8> @llvm.riscv.vleff.nxv8i8(
i32);
define <vscale x 8 x i8> @intrinsic_vleff_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vle8ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 8 x i8> @llvm.riscv.vleff.nxv8i8(
<vscale x 8 x i8>* %0,
i32 %1)
@@ -923,10 +1026,12 @@ declare <vscale x 8 x i8> @llvm.riscv.vleff.mask.nxv8i8(
i32);
define <vscale x 8 x i8> @intrinsic_vleff_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT: vle8ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vleff.mask.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8>* %1,
@@ -941,10 +1046,12 @@ declare <vscale x 16 x i8> @llvm.riscv.vleff.nxv16i8(
i32);
define <vscale x 16 x i8> @intrinsic_vleff_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vle8ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 16 x i8> @llvm.riscv.vleff.nxv16i8(
<vscale x 16 x i8>* %0,
i32 %1)
@@ -959,10 +1066,12 @@ declare <vscale x 16 x i8> @llvm.riscv.vleff.mask.nxv16i8(
i32);
define <vscale x 16 x i8> @intrinsic_vleff_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT: vle8ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 16 x i8> @llvm.riscv.vleff.mask.nxv16i8(
<vscale x 16 x i8> %0,
<vscale x 16 x i8>* %1,
@@ -977,10 +1086,12 @@ declare <vscale x 32 x i8> @llvm.riscv.vleff.nxv32i8(
i32);
define <vscale x 32 x i8> @intrinsic_vleff_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vle8ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 32 x i8> @llvm.riscv.vleff.nxv32i8(
<vscale x 32 x i8>* %0,
i32 %1)
@@ -995,10 +1106,12 @@ declare <vscale x 32 x i8> @llvm.riscv.vleff.mask.nxv32i8(
i32);
define <vscale x 32 x i8> @intrinsic_vleff_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu
+; CHECK-NEXT: vle8ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 32 x i8> @llvm.riscv.vleff.mask.nxv32i8(
<vscale x 32 x i8> %0,
<vscale x 32 x i8>* %1,
@@ -1013,10 +1126,12 @@ declare <vscale x 64 x i8> @llvm.riscv.vleff.nxv64i8(
i32);
define <vscale x 64 x i8> @intrinsic_vleff_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, i32 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu
+; CHECK-NEXT: vle8ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 64 x i8> @llvm.riscv.vleff.nxv64i8(
<vscale x 64 x i8>* %0,
i32 %1)
@@ -1031,10 +1146,12 @@ declare <vscale x 64 x i8> @llvm.riscv.vleff.mask.nxv64i8(
i32);
define <vscale x 64 x i8> @intrinsic_vleff_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m8,tu,mu
+; CHECK-NEXT: vle8ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 64 x i8> @llvm.riscv.vleff.mask.nxv64i8(
<vscale x 64 x i8> %0,
<vscale x 64 x i8>* %1,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll
index 17c3cd5d1f8b..32c2fc60bb9b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll
@@ -1,14 +1,17 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \
-; RUN: --riscv-no-aliases < %s | FileCheck %s
+; RUN: < %s | FileCheck %s
declare <vscale x 1 x i64> @llvm.riscv.vleff.nxv1i64(
<vscale x 1 x i64>*,
i64);
define <vscale x 1 x i64> @intrinsic_vleff_v_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vle64ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vle64ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 1 x i64> @llvm.riscv.vleff.nxv1i64(
<vscale x 1 x i64>* %0,
i64 %1)
@@ -23,10 +26,12 @@ declare <vscale x 1 x i64> @llvm.riscv.vleff.mask.nxv1i64(
i64);
define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vle64ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
-; CHECK: vle64ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 1 x i64> @llvm.riscv.vleff.mask.nxv1i64(
<vscale x 1 x i64> %0,
<vscale x 1 x i64>* %1,
@@ -41,10 +46,12 @@ declare <vscale x 2 x i64> @llvm.riscv.vleff.nxv2i64(
i64);
define <vscale x 2 x i64> @intrinsic_vleff_v_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vle64ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vle64ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 2 x i64> @llvm.riscv.vleff.nxv2i64(
<vscale x 2 x i64>* %0,
i64 %1)
@@ -59,10 +66,12 @@ declare <vscale x 2 x i64> @llvm.riscv.vleff.mask.nxv2i64(
i64);
define <vscale x 2 x i64> @intrinsic_vleff_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vle64ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
-; CHECK: vle64ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 2 x i64> @llvm.riscv.vleff.mask.nxv2i64(
<vscale x 2 x i64> %0,
<vscale x 2 x i64>* %1,
@@ -77,10 +86,12 @@ declare <vscale x 4 x i64> @llvm.riscv.vleff.nxv4i64(
i64);
define <vscale x 4 x i64> @intrinsic_vleff_v_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vle64ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vle64ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 4 x i64> @llvm.riscv.vleff.nxv4i64(
<vscale x 4 x i64>* %0,
i64 %1)
@@ -95,10 +106,12 @@ declare <vscale x 4 x i64> @llvm.riscv.vleff.mask.nxv4i64(
i64);
define <vscale x 4 x i64> @intrinsic_vleff_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vle64ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
-; CHECK: vle64ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 4 x i64> @llvm.riscv.vleff.mask.nxv4i64(
<vscale x 4 x i64> %0,
<vscale x 4 x i64>* %1,
@@ -113,10 +126,12 @@ declare <vscale x 8 x i64> @llvm.riscv.vleff.nxv8i64(
i64);
define <vscale x 8 x i64> @intrinsic_vleff_v_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT: vle64ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vle64ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 8 x i64> @llvm.riscv.vleff.nxv8i64(
<vscale x 8 x i64>* %0,
i64 %1)
@@ -131,10 +146,12 @@ declare <vscale x 8 x i64> @llvm.riscv.vleff.mask.nxv8i64(
i64);
define <vscale x 8 x i64> @intrinsic_vleff_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vle64ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
-; CHECK: vle64ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 8 x i64> @llvm.riscv.vleff.mask.nxv8i64(
<vscale x 8 x i64> %0,
<vscale x 8 x i64>* %1,
@@ -149,10 +166,12 @@ declare <vscale x 1 x double> @llvm.riscv.vleff.nxv1f64(
i64);
define <vscale x 1 x double> @intrinsic_vleff_v_nxv1f64_nxv1f64(<vscale x 1 x double>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vle64ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
-; CHECK: vle64ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 1 x double> @llvm.riscv.vleff.nxv1f64(
<vscale x 1 x double>* %0,
i64 %1)
@@ -167,10 +186,12 @@ declare <vscale x 1 x double> @llvm.riscv.vleff.mask.nxv1f64(
i64);
define <vscale x 1 x double> @intrinsic_vleff_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m1,tu,mu
+; CHECK-NEXT: vle64ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu
-; CHECK: vle64ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 1 x double> @llvm.riscv.vleff.mask.nxv1f64(
<vscale x 1 x double> %0,
<vscale x 1 x double>* %1,
@@ -185,10 +206,12 @@ declare <vscale x 2 x double> @llvm.riscv.vleff.nxv2f64(
i64);
define <vscale x 2 x double> @intrinsic_vleff_v_nxv2f64_nxv2f64(<vscale x 2 x double>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vle64ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
-; CHECK: vle64ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 2 x double> @llvm.riscv.vleff.nxv2f64(
<vscale x 2 x double>* %0,
i64 %1)
@@ -203,10 +226,12 @@ declare <vscale x 2 x double> @llvm.riscv.vleff.mask.nxv2f64(
i64);
define <vscale x 2 x double> @intrinsic_vleff_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m2,tu,mu
+; CHECK-NEXT: vle64ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu
-; CHECK: vle64ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 2 x double> @llvm.riscv.vleff.mask.nxv2f64(
<vscale x 2 x double> %0,
<vscale x 2 x double>* %1,
@@ -221,10 +246,12 @@ declare <vscale x 4 x double> @llvm.riscv.vleff.nxv4f64(
i64);
define <vscale x 4 x double> @intrinsic_vleff_v_nxv4f64_nxv4f64(<vscale x 4 x double>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vle64ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
-; CHECK: vle64ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 4 x double> @llvm.riscv.vleff.nxv4f64(
<vscale x 4 x double>* %0,
i64 %1)
@@ -239,10 +266,12 @@ declare <vscale x 4 x double> @llvm.riscv.vleff.mask.nxv4f64(
i64);
define <vscale x 4 x double> @intrinsic_vleff_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m4,tu,mu
+; CHECK-NEXT: vle64ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu
-; CHECK: vle64ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 4 x double> @llvm.riscv.vleff.mask.nxv4f64(
<vscale x 4 x double> %0,
<vscale x 4 x double>* %1,
@@ -257,10 +286,12 @@ declare <vscale x 8 x double> @llvm.riscv.vleff.nxv8f64(
i64);
define <vscale x 8 x double> @intrinsic_vleff_v_nxv8f64_nxv8f64(<vscale x 8 x double>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT: vle64ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
-; CHECK: vle64ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 8 x double> @llvm.riscv.vleff.nxv8f64(
<vscale x 8 x double>* %0,
i64 %1)
@@ -275,10 +306,12 @@ declare <vscale x 8 x double> @llvm.riscv.vleff.mask.nxv8f64(
i64);
define <vscale x 8 x double> @intrinsic_vleff_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e64,m8,tu,mu
+; CHECK-NEXT: vle64ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,tu,mu
-; CHECK: vle64ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 8 x double> @llvm.riscv.vleff.mask.nxv8f64(
<vscale x 8 x double> %0,
<vscale x 8 x double>* %1,
@@ -293,10 +326,12 @@ declare <vscale x 1 x i32> @llvm.riscv.vleff.nxv1i32(
i64);
define <vscale x 1 x i32> @intrinsic_vleff_v_nxv1i32_nxv1i32(<vscale x 1 x i32>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vle32ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 1 x i32> @llvm.riscv.vleff.nxv1i32(
<vscale x 1 x i32>* %0,
i64 %1)
@@ -311,10 +346,12 @@ declare <vscale x 1 x i32> @llvm.riscv.vleff.mask.nxv1i32(
i64);
define <vscale x 1 x i32> @intrinsic_vleff_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vle32ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 1 x i32> @llvm.riscv.vleff.mask.nxv1i32(
<vscale x 1 x i32> %0,
<vscale x 1 x i32>* %1,
@@ -329,10 +366,12 @@ declare <vscale x 2 x i32> @llvm.riscv.vleff.nxv2i32(
i64);
define <vscale x 2 x i32> @intrinsic_vleff_v_nxv2i32_nxv2i32(<vscale x 2 x i32>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vle32ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 2 x i32> @llvm.riscv.vleff.nxv2i32(
<vscale x 2 x i32>* %0,
i64 %1)
@@ -347,10 +386,12 @@ declare <vscale x 2 x i32> @llvm.riscv.vleff.mask.nxv2i32(
i64);
define <vscale x 2 x i32> @intrinsic_vleff_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vle32ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vleff.mask.nxv2i32(
<vscale x 2 x i32> %0,
<vscale x 2 x i32>* %1,
@@ -365,10 +406,12 @@ declare <vscale x 4 x i32> @llvm.riscv.vleff.nxv4i32(
i64);
define <vscale x 4 x i32> @intrinsic_vleff_v_nxv4i32_nxv4i32(<vscale x 4 x i32>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vle32ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 4 x i32> @llvm.riscv.vleff.nxv4i32(
<vscale x 4 x i32>* %0,
i64 %1)
@@ -383,10 +426,12 @@ declare <vscale x 4 x i32> @llvm.riscv.vleff.mask.nxv4i32(
i64);
define <vscale x 4 x i32> @intrinsic_vleff_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vle32ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 4 x i32> @llvm.riscv.vleff.mask.nxv4i32(
<vscale x 4 x i32> %0,
<vscale x 4 x i32>* %1,
@@ -401,10 +446,12 @@ declare <vscale x 8 x i32> @llvm.riscv.vleff.nxv8i32(
i64);
define <vscale x 8 x i32> @intrinsic_vleff_v_nxv8i32_nxv8i32(<vscale x 8 x i32>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vle32ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 8 x i32> @llvm.riscv.vleff.nxv8i32(
<vscale x 8 x i32>* %0,
i64 %1)
@@ -419,10 +466,12 @@ declare <vscale x 8 x i32> @llvm.riscv.vleff.mask.nxv8i32(
i64);
define <vscale x 8 x i32> @intrinsic_vleff_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vle32ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 8 x i32> @llvm.riscv.vleff.mask.nxv8i32(
<vscale x 8 x i32> %0,
<vscale x 8 x i32>* %1,
@@ -437,10 +486,12 @@ declare <vscale x 16 x i32> @llvm.riscv.vleff.nxv16i32(
i64);
define <vscale x 16 x i32> @intrinsic_vleff_v_nxv16i32_nxv16i32(<vscale x 16 x i32>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vle32ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 16 x i32> @llvm.riscv.vleff.nxv16i32(
<vscale x 16 x i32>* %0,
i64 %1)
@@ -455,10 +506,12 @@ declare <vscale x 16 x i32> @llvm.riscv.vleff.mask.nxv16i32(
i64);
define <vscale x 16 x i32> @intrinsic_vleff_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vle32ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 16 x i32> @llvm.riscv.vleff.mask.nxv16i32(
<vscale x 16 x i32> %0,
<vscale x 16 x i32>* %1,
@@ -473,10 +526,12 @@ declare <vscale x 1 x float> @llvm.riscv.vleff.nxv1f32(
i64);
define <vscale x 1 x float> @intrinsic_vleff_v_nxv1f32_nxv1f32(<vscale x 1 x float>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vle32ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 1 x float> @llvm.riscv.vleff.nxv1f32(
<vscale x 1 x float>* %0,
i64 %1)
@@ -491,10 +546,12 @@ declare <vscale x 1 x float> @llvm.riscv.vleff.mask.nxv1f32(
i64);
define <vscale x 1 x float> @intrinsic_vleff_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,mf2,tu,mu
+; CHECK-NEXT: vle32ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vleff.mask.nxv1f32(
<vscale x 1 x float> %0,
<vscale x 1 x float>* %1,
@@ -509,10 +566,12 @@ declare <vscale x 2 x float> @llvm.riscv.vleff.nxv2f32(
i64);
define <vscale x 2 x float> @intrinsic_vleff_v_nxv2f32_nxv2f32(<vscale x 2 x float>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vle32ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 2 x float> @llvm.riscv.vleff.nxv2f32(
<vscale x 2 x float>* %0,
i64 %1)
@@ -527,10 +586,12 @@ declare <vscale x 2 x float> @llvm.riscv.vleff.mask.nxv2f32(
i64);
define <vscale x 2 x float> @intrinsic_vleff_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m1,tu,mu
+; CHECK-NEXT: vle32ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vleff.mask.nxv2f32(
<vscale x 2 x float> %0,
<vscale x 2 x float>* %1,
@@ -545,10 +606,12 @@ declare <vscale x 4 x float> @llvm.riscv.vleff.nxv4f32(
i64);
define <vscale x 4 x float> @intrinsic_vleff_v_nxv4f32_nxv4f32(<vscale x 4 x float>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vle32ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 4 x float> @llvm.riscv.vleff.nxv4f32(
<vscale x 4 x float>* %0,
i64 %1)
@@ -563,10 +626,12 @@ declare <vscale x 4 x float> @llvm.riscv.vleff.mask.nxv4f32(
i64);
define <vscale x 4 x float> @intrinsic_vleff_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m2,tu,mu
+; CHECK-NEXT: vle32ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vleff.mask.nxv4f32(
<vscale x 4 x float> %0,
<vscale x 4 x float>* %1,
@@ -581,10 +646,12 @@ declare <vscale x 8 x float> @llvm.riscv.vleff.nxv8f32(
i64);
define <vscale x 8 x float> @intrinsic_vleff_v_nxv8f32_nxv8f32(<vscale x 8 x float>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vle32ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 8 x float> @llvm.riscv.vleff.nxv8f32(
<vscale x 8 x float>* %0,
i64 %1)
@@ -599,10 +666,12 @@ declare <vscale x 8 x float> @llvm.riscv.vleff.mask.nxv8f32(
i64);
define <vscale x 8 x float> @intrinsic_vleff_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m4,tu,mu
+; CHECK-NEXT: vle32ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vleff.mask.nxv8f32(
<vscale x 8 x float> %0,
<vscale x 8 x float>* %1,
@@ -617,10 +686,12 @@ declare <vscale x 16 x float> @llvm.riscv.vleff.nxv16f32(
i64);
define <vscale x 16 x float> @intrinsic_vleff_v_nxv16f32_nxv16f32(<vscale x 16 x float>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vle32ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 16 x float> @llvm.riscv.vleff.nxv16f32(
<vscale x 16 x float>* %0,
i64 %1)
@@ -635,10 +706,12 @@ declare <vscale x 16 x float> @llvm.riscv.vleff.mask.nxv16f32(
i64);
define <vscale x 16 x float> @intrinsic_vleff_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e32,m8,tu,mu
+; CHECK-NEXT: vle32ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
-; CHECK: vle32ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vleff.mask.nxv16f32(
<vscale x 16 x float> %0,
<vscale x 16 x float>* %1,
@@ -653,10 +726,12 @@ declare <vscale x 1 x i16> @llvm.riscv.vleff.nxv1i16(
i64);
define <vscale x 1 x i16> @intrinsic_vleff_v_nxv1i16_nxv1i16(<vscale x 1 x i16>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vle16ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 1 x i16> @llvm.riscv.vleff.nxv1i16(
<vscale x 1 x i16>* %0,
i64 %1)
@@ -671,10 +746,12 @@ declare <vscale x 1 x i16> @llvm.riscv.vleff.mask.nxv1i16(
i64);
define <vscale x 1 x i16> @intrinsic_vleff_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 1 x i16> @llvm.riscv.vleff.mask.nxv1i16(
<vscale x 1 x i16> %0,
<vscale x 1 x i16>* %1,
@@ -689,10 +766,12 @@ declare <vscale x 2 x i16> @llvm.riscv.vleff.nxv2i16(
i64);
define <vscale x 2 x i16> @intrinsic_vleff_v_nxv2i16_nxv2i16(<vscale x 2 x i16>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vle16ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 2 x i16> @llvm.riscv.vleff.nxv2i16(
<vscale x 2 x i16>* %0,
i64 %1)
@@ -707,10 +786,12 @@ declare <vscale x 2 x i16> @llvm.riscv.vleff.mask.nxv2i16(
i64);
define <vscale x 2 x i16> @intrinsic_vleff_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 2 x i16> @llvm.riscv.vleff.mask.nxv2i16(
<vscale x 2 x i16> %0,
<vscale x 2 x i16>* %1,
@@ -725,10 +806,12 @@ declare <vscale x 4 x i16> @llvm.riscv.vleff.nxv4i16(
i64);
define <vscale x 4 x i16> @intrinsic_vleff_v_nxv4i16_nxv4i16(<vscale x 4 x i16>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vle16ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 4 x i16> @llvm.riscv.vleff.nxv4i16(
<vscale x 4 x i16>* %0,
i64 %1)
@@ -743,10 +826,12 @@ declare <vscale x 4 x i16> @llvm.riscv.vleff.mask.nxv4i16(
i64);
define <vscale x 4 x i16> @intrinsic_vleff_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vleff.mask.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16>* %1,
@@ -761,10 +846,12 @@ declare <vscale x 8 x i16> @llvm.riscv.vleff.nxv8i16(
i64);
define <vscale x 8 x i16> @intrinsic_vleff_v_nxv8i16_nxv8i16(<vscale x 8 x i16>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vle16ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 8 x i16> @llvm.riscv.vleff.nxv8i16(
<vscale x 8 x i16>* %0,
i64 %1)
@@ -779,10 +866,12 @@ declare <vscale x 8 x i16> @llvm.riscv.vleff.mask.nxv8i16(
i64);
define <vscale x 8 x i16> @intrinsic_vleff_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 8 x i16> @llvm.riscv.vleff.mask.nxv8i16(
<vscale x 8 x i16> %0,
<vscale x 8 x i16>* %1,
@@ -797,10 +886,12 @@ declare <vscale x 16 x i16> @llvm.riscv.vleff.nxv16i16(
i64);
define <vscale x 16 x i16> @intrinsic_vleff_v_nxv16i16_nxv16i16(<vscale x 16 x i16>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vle16ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 16 x i16> @llvm.riscv.vleff.nxv16i16(
<vscale x 16 x i16>* %0,
i64 %1)
@@ -815,10 +906,12 @@ declare <vscale x 16 x i16> @llvm.riscv.vleff.mask.nxv16i16(
i64);
define <vscale x 16 x i16> @intrinsic_vleff_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT: vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 16 x i16> @llvm.riscv.vleff.mask.nxv16i16(
<vscale x 16 x i16> %0,
<vscale x 16 x i16>* %1,
@@ -833,10 +926,12 @@ declare <vscale x 32 x i16> @llvm.riscv.vleff.nxv32i16(
i64);
define <vscale x 32 x i16> @intrinsic_vleff_v_nxv32i16_nxv32i16(<vscale x 32 x i16>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vle16ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 32 x i16> @llvm.riscv.vleff.nxv32i16(
<vscale x 32 x i16>* %0,
i64 %1)
@@ -851,10 +946,12 @@ declare <vscale x 32 x i16> @llvm.riscv.vleff.mask.nxv32i16(
i64);
define <vscale x 32 x i16> @intrinsic_vleff_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16>* %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,tu,mu
+; CHECK-NEXT: vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 32 x i16> @llvm.riscv.vleff.mask.nxv32i16(
<vscale x 32 x i16> %0,
<vscale x 32 x i16>* %1,
@@ -869,10 +966,12 @@ declare <vscale x 1 x half> @llvm.riscv.vleff.nxv1f16(
i64);
define <vscale x 1 x half> @intrinsic_vleff_v_nxv1f16_nxv1f16(<vscale x 1 x half>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vle16ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv1f16_nxv1f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 1 x half> @llvm.riscv.vleff.nxv1f16(
<vscale x 1 x half>* %0,
i64 %1)
@@ -887,10 +986,12 @@ declare <vscale x 1 x half> @llvm.riscv.vleff.mask.nxv1f16(
i64);
define <vscale x 1 x half> @intrinsic_vleff_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f16_nxv1f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf4,tu,mu
+; CHECK-NEXT: vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1f16_nxv1f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 1 x half> @llvm.riscv.vleff.mask.nxv1f16(
<vscale x 1 x half> %0,
<vscale x 1 x half>* %1,
@@ -905,10 +1006,12 @@ declare <vscale x 2 x half> @llvm.riscv.vleff.nxv2f16(
i64);
define <vscale x 2 x half> @intrinsic_vleff_v_nxv2f16_nxv2f16(<vscale x 2 x half>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vle16ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv2f16_nxv2f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 2 x half> @llvm.riscv.vleff.nxv2f16(
<vscale x 2 x half>* %0,
i64 %1)
@@ -923,10 +1026,12 @@ declare <vscale x 2 x half> @llvm.riscv.vleff.mask.nxv2f16(
i64);
define <vscale x 2 x half> @intrinsic_vleff_mask_v_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f16_nxv2f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,mf2,tu,mu
+; CHECK-NEXT: vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2f16_nxv2f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 2 x half> @llvm.riscv.vleff.mask.nxv2f16(
<vscale x 2 x half> %0,
<vscale x 2 x half>* %1,
@@ -941,10 +1046,12 @@ declare <vscale x 4 x half> @llvm.riscv.vleff.nxv4f16(
i64);
define <vscale x 4 x half> @intrinsic_vleff_v_nxv4f16_nxv4f16(<vscale x 4 x half>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vle16ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv4f16_nxv4f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 4 x half> @llvm.riscv.vleff.nxv4f16(
<vscale x 4 x half>* %0,
i64 %1)
@@ -959,10 +1066,12 @@ declare <vscale x 4 x half> @llvm.riscv.vleff.mask.nxv4f16(
i64);
define <vscale x 4 x half> @intrinsic_vleff_mask_v_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f16_nxv4f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m1,tu,mu
+; CHECK-NEXT: vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4f16_nxv4f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 4 x half> @llvm.riscv.vleff.mask.nxv4f16(
<vscale x 4 x half> %0,
<vscale x 4 x half>* %1,
@@ -977,10 +1086,12 @@ declare <vscale x 8 x half> @llvm.riscv.vleff.nxv8f16(
i64);
define <vscale x 8 x half> @intrinsic_vleff_v_nxv8f16_nxv8f16(<vscale x 8 x half>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vle16ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv8f16_nxv8f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 8 x half> @llvm.riscv.vleff.nxv8f16(
<vscale x 8 x half>* %0,
i64 %1)
@@ -995,10 +1106,12 @@ declare <vscale x 8 x half> @llvm.riscv.vleff.mask.nxv8f16(
i64);
define <vscale x 8 x half> @intrinsic_vleff_mask_v_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f16_nxv8f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m2,tu,mu
+; CHECK-NEXT: vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8f16_nxv8f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 8 x half> @llvm.riscv.vleff.mask.nxv8f16(
<vscale x 8 x half> %0,
<vscale x 8 x half>* %1,
@@ -1013,10 +1126,12 @@ declare <vscale x 16 x half> @llvm.riscv.vleff.nxv16f16(
i64);
define <vscale x 16 x half> @intrinsic_vleff_v_nxv16f16_nxv16f16(<vscale x 16 x half>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vle16ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv16f16_nxv16f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 16 x half> @llvm.riscv.vleff.nxv16f16(
<vscale x 16 x half>* %0,
i64 %1)
@@ -1031,10 +1146,12 @@ declare <vscale x 16 x half> @llvm.riscv.vleff.mask.nxv16f16(
i64);
define <vscale x 16 x half> @intrinsic_vleff_mask_v_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f16_nxv16f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT: vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16f16_nxv16f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 16 x half> @llvm.riscv.vleff.mask.nxv16f16(
<vscale x 16 x half> %0,
<vscale x 16 x half>* %1,
@@ -1049,10 +1166,12 @@ declare <vscale x 32 x half> @llvm.riscv.vleff.nxv32f16(
i64);
define <vscale x 32 x half> @intrinsic_vleff_v_nxv32f16_nxv32f16(<vscale x 32 x half>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv32f16_nxv32f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vle16ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv32f16_nxv32f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 32 x half> @llvm.riscv.vleff.nxv32f16(
<vscale x 32 x half>* %0,
i64 %1)
@@ -1067,10 +1186,12 @@ declare <vscale x 32 x half> @llvm.riscv.vleff.mask.nxv32f16(
i64);
define <vscale x 32 x half> @intrinsic_vleff_mask_v_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half>* %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32f16_nxv32f16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e16,m8,tu,mu
+; CHECK-NEXT: vle16ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32f16_nxv32f16
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
-; CHECK: vle16ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 32 x half> @llvm.riscv.vleff.mask.nxv32f16(
<vscale x 32 x half> %0,
<vscale x 32 x half>* %1,
@@ -1085,10 +1206,12 @@ declare <vscale x 1 x i8> @llvm.riscv.vleff.nxv1i8(
i64);
define <vscale x 1 x i8> @intrinsic_vleff_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vle8ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 1 x i8> @llvm.riscv.vleff.nxv1i8(
<vscale x 1 x i8>* %0,
i64 %1)
@@ -1103,10 +1226,12 @@ declare <vscale x 1 x i8> @llvm.riscv.vleff.mask.nxv1i8(
i64);
define <vscale x 1 x i8> @intrinsic_vleff_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf8,tu,mu
+; CHECK-NEXT: vle8ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 1 x i8> @llvm.riscv.vleff.mask.nxv1i8(
<vscale x 1 x i8> %0,
<vscale x 1 x i8>* %1,
@@ -1121,10 +1246,12 @@ declare <vscale x 2 x i8> @llvm.riscv.vleff.nxv2i8(
i64);
define <vscale x 2 x i8> @intrinsic_vleff_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vle8ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 2 x i8> @llvm.riscv.vleff.nxv2i8(
<vscale x 2 x i8>* %0,
i64 %1)
@@ -1139,10 +1266,12 @@ declare <vscale x 2 x i8> @llvm.riscv.vleff.mask.nxv2i8(
i64);
define <vscale x 2 x i8> @intrinsic_vleff_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf4,tu,mu
+; CHECK-NEXT: vle8ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 2 x i8> @llvm.riscv.vleff.mask.nxv2i8(
<vscale x 2 x i8> %0,
<vscale x 2 x i8>* %1,
@@ -1157,10 +1286,12 @@ declare <vscale x 4 x i8> @llvm.riscv.vleff.nxv4i8(
i64);
define <vscale x 4 x i8> @intrinsic_vleff_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vle8ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 4 x i8> @llvm.riscv.vleff.nxv4i8(
<vscale x 4 x i8>* %0,
i64 %1)
@@ -1175,10 +1306,12 @@ declare <vscale x 4 x i8> @llvm.riscv.vleff.mask.nxv4i8(
i64);
define <vscale x 4 x i8> @intrinsic_vleff_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,mf2,tu,mu
+; CHECK-NEXT: vle8ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 4 x i8> @llvm.riscv.vleff.mask.nxv4i8(
<vscale x 4 x i8> %0,
<vscale x 4 x i8>* %1,
@@ -1193,10 +1326,12 @@ declare <vscale x 8 x i8> @llvm.riscv.vleff.nxv8i8(
i64);
define <vscale x 8 x i8> @intrinsic_vleff_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vle8ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 8 x i8> @llvm.riscv.vleff.nxv8i8(
<vscale x 8 x i8>* %0,
i64 %1)
@@ -1211,10 +1346,12 @@ declare <vscale x 8 x i8> @llvm.riscv.vleff.mask.nxv8i8(
i64);
define <vscale x 8 x i8> @intrinsic_vleff_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m1,tu,mu
+; CHECK-NEXT: vle8ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 8 x i8> @llvm.riscv.vleff.mask.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8>* %1,
@@ -1229,10 +1366,12 @@ declare <vscale x 16 x i8> @llvm.riscv.vleff.nxv16i8(
i64);
define <vscale x 16 x i8> @intrinsic_vleff_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vle8ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 16 x i8> @llvm.riscv.vleff.nxv16i8(
<vscale x 16 x i8>* %0,
i64 %1)
@@ -1247,10 +1386,12 @@ declare <vscale x 16 x i8> @llvm.riscv.vleff.mask.nxv16i8(
i64);
define <vscale x 16 x i8> @intrinsic_vleff_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m2,tu,mu
+; CHECK-NEXT: vle8ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 16 x i8> @llvm.riscv.vleff.mask.nxv16i8(
<vscale x 16 x i8> %0,
<vscale x 16 x i8>* %1,
@@ -1265,10 +1406,12 @@ declare <vscale x 32 x i8> @llvm.riscv.vleff.nxv32i8(
i64);
define <vscale x 32 x i8> @intrinsic_vleff_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vle8ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 32 x i8> @llvm.riscv.vleff.nxv32i8(
<vscale x 32 x i8>* %0,
i64 %1)
@@ -1283,10 +1426,12 @@ declare <vscale x 32 x i8> @llvm.riscv.vleff.mask.nxv32i8(
i64);
define <vscale x 32 x i8> @intrinsic_vleff_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m4,tu,mu
+; CHECK-NEXT: vle8ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 32 x i8> @llvm.riscv.vleff.mask.nxv32i8(
<vscale x 32 x i8> %0,
<vscale x 32 x i8>* %1,
@@ -1301,10 +1446,12 @@ declare <vscale x 64 x i8> @llvm.riscv.vleff.nxv64i8(
i64);
define <vscale x 64 x i8> @intrinsic_vleff_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu
+; CHECK-NEXT: vle8ff.v v16, (a0)
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0)
%a = call <vscale x 64 x i8> @llvm.riscv.vleff.nxv64i8(
<vscale x 64 x i8>* %0,
i64 %1)
@@ -1319,10 +1466,12 @@ declare <vscale x 64 x i8> @llvm.riscv.vleff.mask.nxv64i8(
i64);
define <vscale x 64 x i8> @intrinsic_vleff_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a1, a1, e8,m8,tu,mu
+; CHECK-NEXT: vle8ff.v v16, (a0), v0.t
+; CHECK-NEXT: ret
entry:
-; CHECK-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8
-; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,tu,mu
-; CHECK: vle8ff.v {{v[0-9]+}}, (a0), v0.t
%a = call <vscale x 64 x i8> @llvm.riscv.vleff.mask.nxv64i8(
<vscale x 64 x i8> %0,
<vscale x 64 x i8>* %1,
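(For reference, a minimal sketch of the regeneration step, assuming an llvm-project checkout with a built llc; the build/bin path below is illustrative, not taken from this commit:

    $ llvm/utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
        llvm/test/CodeGen/RISCV/rvv/vleff-rv32.ll \
        llvm/test/CodeGen/RISCV/rvv/vleff-rv64.ll

Passing --llc-binary is optional; without it the script uses whichever llc is found on PATH. The script reruns the RUN lines and rewrites the CHECK blocks in place, which is what produces the autogenerated assertions seen in this diff.)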