[llvm] CostModel/RISCV: fix typos in fround test, vector length (PR #67025)

via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 21 07:54:14 PDT 2023


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-llvm-analysis

<details>
<summary>Changes</summary>

There are several typos in fround.ll, presumably caused by copy-pasting, where there is a strange nvx5* type. From the surrounding code, it is clear that this was intended to be nvx4*. Fix these typos.

---
Full diff: https://github.com/llvm/llvm-project/pull/67025.diff


1 Files Affected:

- (modified) llvm/test/Analysis/CostModel/RISCV/fround.ll (+28-28) 


``````````diff
diff --git a/llvm/test/Analysis/CostModel/RISCV/fround.ll b/llvm/test/Analysis/CostModel/RISCV/fround.ll
index b8e64820eded557..7ec18e56012e2c2 100644
--- a/llvm/test/Analysis/CostModel/RISCV/fround.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/fround.ll
@@ -41,7 +41,7 @@ define void @floor() {
   call <16 x double> @llvm.floor.v16f64(<16 x double> undef)
   call <vscale x 1 x double> @llvm.floor.nvx1f64(<vscale x 1 x double> undef)
   call <vscale x 2 x double> @llvm.floor.nvx2f64(<vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.floor.nvx5f64(<vscale x 4 x double> undef)
+  call <vscale x 4 x double> @llvm.floor.nvx4f64(<vscale x 4 x double> undef)
   call <vscale x 8 x double> @llvm.floor.nvx8f64(<vscale x 8 x double> undef)
   ret void
 }
@@ -86,7 +86,7 @@ define void @ceil() {
   call <16 x double> @llvm.ceil.v16f64(<16 x double> undef)
   call <vscale x 1 x double> @llvm.ceil.nvx1f64(<vscale x 1 x double> undef)
   call <vscale x 2 x double> @llvm.ceil.nvx2f64(<vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.ceil.nvx5f64(<vscale x 4 x double> undef)
+  call <vscale x 4 x double> @llvm.ceil.nvx4f64(<vscale x 4 x double> undef)
   call <vscale x 8 x double> @llvm.ceil.nvx8f64(<vscale x 8 x double> undef)
   ret void
 }
@@ -131,7 +131,7 @@ define void @trunc() {
   call <16 x double> @llvm.trunc.v16f64(<16 x double> undef)
   call <vscale x 1 x double> @llvm.trunc.nvx1f64(<vscale x 1 x double> undef)
   call <vscale x 2 x double> @llvm.trunc.nvx2f64(<vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.trunc.nvx5f64(<vscale x 4 x double> undef)
+  call <vscale x 4 x double> @llvm.trunc.nvx4f64(<vscale x 4 x double> undef)
   call <vscale x 8 x double> @llvm.trunc.nvx8f64(<vscale x 8 x double> undef)
   ret void
 }
@@ -176,7 +176,7 @@ define void @rint() {
   call <16 x double> @llvm.rint.v16f64(<16 x double> undef)
   call <vscale x 1 x double> @llvm.rint.nvx1f64(<vscale x 1 x double> undef)
   call <vscale x 2 x double> @llvm.rint.nvx2f64(<vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.rint.nvx5f64(<vscale x 4 x double> undef)
+  call <vscale x 4 x double> @llvm.rint.nvx4f64(<vscale x 4 x double> undef)
   call <vscale x 8 x double> @llvm.rint.nvx8f64(<vscale x 8 x double> undef)
   ret void
 }
@@ -221,7 +221,7 @@ define void @nearbyint() {
   call <16 x double> @llvm.nearbyint.v16f64(<16 x double> undef)
   call <vscale x 1 x double> @llvm.nearbyint.nvx1f64(<vscale x 1 x double> undef)
   call <vscale x 2 x double> @llvm.nearbyint.nvx2f64(<vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.nearbyint.nvx5f64(<vscale x 4 x double> undef)
+  call <vscale x 4 x double> @llvm.nearbyint.nvx4f64(<vscale x 4 x double> undef)
   call <vscale x 8 x double> @llvm.nearbyint.nvx8f64(<vscale x 8 x double> undef)
   ret void
 }
@@ -266,7 +266,7 @@ define void @round() {
   call <16 x double> @llvm.round.v16f64(<16 x double> undef)
   call <vscale x 1 x double> @llvm.round.nvx1f64(<vscale x 1 x double> undef)
   call <vscale x 2 x double> @llvm.round.nvx2f64(<vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.round.nvx5f64(<vscale x 4 x double> undef)
+  call <vscale x 4 x double> @llvm.round.nvx4f64(<vscale x 4 x double> undef)
   call <vscale x 8 x double> @llvm.round.nvx8f64(<vscale x 8 x double> undef)
   ret void
 }
@@ -311,7 +311,7 @@ define void @roundeven() {
   call <16 x double> @llvm.roundeven.v16f64(<16 x double> undef)
   call <vscale x 1 x double> @llvm.roundeven.nvx1f64(<vscale x 1 x double> undef)
   call <vscale x 2 x double> @llvm.roundeven.nvx2f64(<vscale x 2 x double> undef)
-  call <vscale x 4 x double> @llvm.roundeven.nvx5f64(<vscale x 4 x double> undef)
+  call <vscale x 4 x double> @llvm.roundeven.nvx4f64(<vscale x 4 x double> undef)
   call <vscale x 8 x double> @llvm.roundeven.nvx8f64(<vscale x 8 x double> undef)
   ret void
 }
@@ -352,7 +352,7 @@ define void @vp_ceil() {
   call <16 x double> @llvm.vp.ceil.v16f64(<16 x double> undef, <16 x i1> undef, i32 undef)
   call <vscale x 1 x double> @llvm.vp.ceil.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
   call <vscale x 2 x double> @llvm.vp.ceil.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-  call <vscale x 4 x double> @llvm.vp.ceil.nvx5f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 4 x double> @llvm.vp.ceil.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
   call <vscale x 8 x double> @llvm.vp.ceil.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
   ret void
 }
@@ -393,7 +393,7 @@ define void @vp_floor() {
   call <16 x double> @llvm.vp.floor.v16f64(<16 x double> undef, <16 x i1> undef, i32 undef)
   call <vscale x 1 x double> @llvm.vp.floor.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
   call <vscale x 2 x double> @llvm.vp.floor.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-  call <vscale x 4 x double> @llvm.vp.floor.nvx5f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 4 x double> @llvm.vp.floor.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
   call <vscale x 8 x double> @llvm.vp.floor.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
   ret void
 }
@@ -434,7 +434,7 @@ define void @vp_round() {
   call <16 x double> @llvm.vp.round.v16f64(<16 x double> undef, <16 x i1> undef, i32 undef)
   call <vscale x 1 x double> @llvm.vp.round.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
   call <vscale x 2 x double> @llvm.vp.round.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-  call <vscale x 4 x double> @llvm.vp.round.nvx5f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 4 x double> @llvm.vp.round.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
   call <vscale x 8 x double> @llvm.vp.round.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
   ret void
 }
@@ -475,7 +475,7 @@ define void @vp_roundeven() {
   call <16 x double> @llvm.vp.roundeven.v16f64(<16 x double> undef, <16 x i1> undef, i32 undef)
   call <vscale x 1 x double> @llvm.vp.roundeven.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
   call <vscale x 2 x double> @llvm.vp.roundeven.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-  call <vscale x 4 x double> @llvm.vp.roundeven.nvx5f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 4 x double> @llvm.vp.roundeven.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
   call <vscale x 8 x double> @llvm.vp.roundeven.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
   ret void
 }
@@ -516,7 +516,7 @@ define void @vp_roundtozero() {
   call <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double> undef, <16 x i1> undef, i32 undef)
   call <vscale x 1 x double> @llvm.vp.roundtozero.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
   call <vscale x 2 x double> @llvm.vp.roundtozero.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-  call <vscale x 4 x double> @llvm.vp.roundtozero.nvx5f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 4 x double> @llvm.vp.roundtozero.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
   call <vscale x 8 x double> @llvm.vp.roundtozero.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
   ret void
 }
@@ -557,7 +557,7 @@ define void @vp_rint() {
   call <16 x double> @llvm.vp.rint.v16f64(<16 x double> undef, <16 x i1> undef, i32 undef)
   call <vscale x 1 x double> @llvm.vp.rint.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
   call <vscale x 2 x double> @llvm.vp.rint.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-  call <vscale x 4 x double> @llvm.vp.rint.nvx5f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 4 x double> @llvm.vp.rint.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
   call <vscale x 8 x double> @llvm.vp.rint.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
   ret void
 }
@@ -598,7 +598,7 @@ define void @vp_nearbyint() {
   call <16 x double> @llvm.vp.nearbyint.v16f64(<16 x double> undef, <16 x i1> undef, i32 undef)
   call <vscale x 1 x double> @llvm.vp.nearbyint.nvx1f64(<vscale x 1 x double> undef, <vscale x 1 x i1> undef, i32 undef)
   call <vscale x 2 x double> @llvm.vp.nearbyint.nvx2f64(<vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
-  call <vscale x 4 x double> @llvm.vp.nearbyint.nvx5f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 4 x double> @llvm.vp.nearbyint.nvx4f64(<vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
   call <vscale x 8 x double> @llvm.vp.nearbyint.nvx8f64(<vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
   ret void
 }
@@ -620,7 +620,7 @@ declare <8 x double> @llvm.floor.v8f64(<8 x double>)
 declare <16 x double> @llvm.floor.v16f64(<16 x double>)
 declare <vscale x 1 x double> @llvm.floor.nvx1f64(<vscale x 1 x double>)
 declare <vscale x 2 x double> @llvm.floor.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.floor.nvx5f64(<vscale x 4 x double>)
+declare <vscale x 4 x double> @llvm.floor.nvx4f64(<vscale x 4 x double>)
 declare <vscale x 8 x double> @llvm.floor.nvx8f64(<vscale x 8 x double>)
 
 declare float @llvm.ceil.f32(float)
@@ -640,7 +640,7 @@ declare <8 x double> @llvm.ceil.v8f64(<8 x double>)
 declare <16 x double> @llvm.ceil.v16f64(<16 x double>)
 declare <vscale x 1 x double> @llvm.ceil.nvx1f64(<vscale x 1 x double>)
 declare <vscale x 2 x double> @llvm.ceil.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.ceil.nvx5f64(<vscale x 4 x double>)
+declare <vscale x 4 x double> @llvm.ceil.nvx4f64(<vscale x 4 x double>)
 declare <vscale x 8 x double> @llvm.ceil.nvx8f64(<vscale x 8 x double>)
 
 declare float @llvm.trunc.f32(float)
@@ -660,7 +660,7 @@ declare <8 x double> @llvm.trunc.v8f64(<8 x double>)
 declare <16 x double> @llvm.trunc.v16f64(<16 x double>)
 declare <vscale x 1 x double> @llvm.trunc.nvx1f64(<vscale x 1 x double>)
 declare <vscale x 2 x double> @llvm.trunc.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.trunc.nvx5f64(<vscale x 4 x double>)
+declare <vscale x 4 x double> @llvm.trunc.nvx4f64(<vscale x 4 x double>)
 declare <vscale x 8 x double> @llvm.trunc.nvx8f64(<vscale x 8 x double>)
 
 declare float @llvm.rint.f32(float)
@@ -680,7 +680,7 @@ declare <8 x double> @llvm.rint.v8f64(<8 x double>)
 declare <16 x double> @llvm.rint.v16f64(<16 x double>)
 declare <vscale x 1 x double> @llvm.rint.nvx1f64(<vscale x 1 x double>)
 declare <vscale x 2 x double> @llvm.rint.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.rint.nvx5f64(<vscale x 4 x double>)
+declare <vscale x 4 x double> @llvm.rint.nvx4f64(<vscale x 4 x double>)
 declare <vscale x 8 x double> @llvm.rint.nvx8f64(<vscale x 8 x double>)
 
 declare float @llvm.nearbyint.f32(float)
@@ -700,7 +700,7 @@ declare <8 x double> @llvm.nearbyint.v8f64(<8 x double>)
 declare <16 x double> @llvm.nearbyint.v16f64(<16 x double>)
 declare <vscale x 1 x double> @llvm.nearbyint.nvx1f64(<vscale x 1 x double>)
 declare <vscale x 2 x double> @llvm.nearbyint.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.nearbyint.nvx5f64(<vscale x 4 x double>)
+declare <vscale x 4 x double> @llvm.nearbyint.nvx4f64(<vscale x 4 x double>)
 declare <vscale x 8 x double> @llvm.nearbyint.nvx8f64(<vscale x 8 x double>)
 
 declare float @llvm.round.f32(float)
@@ -720,7 +720,7 @@ declare <8 x double> @llvm.round.v8f64(<8 x double>)
 declare <16 x double> @llvm.round.v16f64(<16 x double>)
 declare <vscale x 1 x double> @llvm.round.nvx1f64(<vscale x 1 x double>)
 declare <vscale x 2 x double> @llvm.round.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.round.nvx5f64(<vscale x 4 x double>)
+declare <vscale x 4 x double> @llvm.round.nvx4f64(<vscale x 4 x double>)
 declare <vscale x 8 x double> @llvm.round.nvx8f64(<vscale x 8 x double>)
 
 declare float @llvm.roundeven.f32(float)
@@ -740,7 +740,7 @@ declare <8 x double> @llvm.roundeven.v8f64(<8 x double>)
 declare <16 x double> @llvm.roundeven.v16f64(<16 x double>)
 declare <vscale x 1 x double> @llvm.roundeven.nvx1f64(<vscale x 1 x double>)
 declare <vscale x 2 x double> @llvm.roundeven.nvx2f64(<vscale x 2 x double>)
-declare <vscale x 4 x double> @llvm.roundeven.nvx5f64(<vscale x 4 x double>)
+declare <vscale x 4 x double> @llvm.roundeven.nvx4f64(<vscale x 4 x double>)
 declare <vscale x 8 x double> @llvm.roundeven.nvx8f64(<vscale x 8 x double>)
 
 declare <2 x float> @llvm.vp.ceil.v2f32(<2 x float>, <2 x i1>, i32)
@@ -759,7 +759,7 @@ declare <8 x double> @llvm.vp.ceil.v8f64(<8 x double>, <8 x i1>, i32)
 declare <16 x double> @llvm.vp.ceil.v16f64(<16 x double>, <16 x i1>, i32)
 declare <vscale x 1 x double> @llvm.vp.ceil.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
 declare <vscale x 2 x double> @llvm.vp.ceil.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.ceil.nvx5f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x double> @llvm.vp.ceil.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
 declare <vscale x 8 x double> @llvm.vp.ceil.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
 
 declare <2 x float> @llvm.vp.floor.v2f32(<2 x float>, <2 x i1>, i32)
@@ -778,7 +778,7 @@ declare <8 x double> @llvm.vp.floor.v8f64(<8 x double>, <8 x i1>, i32)
 declare <16 x double> @llvm.vp.floor.v16f64(<16 x double>, <16 x i1>, i32)
 declare <vscale x 1 x double> @llvm.vp.floor.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
 declare <vscale x 2 x double> @llvm.vp.floor.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.floor.nvx5f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x double> @llvm.vp.floor.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
 declare <vscale x 8 x double> @llvm.vp.floor.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
 
 declare <2 x float> @llvm.vp.round.v2f32(<2 x float>, <2 x i1>, i32)
@@ -797,7 +797,7 @@ declare <8 x double> @llvm.vp.round.v8f64(<8 x double>, <8 x i1>, i32)
 declare <16 x double> @llvm.vp.round.v16f64(<16 x double>, <16 x i1>, i32)
 declare <vscale x 1 x double> @llvm.vp.round.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
 declare <vscale x 2 x double> @llvm.vp.round.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.round.nvx5f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x double> @llvm.vp.round.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
 declare <vscale x 8 x double> @llvm.vp.round.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
 
 declare <2 x float> @llvm.vp.roundeven.v2f32(<2 x float>, <2 x i1>, i32)
@@ -816,7 +816,7 @@ declare <8 x double> @llvm.vp.roundeven.v8f64(<8 x double>, <8 x i1>, i32)
 declare <16 x double> @llvm.vp.roundeven.v16f64(<16 x double>, <16 x i1>, i32)
 declare <vscale x 1 x double> @llvm.vp.roundeven.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
 declare <vscale x 2 x double> @llvm.vp.roundeven.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.roundeven.nvx5f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x double> @llvm.vp.roundeven.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
 declare <vscale x 8 x double> @llvm.vp.roundeven.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
 
 declare <2 x float> @llvm.vp.roundtozero.v2f32(<2 x float>, <2 x i1>, i32)
@@ -835,7 +835,7 @@ declare <8 x double> @llvm.vp.roundtozero.v8f64(<8 x double>, <8 x i1>, i32)
 declare <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double>, <16 x i1>, i32)
 declare <vscale x 1 x double> @llvm.vp.roundtozero.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
 declare <vscale x 2 x double> @llvm.vp.roundtozero.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.roundtozero.nvx5f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x double> @llvm.vp.roundtozero.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
 declare <vscale x 8 x double> @llvm.vp.roundtozero.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
 
 declare <2 x float> @llvm.vp.rint.v2f32(<2 x float>, <2 x i1>, i32)
@@ -854,7 +854,7 @@ declare <8 x double> @llvm.vp.rint.v8f64(<8 x double>, <8 x i1>, i32)
 declare <16 x double> @llvm.vp.rint.v16f64(<16 x double>, <16 x i1>, i32)
 declare <vscale x 1 x double> @llvm.vp.rint.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
 declare <vscale x 2 x double> @llvm.vp.rint.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.rint.nvx5f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x double> @llvm.vp.rint.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
 declare <vscale x 8 x double> @llvm.vp.rint.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
 
 declare <2 x float> @llvm.vp.nearbyint.v2f32(<2 x float>, <2 x i1>, i32)
@@ -873,5 +873,5 @@ declare <8 x double> @llvm.vp.nearbyint.v8f64(<8 x double>, <8 x i1>, i32)
 declare <16 x double> @llvm.vp.nearbyint.v16f64(<16 x double>, <16 x i1>, i32)
 declare <vscale x 1 x double> @llvm.vp.nearbyint.nvx1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
 declare <vscale x 2 x double> @llvm.vp.nearbyint.nvx2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
-declare <vscale x 4 x double> @llvm.vp.nearbyint.nvx5f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x double> @llvm.vp.nearbyint.nvx4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
 declare <vscale x 8 x double> @llvm.vp.nearbyint.nvx8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)

``````````

</details>


https://github.com/llvm/llvm-project/pull/67025


More information about the llvm-commits mailing list