[llvm] [RISCV] Use opaque pointers in some tests. NFC (PR #127906)

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 19 14:10:22 PST 2025


https://github.com/topperc created https://github.com/llvm/llvm-project/pull/127906

Mechanical cleanup: replace the remaining typed-pointer syntax (iXLen*) in
these RVV vleff tests with the opaque ptr type. No functional change.
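For reference, every hunk below is the same mechanical rewrite: the
typed-pointer syntax "iXLen*" in parameter lists and store instructions
becomes the opaque "ptr" type, which carries no pointee type. A minimal
sketch of the pattern (the function names are illustrative, not from the
patch; iXLen is the test suite's placeholder for the XLEN-sized integer
type, substituted to i32 or i64 by the RUN lines):

  ; legacy typed-pointer form (removed in modern IR):
  define void @sketch_typed(iXLen %c, iXLen* %out) nounwind {
    store iXLen %c, iXLen* %out   ; pointee type spelled out on the pointer
    ret void
  }

  ; opaque-pointer form, as used after this patch:
  define void @sketch_opaque(iXLen %c, ptr %out) nounwind {
    store iXLen %c, ptr %out      ; value type comes from the stored operand
    ret void
  }

The store still names the value type, so nothing the backend needs is lost;
only the redundant pointee annotation on the pointer goes away.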

From bb8e0ac986e2bf9351ae135fdf5d05229cef9feb Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Wed, 19 Feb 2025 14:09:34 -0800
Subject: [PATCH] [RISCV] Use opaque pointers in some tests. NFC

---
 llvm/test/CodeGen/RISCV/rvv/masked-tama.ll |   4 +-
 llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll |   4 +-
 llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll |   4 +-
 llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll |   4 +-
 llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll |   4 +-
 llvm/test/CodeGen/RISCV/rvv/vleff.ll       | 356 ++++++++++-----------
 6 files changed, 188 insertions(+), 188 deletions(-)

diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
index cd678c9740721..4b56518a89d69 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
@@ -34,7 +34,7 @@ declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
   iXLen,
   iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(ptr %0, <vscale x 1 x i1> %1, iXLen %2, iXLen* %3) nounwind {
+define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(ptr %0, <vscale x 1 x i1> %1, iXLen %2, ptr %3) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -58,7 +58,7 @@ entry:
     iXLen %2, iXLen 3)
   %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
-  store iXLen %c, iXLen* %3
+  store iXLen %c, ptr %3
 
   ret <vscale x 1 x i64> %b
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
index 0e771eb7c4315..2e8b6c5fcca22 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tamu.ll
@@ -32,7 +32,7 @@ declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
   <vscale x 1 x i1>,
   iXLen,
   iXLen)
-define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -56,7 +56,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 1 x i64> %b
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
index c4c2fc88913bb..ca9b6245a8570 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tuma.ll
@@ -32,7 +32,7 @@ declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
   <vscale x 1 x i1>,
   iXLen,
   iXLen)
-define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m1, tu, ma
@@ -56,7 +56,7 @@ entry:
     iXLen %3, iXLen 2)
   %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 1 x i64> %b
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
index ec0ebb10e8f7a..df9e84c66cefa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tumu.ll
@@ -32,7 +32,7 @@ declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
   <vscale x 1 x i1>,
   iXLen,
   iXLen)
-define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
@@ -56,7 +56,7 @@ entry:
     iXLen %3, iXLen 0)
   %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 1 x i64> %b
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
index 3052108a12e3e..1735a0f5a1f2b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
@@ -52,7 +52,7 @@ declare { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff(
   ptr,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vleff_v_tu(<vscale x 1 x i8> %0, ptr %1, iXLen %2, iXLen* %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_vleff_v_tu(<vscale x 1 x i8> %0, ptr %1, iXLen %2, ptr %3) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_tu:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, mf8, tu, ma
@@ -75,7 +75,7 @@ entry:
     iXLen %2)
   %b = extractvalue { <vscale x 1 x i8>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x i8>, iXLen } %a, 1
-  store iXLen %c, iXLen* %3
+  store iXLen %c, ptr %3
   ret <vscale x 1 x i8> %b
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/vleff.ll b/llvm/test/CodeGen/RISCV/rvv/vleff.ll
index 39e0a0d02e88d..1f3959c1eac8e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vleff.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vleff.ll
@@ -9,7 +9,7 @@ declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.nxv1i64(
   ptr,
   iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vleff_v_nxv1i64_nxv1i64(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 1 x i64> @intrinsic_vleff_v_nxv1i64_nxv1i64(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv1i64_nxv1i64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -32,7 +32,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 1 x i64> %b
 }
 
@@ -43,7 +43,7 @@ declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(
   iXLen,
   iXLen);
 
-define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -67,7 +67,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 1 x i64> %b
 }
@@ -77,7 +77,7 @@ declare { <vscale x 2 x i64>, iXLen } @llvm.riscv.vleff.nxv2i64(
   ptr,
   iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vleff_v_nxv2i64_nxv2i64(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 2 x i64> @intrinsic_vleff_v_nxv2i64_nxv2i64(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv2i64_nxv2i64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -100,7 +100,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 2 x i64>, iXLen } %a, 0
   %c = extractvalue { <vscale x 2 x i64>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 2 x i64> %b
 }
 
@@ -111,7 +111,7 @@ declare { <vscale x 2 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv2i64(
   iXLen,
   iXLen);
 
-define <vscale x 2 x i64> @intrinsic_vleff_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 2 x i64> @intrinsic_vleff_mask_v_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i64_nxv2i64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -135,7 +135,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 2 x i64>, iXLen } %a, 0
   %c = extractvalue { <vscale x 2 x i64>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 2 x i64> %b
 }
@@ -145,7 +145,7 @@ declare { <vscale x 4 x i64>, iXLen } @llvm.riscv.vleff.nxv4i64(
   ptr,
   iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vleff_v_nxv4i64_nxv4i64(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 4 x i64> @intrinsic_vleff_v_nxv4i64_nxv4i64(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv4i64_nxv4i64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -168,7 +168,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 4 x i64>, iXLen } %a, 0
   %c = extractvalue { <vscale x 4 x i64>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 4 x i64> %b
 }
 
@@ -179,7 +179,7 @@ declare { <vscale x 4 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv4i64(
   iXLen,
   iXLen);
 
-define <vscale x 4 x i64> @intrinsic_vleff_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 4 x i64> @intrinsic_vleff_mask_v_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i64_nxv4i64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -203,7 +203,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 4 x i64>, iXLen } %a, 0
   %c = extractvalue { <vscale x 4 x i64>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 4 x i64> %b
 }
@@ -213,7 +213,7 @@ declare { <vscale x 8 x i64>, iXLen } @llvm.riscv.vleff.nxv8i64(
   ptr,
   iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vleff_v_nxv8i64_nxv8i64(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 8 x i64> @intrinsic_vleff_v_nxv8i64_nxv8i64(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv8i64_nxv8i64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -236,7 +236,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 8 x i64>, iXLen } %a, 0
   %c = extractvalue { <vscale x 8 x i64>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 8 x i64> %b
 }
 
@@ -247,7 +247,7 @@ declare { <vscale x 8 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv8i64(
   iXLen,
   iXLen);
 
-define <vscale x 8 x i64> @intrinsic_vleff_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 8 x i64> @intrinsic_vleff_mask_v_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i64_nxv8i64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -271,7 +271,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 8 x i64>, iXLen } %a, 0
   %c = extractvalue { <vscale x 8 x i64>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 8 x i64> %b
 }
@@ -281,7 +281,7 @@ declare { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.nxv1f64(
   ptr,
   iXLen);
 
-define <vscale x 1 x double> @intrinsic_vleff_v_nxv1f64_nxv1f64(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vleff_v_nxv1f64_nxv1f64(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv1f64_nxv1f64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -304,7 +304,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x double>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 1 x double> %b
 }
 
@@ -315,7 +315,7 @@ declare { <vscale x 1 x double>, iXLen } @llvm.riscv.vleff.mask.nxv1f64(
   iXLen,
   iXLen);
 
-define <vscale x 1 x double> @intrinsic_vleff_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 1 x double> @intrinsic_vleff_mask_v_nxv1f64_nxv1f64(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1f64_nxv1f64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -339,7 +339,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x double>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 1 x double> %b
 }
@@ -349,7 +349,7 @@ declare { <vscale x 2 x double>, iXLen } @llvm.riscv.vleff.nxv2f64(
   ptr,
   iXLen);
 
-define <vscale x 2 x double> @intrinsic_vleff_v_nxv2f64_nxv2f64(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 2 x double> @intrinsic_vleff_v_nxv2f64_nxv2f64(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv2f64_nxv2f64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -372,7 +372,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 2 x double>, iXLen } %a, 0
   %c = extractvalue { <vscale x 2 x double>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 2 x double> %b
 }
 
@@ -383,7 +383,7 @@ declare { <vscale x 2 x double>, iXLen } @llvm.riscv.vleff.mask.nxv2f64(
   iXLen,
   iXLen);
 
-define <vscale x 2 x double> @intrinsic_vleff_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 2 x double> @intrinsic_vleff_mask_v_nxv2f64_nxv2f64(<vscale x 2 x double> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2f64_nxv2f64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
@@ -407,7 +407,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 2 x double>, iXLen } %a, 0
   %c = extractvalue { <vscale x 2 x double>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 2 x double> %b
 }
@@ -417,7 +417,7 @@ declare { <vscale x 4 x double>, iXLen } @llvm.riscv.vleff.nxv4f64(
   ptr,
   iXLen);
 
-define <vscale x 4 x double> @intrinsic_vleff_v_nxv4f64_nxv4f64(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 4 x double> @intrinsic_vleff_v_nxv4f64_nxv4f64(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv4f64_nxv4f64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -440,7 +440,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 4 x double>, iXLen } %a, 0
   %c = extractvalue { <vscale x 4 x double>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 4 x double> %b
 }
 
@@ -451,7 +451,7 @@ declare { <vscale x 4 x double>, iXLen } @llvm.riscv.vleff.mask.nxv4f64(
   iXLen,
   iXLen);
 
-define <vscale x 4 x double> @intrinsic_vleff_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 4 x double> @intrinsic_vleff_mask_v_nxv4f64_nxv4f64(<vscale x 4 x double> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4f64_nxv4f64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
@@ -475,7 +475,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 4 x double>, iXLen } %a, 0
   %c = extractvalue { <vscale x 4 x double>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 4 x double> %b
 }
@@ -485,7 +485,7 @@ declare { <vscale x 8 x double>, iXLen } @llvm.riscv.vleff.nxv8f64(
   ptr,
   iXLen);
 
-define <vscale x 8 x double> @intrinsic_vleff_v_nxv8f64_nxv8f64(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 8 x double> @intrinsic_vleff_v_nxv8f64_nxv8f64(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv8f64_nxv8f64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
@@ -508,7 +508,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 8 x double>, iXLen } %a, 0
   %c = extractvalue { <vscale x 8 x double>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 8 x double> %b
 }
 
@@ -519,7 +519,7 @@ declare { <vscale x 8 x double>, iXLen } @llvm.riscv.vleff.mask.nxv8f64(
   iXLen,
   iXLen);
 
-define <vscale x 8 x double> @intrinsic_vleff_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 8 x double> @intrinsic_vleff_mask_v_nxv8f64_nxv8f64(<vscale x 8 x double> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8f64_nxv8f64:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
@@ -543,7 +543,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 8 x double>, iXLen } %a, 0
   %c = extractvalue { <vscale x 8 x double>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 8 x double> %b
 }
@@ -553,7 +553,7 @@ declare { <vscale x 1 x i32>, iXLen } @llvm.riscv.vleff.nxv1i32(
   ptr,
   iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vleff_v_nxv1i32_nxv1i32(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 1 x i32> @intrinsic_vleff_v_nxv1i32_nxv1i32(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv1i32_nxv1i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -576,7 +576,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 1 x i32>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x i32>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 1 x i32> %b
 }
 
@@ -587,7 +587,7 @@ declare { <vscale x 1 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv1i32(
   iXLen,
   iXLen);
 
-define <vscale x 1 x i32> @intrinsic_vleff_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 1 x i32> @intrinsic_vleff_mask_v_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i32_nxv1i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -611,7 +611,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 1 x i32>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x i32>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 1 x i32> %b
 }
@@ -621,7 +621,7 @@ declare { <vscale x 2 x i32>, iXLen } @llvm.riscv.vleff.nxv2i32(
   ptr,
   iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vleff_v_nxv2i32_nxv2i32(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 2 x i32> @intrinsic_vleff_v_nxv2i32_nxv2i32(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv2i32_nxv2i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -644,7 +644,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 2 x i32>, iXLen } %a, 0
   %c = extractvalue { <vscale x 2 x i32>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 2 x i32> %b
 }
 
@@ -655,7 +655,7 @@ declare { <vscale x 2 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv2i32(
   iXLen,
   iXLen);
 
-define <vscale x 2 x i32> @intrinsic_vleff_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 2 x i32> @intrinsic_vleff_mask_v_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i32_nxv2i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -679,7 +679,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 2 x i32>, iXLen } %a, 0
   %c = extractvalue { <vscale x 2 x i32>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 2 x i32> %b
 }
@@ -689,7 +689,7 @@ declare { <vscale x 4 x i32>, iXLen } @llvm.riscv.vleff.nxv4i32(
   ptr,
   iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vleff_v_nxv4i32_nxv4i32(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 4 x i32> @intrinsic_vleff_v_nxv4i32_nxv4i32(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv4i32_nxv4i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -712,7 +712,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 4 x i32>, iXLen } %a, 0
   %c = extractvalue { <vscale x 4 x i32>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 4 x i32> %b
 }
 
@@ -723,7 +723,7 @@ declare { <vscale x 4 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv4i32(
   iXLen,
   iXLen);
 
-define <vscale x 4 x i32> @intrinsic_vleff_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 4 x i32> @intrinsic_vleff_mask_v_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i32_nxv4i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -747,7 +747,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 4 x i32>, iXLen } %a, 0
   %c = extractvalue { <vscale x 4 x i32>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 4 x i32> %b
 }
@@ -757,7 +757,7 @@ declare { <vscale x 8 x i32>, iXLen } @llvm.riscv.vleff.nxv8i32(
   ptr,
   iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vleff_v_nxv8i32_nxv8i32(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 8 x i32> @intrinsic_vleff_v_nxv8i32_nxv8i32(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv8i32_nxv8i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -780,7 +780,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 8 x i32>, iXLen } %a, 0
   %c = extractvalue { <vscale x 8 x i32>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 8 x i32> %b
 }
 
@@ -791,7 +791,7 @@ declare { <vscale x 8 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv8i32(
   iXLen,
   iXLen);
 
-define <vscale x 8 x i32> @intrinsic_vleff_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 8 x i32> @intrinsic_vleff_mask_v_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i32_nxv8i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -815,7 +815,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 8 x i32>, iXLen } %a, 0
   %c = extractvalue { <vscale x 8 x i32>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 8 x i32> %b
 }
@@ -825,7 +825,7 @@ declare { <vscale x 16 x i32>, iXLen } @llvm.riscv.vleff.nxv16i32(
   ptr,
   iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vleff_v_nxv16i32_nxv16i32(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 16 x i32> @intrinsic_vleff_v_nxv16i32_nxv16i32(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv16i32_nxv16i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -848,7 +848,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 16 x i32>, iXLen } %a, 0
   %c = extractvalue { <vscale x 16 x i32>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 16 x i32> %b
 }
 
@@ -859,7 +859,7 @@ declare { <vscale x 16 x i32>, iXLen } @llvm.riscv.vleff.mask.nxv16i32(
   iXLen,
   iXLen);
 
-define <vscale x 16 x i32> @intrinsic_vleff_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 16 x i32> @intrinsic_vleff_mask_v_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i32_nxv16i32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -883,7 +883,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 16 x i32>, iXLen } %a, 0
   %c = extractvalue { <vscale x 16 x i32>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 16 x i32> %b
 }
@@ -893,7 +893,7 @@ declare { <vscale x 1 x float>, iXLen } @llvm.riscv.vleff.nxv1f32(
   ptr,
   iXLen);
 
-define <vscale x 1 x float> @intrinsic_vleff_v_nxv1f32_nxv1f32(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 1 x float> @intrinsic_vleff_v_nxv1f32_nxv1f32(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv1f32_nxv1f32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -916,7 +916,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 1 x float>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x float>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 1 x float> %b
 }
 
@@ -927,7 +927,7 @@ declare { <vscale x 1 x float>, iXLen } @llvm.riscv.vleff.mask.nxv1f32(
   iXLen,
   iXLen);
 
-define <vscale x 1 x float> @intrinsic_vleff_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 1 x float> @intrinsic_vleff_mask_v_nxv1f32_nxv1f32(<vscale x 1 x float> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1f32_nxv1f32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
@@ -951,7 +951,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 1 x float>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x float>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 1 x float> %b
 }
@@ -961,7 +961,7 @@ declare { <vscale x 2 x float>, iXLen } @llvm.riscv.vleff.nxv2f32(
   ptr,
   iXLen);
 
-define <vscale x 2 x float> @intrinsic_vleff_v_nxv2f32_nxv2f32(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 2 x float> @intrinsic_vleff_v_nxv2f32_nxv2f32(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv2f32_nxv2f32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -984,7 +984,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 2 x float>, iXLen } %a, 0
   %c = extractvalue { <vscale x 2 x float>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 2 x float> %b
 }
 
@@ -995,7 +995,7 @@ declare { <vscale x 2 x float>, iXLen } @llvm.riscv.vleff.mask.nxv2f32(
   iXLen,
   iXLen);
 
-define <vscale x 2 x float> @intrinsic_vleff_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 2 x float> @intrinsic_vleff_mask_v_nxv2f32_nxv2f32(<vscale x 2 x float> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2f32_nxv2f32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
@@ -1019,7 +1019,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 2 x float>, iXLen } %a, 0
   %c = extractvalue { <vscale x 2 x float>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 2 x float> %b
 }
@@ -1029,7 +1029,7 @@ declare { <vscale x 4 x float>, iXLen } @llvm.riscv.vleff.nxv4f32(
   ptr,
   iXLen);
 
-define <vscale x 4 x float> @intrinsic_vleff_v_nxv4f32_nxv4f32(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 4 x float> @intrinsic_vleff_v_nxv4f32_nxv4f32(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv4f32_nxv4f32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -1052,7 +1052,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 4 x float>, iXLen } %a, 0
   %c = extractvalue { <vscale x 4 x float>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 4 x float> %b
 }
 
@@ -1063,7 +1063,7 @@ declare { <vscale x 4 x float>, iXLen } @llvm.riscv.vleff.mask.nxv4f32(
   iXLen,
   iXLen);
 
-define <vscale x 4 x float> @intrinsic_vleff_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 4 x float> @intrinsic_vleff_mask_v_nxv4f32_nxv4f32(<vscale x 4 x float> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4f32_nxv4f32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
@@ -1087,7 +1087,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 4 x float>, iXLen } %a, 0
   %c = extractvalue { <vscale x 4 x float>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 4 x float> %b
 }
@@ -1097,7 +1097,7 @@ declare { <vscale x 8 x float>, iXLen } @llvm.riscv.vleff.nxv8f32(
   ptr,
   iXLen);
 
-define <vscale x 8 x float> @intrinsic_vleff_v_nxv8f32_nxv8f32(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 8 x float> @intrinsic_vleff_v_nxv8f32_nxv8f32(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv8f32_nxv8f32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -1120,7 +1120,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 8 x float>, iXLen } %a, 0
   %c = extractvalue { <vscale x 8 x float>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 8 x float> %b
 }
 
@@ -1131,7 +1131,7 @@ declare { <vscale x 8 x float>, iXLen } @llvm.riscv.vleff.mask.nxv8f32(
   iXLen,
   iXLen);
 
-define <vscale x 8 x float> @intrinsic_vleff_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 8 x float> @intrinsic_vleff_mask_v_nxv8f32_nxv8f32(<vscale x 8 x float> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8f32_nxv8f32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
@@ -1155,7 +1155,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 8 x float>, iXLen } %a, 0
   %c = extractvalue { <vscale x 8 x float>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 8 x float> %b
 }
@@ -1165,7 +1165,7 @@ declare { <vscale x 16 x float>, iXLen } @llvm.riscv.vleff.nxv16f32(
   ptr,
   iXLen);
 
-define <vscale x 16 x float> @intrinsic_vleff_v_nxv16f32_nxv16f32(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 16 x float> @intrinsic_vleff_v_nxv16f32_nxv16f32(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv16f32_nxv16f32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
@@ -1188,7 +1188,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 16 x float>, iXLen } %a, 0
   %c = extractvalue { <vscale x 16 x float>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 16 x float> %b
 }
 
@@ -1199,7 +1199,7 @@ declare { <vscale x 16 x float>, iXLen } @llvm.riscv.vleff.mask.nxv16f32(
   iXLen,
   iXLen);
 
-define <vscale x 16 x float> @intrinsic_vleff_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 16 x float> @intrinsic_vleff_mask_v_nxv16f32_nxv16f32(<vscale x 16 x float> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16f32_nxv16f32:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
@@ -1223,7 +1223,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 16 x float>, iXLen } %a, 0
   %c = extractvalue { <vscale x 16 x float>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 16 x float> %b
 }
@@ -1233,7 +1233,7 @@ declare { <vscale x 1 x i16>, iXLen } @llvm.riscv.vleff.nxv1i16(
   ptr,
   iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vleff_v_nxv1i16_nxv1i16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 1 x i16> @intrinsic_vleff_v_nxv1i16_nxv1i16(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv1i16_nxv1i16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1256,7 +1256,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 1 x i16>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x i16>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 1 x i16> %b
 }
 
@@ -1267,7 +1267,7 @@ declare { <vscale x 1 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv1i16(
   iXLen,
   iXLen);
 
-define <vscale x 1 x i16> @intrinsic_vleff_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 1 x i16> @intrinsic_vleff_mask_v_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i16_nxv1i16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1291,7 +1291,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 1 x i16>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x i16>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 1 x i16> %b
 }
@@ -1301,7 +1301,7 @@ declare { <vscale x 2 x i16>, iXLen } @llvm.riscv.vleff.nxv2i16(
   ptr,
   iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vleff_v_nxv2i16_nxv2i16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 2 x i16> @intrinsic_vleff_v_nxv2i16_nxv2i16(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv2i16_nxv2i16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1324,7 +1324,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 2 x i16>, iXLen } %a, 0
   %c = extractvalue { <vscale x 2 x i16>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 2 x i16> %b
 }
 
@@ -1335,7 +1335,7 @@ declare { <vscale x 2 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv2i16(
   iXLen,
   iXLen);
 
-define <vscale x 2 x i16> @intrinsic_vleff_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 2 x i16> @intrinsic_vleff_mask_v_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i16_nxv2i16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1359,7 +1359,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 2 x i16>, iXLen } %a, 0
   %c = extractvalue { <vscale x 2 x i16>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 2 x i16> %b
 }
@@ -1369,7 +1369,7 @@ declare { <vscale x 4 x i16>, iXLen } @llvm.riscv.vleff.nxv4i16(
   ptr,
   iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vleff_v_nxv4i16_nxv4i16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 4 x i16> @intrinsic_vleff_v_nxv4i16_nxv4i16(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv4i16_nxv4i16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1392,7 +1392,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 4 x i16>, iXLen } %a, 0
   %c = extractvalue { <vscale x 4 x i16>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 4 x i16> %b
 }
 
@@ -1403,7 +1403,7 @@ declare { <vscale x 4 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv4i16(
   iXLen,
   iXLen);
 
-define <vscale x 4 x i16> @intrinsic_vleff_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 4 x i16> @intrinsic_vleff_mask_v_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i16_nxv4i16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1427,7 +1427,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 4 x i16>, iXLen } %a, 0
   %c = extractvalue { <vscale x 4 x i16>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 4 x i16> %b
 }
@@ -1437,7 +1437,7 @@ declare { <vscale x 8 x i16>, iXLen } @llvm.riscv.vleff.nxv8i16(
   ptr,
   iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vleff_v_nxv8i16_nxv8i16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 8 x i16> @intrinsic_vleff_v_nxv8i16_nxv8i16(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv8i16_nxv8i16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1460,7 +1460,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 8 x i16>, iXLen } %a, 0
   %c = extractvalue { <vscale x 8 x i16>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 8 x i16> %b
 }
 
@@ -1471,7 +1471,7 @@ declare { <vscale x 8 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv8i16(
   iXLen,
   iXLen);
 
-define <vscale x 8 x i16> @intrinsic_vleff_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 8 x i16> @intrinsic_vleff_mask_v_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i16_nxv8i16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1495,7 +1495,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 8 x i16>, iXLen } %a, 0
   %c = extractvalue { <vscale x 8 x i16>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 8 x i16> %b
 }
@@ -1505,7 +1505,7 @@ declare { <vscale x 16 x i16>, iXLen } @llvm.riscv.vleff.nxv16i16(
   ptr,
   iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vleff_v_nxv16i16_nxv16i16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 16 x i16> @intrinsic_vleff_v_nxv16i16_nxv16i16(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv16i16_nxv16i16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1528,7 +1528,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 16 x i16>, iXLen } %a, 0
   %c = extractvalue { <vscale x 16 x i16>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 16 x i16> %b
 }
 
@@ -1539,7 +1539,7 @@ declare { <vscale x 16 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv16i16(
   iXLen,
   iXLen);
 
-define <vscale x 16 x i16> @intrinsic_vleff_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 16 x i16> @intrinsic_vleff_mask_v_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i16_nxv16i16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1563,7 +1563,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 16 x i16>, iXLen } %a, 0
   %c = extractvalue { <vscale x 16 x i16>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 16 x i16> %b
 }
@@ -1573,7 +1573,7 @@ declare { <vscale x 32 x i16>, iXLen } @llvm.riscv.vleff.nxv32i16(
   ptr,
   iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vleff_v_nxv32i16_nxv32i16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 32 x i16> @intrinsic_vleff_v_nxv32i16_nxv32i16(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv32i16_nxv32i16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -1596,7 +1596,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 32 x i16>, iXLen } %a, 0
   %c = extractvalue { <vscale x 32 x i16>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 32 x i16> %b
 }
 
@@ -1607,7 +1607,7 @@ declare { <vscale x 32 x i16>, iXLen } @llvm.riscv.vleff.mask.nxv32i16(
   iXLen,
   iXLen);
 
-define <vscale x 32 x i16> @intrinsic_vleff_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 32 x i16> @intrinsic_vleff_mask_v_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv32i16_nxv32i16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -1631,7 +1631,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 32 x i16>, iXLen } %a, 0
   %c = extractvalue { <vscale x 32 x i16>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 32 x i16> %b
 }
@@ -1641,7 +1641,7 @@ declare { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.nxv1bf16(
   ptr,
   iXLen);
 
-define <vscale x 1 x half> @intrinsic_vleff_v_nxv1half_nxv1bf16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 1 x half> @intrinsic_vleff_v_nxv1half_nxv1bf16(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv1half_nxv1bf16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1664,7 +1664,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 1 x half>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x half>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 1 x half> %b
 }
 
@@ -1675,7 +1675,7 @@ declare { <vscale x 1 x half>, iXLen } @llvm.riscv.vleff.mask.nxv1bf16(
   iXLen,
   iXLen);
 
-define <vscale x 1 x half> @intrinsic_vleff_mask_v_nxv1half_nxv1bf16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 1 x half> @intrinsic_vleff_mask_v_nxv1half_nxv1bf16(<vscale x 1 x half> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1half_nxv1bf16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -1699,7 +1699,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 1 x half>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x half>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 1 x half> %b
 }
@@ -1709,7 +1709,7 @@ declare { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.nxv2bf16(
   ptr,
   iXLen);
 
-define <vscale x 2 x half> @intrinsic_vleff_v_nxv2half_nxv2bf16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 2 x half> @intrinsic_vleff_v_nxv2half_nxv2bf16(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv2half_nxv2bf16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -1732,7 +1732,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 2 x half>, iXLen } %a, 0
   %c = extractvalue { <vscale x 2 x half>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 2 x half> %b
 }
 
@@ -1743,7 +1743,7 @@ declare { <vscale x 2 x half>, iXLen } @llvm.riscv.vleff.mask.nxv2bf16(
   iXLen,
   iXLen);
 
-define <vscale x 2 x half> @intrinsic_vleff_mask_v_nxv2half_nxv2bf16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 2 x half> @intrinsic_vleff_mask_v_nxv2half_nxv2bf16(<vscale x 2 x half> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2half_nxv2bf16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -1767,7 +1767,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 2 x half>, iXLen } %a, 0
   %c = extractvalue { <vscale x 2 x half>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 2 x half> %b
 }
@@ -1777,7 +1777,7 @@ declare { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.nxv4bf16(
   ptr,
   iXLen);
 
-define <vscale x 4 x half> @intrinsic_vleff_v_nxv4half_nxv4bf16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 4 x half> @intrinsic_vleff_v_nxv4half_nxv4bf16(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv4half_nxv4bf16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1800,7 +1800,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 4 x half>, iXLen } %a, 0
   %c = extractvalue { <vscale x 4 x half>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 4 x half> %b
 }
 
@@ -1811,7 +1811,7 @@ declare { <vscale x 4 x half>, iXLen } @llvm.riscv.vleff.mask.nxv4bf16(
   iXLen,
   iXLen);
 
-define <vscale x 4 x half> @intrinsic_vleff_mask_v_nxv4half_nxv4bf16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 4 x half> @intrinsic_vleff_mask_v_nxv4half_nxv4bf16(<vscale x 4 x half> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4half_nxv4bf16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -1835,7 +1835,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 4 x half>, iXLen } %a, 0
   %c = extractvalue { <vscale x 4 x half>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 4 x half> %b
 }
@@ -1845,7 +1845,7 @@ declare { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.nxv8bf16(
   ptr,
   iXLen);
 
-define <vscale x 8 x half> @intrinsic_vleff_v_nxv8half_nxv8bf16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 8 x half> @intrinsic_vleff_v_nxv8half_nxv8bf16(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv8half_nxv8bf16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1868,7 +1868,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 8 x half>, iXLen } %a, 0
   %c = extractvalue { <vscale x 8 x half>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 8 x half> %b
 }
 
@@ -1879,7 +1879,7 @@ declare { <vscale x 8 x half>, iXLen } @llvm.riscv.vleff.mask.nxv8bf16(
   iXLen,
   iXLen);
 
-define <vscale x 8 x half> @intrinsic_vleff_mask_v_nxv8half_nxv8bf16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 8 x half> @intrinsic_vleff_mask_v_nxv8half_nxv8bf16(<vscale x 8 x half> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8half_nxv8bf16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -1903,7 +1903,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 8 x half>, iXLen } %a, 0
   %c = extractvalue { <vscale x 8 x half>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 8 x half> %b
 }
@@ -1913,7 +1913,7 @@ declare { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.nxv16bf16(
   ptr,
   iXLen);
 
-define <vscale x 16 x half> @intrinsic_vleff_v_nxv16half_nxv16bf16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 16 x half> @intrinsic_vleff_v_nxv16half_nxv16bf16(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv16half_nxv16bf16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -1936,7 +1936,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 16 x half>, iXLen } %a, 0
   %c = extractvalue { <vscale x 16 x half>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 16 x half> %b
 }
 
@@ -1947,7 +1947,7 @@ declare { <vscale x 16 x half>, iXLen } @llvm.riscv.vleff.mask.nxv16bf16(
   iXLen,
   iXLen);
 
-define <vscale x 16 x half> @intrinsic_vleff_mask_v_nxv16half_nxv16bf16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 16 x half> @intrinsic_vleff_mask_v_nxv16half_nxv16bf16(<vscale x 16 x half> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16half_nxv16bf16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -1971,7 +1971,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 16 x half>, iXLen } %a, 0
   %c = extractvalue { <vscale x 16 x half>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 16 x half> %b
 }
@@ -1981,7 +1981,7 @@ declare { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.nxv32bf16(
   ptr,
   iXLen);
 
-define <vscale x 32 x half> @intrinsic_vleff_v_nxv32half_nxv32bf16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 32 x half> @intrinsic_vleff_v_nxv32half_nxv32bf16(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv32half_nxv32bf16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -2004,7 +2004,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 32 x half>, iXLen } %a, 0
   %c = extractvalue { <vscale x 32 x half>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 32 x half> %b
 }
 
@@ -2015,7 +2015,7 @@ declare { <vscale x 32 x half>, iXLen } @llvm.riscv.vleff.mask.nxv32bf16(
   iXLen,
   iXLen);
 
-define <vscale x 32 x half> @intrinsic_vleff_mask_v_nxv32half_nxv32bf16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 32 x half> @intrinsic_vleff_mask_v_nxv32half_nxv32bf16(<vscale x 32 x half> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv32half_nxv32bf16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -2039,7 +2039,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 32 x half>, iXLen } %a, 0
   %c = extractvalue { <vscale x 32 x half>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 32 x half> %b
 }
@@ -2049,7 +2049,7 @@ declare { <vscale x 1 x bfloat>, iXLen } @llvm.riscv.vleff.nxv1f16(
   ptr,
   iXLen);
 
-define <vscale x 1 x bfloat> @intrinsic_vleff_v_nxv1bfloat_nxv1f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 1 x bfloat> @intrinsic_vleff_v_nxv1bfloat_nxv1f16(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv1bfloat_nxv1f16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -2072,7 +2072,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 1 x bfloat>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x bfloat>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 1 x bfloat> %b
 }
 
@@ -2083,7 +2083,7 @@ declare { <vscale x 1 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv1f16(
   iXLen,
   iXLen);
 
-define <vscale x 1 x bfloat> @intrinsic_vleff_mask_v_nxv1bfloat_nxv1f16(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 1 x bfloat> @intrinsic_vleff_mask_v_nxv1bfloat_nxv1f16(<vscale x 1 x bfloat> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1bfloat_nxv1f16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
@@ -2107,7 +2107,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 1 x bfloat>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x bfloat>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 1 x bfloat> %b
 }
@@ -2117,7 +2117,7 @@ declare { <vscale x 2 x bfloat>, iXLen } @llvm.riscv.vleff.nxv2f16(
   ptr,
   iXLen);
 
-define <vscale x 2 x bfloat> @intrinsic_vleff_v_nxv2bfloat_nxv2f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 2 x bfloat> @intrinsic_vleff_v_nxv2bfloat_nxv2f16(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv2bfloat_nxv2f16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -2140,7 +2140,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 2 x bfloat>, iXLen } %a, 0
   %c = extractvalue { <vscale x 2 x bfloat>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 2 x bfloat> %b
 }
 
@@ -2151,7 +2151,7 @@ declare { <vscale x 2 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv2f16(
   iXLen,
   iXLen);
 
-define <vscale x 2 x bfloat> @intrinsic_vleff_mask_v_nxv2bfloat_nxv2f16(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 2 x bfloat> @intrinsic_vleff_mask_v_nxv2bfloat_nxv2f16(<vscale x 2 x bfloat> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2bfloat_nxv2f16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
@@ -2175,7 +2175,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 2 x bfloat>, iXLen } %a, 0
   %c = extractvalue { <vscale x 2 x bfloat>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 2 x bfloat> %b
 }
@@ -2185,7 +2185,7 @@ declare { <vscale x 4 x bfloat>, iXLen } @llvm.riscv.vleff.nxv4f16(
   ptr,
   iXLen);
 
-define <vscale x 4 x bfloat> @intrinsic_vleff_v_nxv4bfloat_nxv4f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 4 x bfloat> @intrinsic_vleff_v_nxv4bfloat_nxv4f16(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv4bfloat_nxv4f16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -2208,7 +2208,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 4 x bfloat>, iXLen } %a, 0
   %c = extractvalue { <vscale x 4 x bfloat>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 4 x bfloat> %b
 }
 
@@ -2219,7 +2219,7 @@ declare { <vscale x 4 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv4f16(
   iXLen,
   iXLen);
 
-define <vscale x 4 x bfloat> @intrinsic_vleff_mask_v_nxv4bfloat_nxv4f16(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 4 x bfloat> @intrinsic_vleff_mask_v_nxv4bfloat_nxv4f16(<vscale x 4 x bfloat> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4bfloat_nxv4f16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
@@ -2243,7 +2243,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 4 x bfloat>, iXLen } %a, 0
   %c = extractvalue { <vscale x 4 x bfloat>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 4 x bfloat> %b
 }
@@ -2253,7 +2253,7 @@ declare { <vscale x 8 x bfloat>, iXLen } @llvm.riscv.vleff.nxv8f16(
   ptr,
   iXLen);
 
-define <vscale x 8 x bfloat> @intrinsic_vleff_v_nxv8bfloat_nxv8f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 8 x bfloat> @intrinsic_vleff_v_nxv8bfloat_nxv8f16(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv8bfloat_nxv8f16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -2276,7 +2276,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 8 x bfloat>, iXLen } %a, 0
   %c = extractvalue { <vscale x 8 x bfloat>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 8 x bfloat> %b
 }
 
@@ -2287,7 +2287,7 @@ declare { <vscale x 8 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv8f16(
   iXLen,
   iXLen);
 
-define <vscale x 8 x bfloat> @intrinsic_vleff_mask_v_nxv8bfloat_nxv8f16(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 8 x bfloat> @intrinsic_vleff_mask_v_nxv8bfloat_nxv8f16(<vscale x 8 x bfloat> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8bfloat_nxv8f16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
@@ -2311,7 +2311,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 8 x bfloat>, iXLen } %a, 0
   %c = extractvalue { <vscale x 8 x bfloat>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 8 x bfloat> %b
 }
@@ -2321,7 +2321,7 @@ declare { <vscale x 16 x bfloat>, iXLen } @llvm.riscv.vleff.nxv16f16(
   ptr,
   iXLen);
 
-define <vscale x 16 x bfloat> @intrinsic_vleff_v_nxv16bfloat_nxv16f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 16 x bfloat> @intrinsic_vleff_v_nxv16bfloat_nxv16f16(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv16bfloat_nxv16f16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -2344,7 +2344,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 16 x bfloat>, iXLen } %a, 0
   %c = extractvalue { <vscale x 16 x bfloat>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 16 x bfloat> %b
 }
 
@@ -2355,7 +2355,7 @@ declare { <vscale x 16 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv16f16(
   iXLen,
   iXLen);
 
-define <vscale x 16 x bfloat> @intrinsic_vleff_mask_v_nxv16bfloat_nxv16f16(<vscale x 16 x bfloat> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 16 x bfloat> @intrinsic_vleff_mask_v_nxv16bfloat_nxv16f16(<vscale x 16 x bfloat> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16bfloat_nxv16f16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
@@ -2379,7 +2379,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 16 x bfloat>, iXLen } %a, 0
   %c = extractvalue { <vscale x 16 x bfloat>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 16 x bfloat> %b
 }
@@ -2389,7 +2389,7 @@ declare { <vscale x 32 x bfloat>, iXLen } @llvm.riscv.vleff.nxv32f16(
   ptr,
   iXLen);
 
-define <vscale x 32 x bfloat> @intrinsic_vleff_v_nxv32bfloat_nxv32f16(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 32 x bfloat> @intrinsic_vleff_v_nxv32bfloat_nxv32f16(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv32bfloat_nxv32f16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
@@ -2412,7 +2412,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 32 x bfloat>, iXLen } %a, 0
   %c = extractvalue { <vscale x 32 x bfloat>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 32 x bfloat> %b
 }
 
@@ -2423,7 +2423,7 @@ declare { <vscale x 32 x bfloat>, iXLen } @llvm.riscv.vleff.mask.nxv32f16(
   iXLen,
   iXLen);
 
-define <vscale x 32 x bfloat> @intrinsic_vleff_mask_v_nxv32bfloat_nxv32f16(<vscale x 32 x bfloat> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 32 x bfloat> @intrinsic_vleff_mask_v_nxv32bfloat_nxv32f16(<vscale x 32 x bfloat> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv32bfloat_nxv32f16:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
@@ -2447,7 +2447,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 32 x bfloat>, iXLen } %a, 0
   %c = extractvalue { <vscale x 32 x bfloat>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 32 x bfloat> %b
 }
@@ -2457,7 +2457,7 @@ declare { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff.nxv1i8(
   ptr,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vleff_v_nxv1i8_nxv1i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 1 x i8> @intrinsic_vleff_v_nxv1i8_nxv1i8(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv1i8_nxv1i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -2480,7 +2480,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 1 x i8>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x i8>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 1 x i8> %b
 }
 
@@ -2491,7 +2491,7 @@ declare { <vscale x 1 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv1i8(
   iXLen,
   iXLen);
 
-define <vscale x 1 x i8> @intrinsic_vleff_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 1 x i8> @intrinsic_vleff_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i8_nxv1i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -2515,7 +2515,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 1 x i8>, iXLen } %a, 0
   %c = extractvalue { <vscale x 1 x i8>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 1 x i8> %b
 }
@@ -2525,7 +2525,7 @@ declare { <vscale x 2 x i8>, iXLen } @llvm.riscv.vleff.nxv2i8(
   ptr,
   iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vleff_v_nxv2i8_nxv2i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 2 x i8> @intrinsic_vleff_v_nxv2i8_nxv2i8(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv2i8_nxv2i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -2548,7 +2548,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 2 x i8>, iXLen } %a, 0
   %c = extractvalue { <vscale x 2 x i8>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 2 x i8> %b
 }
 
@@ -2559,7 +2559,7 @@ declare { <vscale x 2 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv2i8(
   iXLen,
   iXLen);
 
-define <vscale x 2 x i8> @intrinsic_vleff_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 2 x i8> @intrinsic_vleff_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv2i8_nxv2i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -2583,7 +2583,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 2 x i8>, iXLen } %a, 0
   %c = extractvalue { <vscale x 2 x i8>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 2 x i8> %b
 }
@@ -2593,7 +2593,7 @@ declare { <vscale x 4 x i8>, iXLen } @llvm.riscv.vleff.nxv4i8(
   ptr,
   iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vleff_v_nxv4i8_nxv4i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 4 x i8> @intrinsic_vleff_v_nxv4i8_nxv4i8(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv4i8_nxv4i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -2616,7 +2616,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 4 x i8>, iXLen } %a, 0
   %c = extractvalue { <vscale x 4 x i8>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 4 x i8> %b
 }
 
@@ -2627,7 +2627,7 @@ declare { <vscale x 4 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv4i8(
   iXLen,
   iXLen);
 
-define <vscale x 4 x i8> @intrinsic_vleff_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 4 x i8> @intrinsic_vleff_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv4i8_nxv4i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -2651,7 +2651,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 4 x i8>, iXLen } %a, 0
   %c = extractvalue { <vscale x 4 x i8>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 4 x i8> %b
 }
@@ -2661,7 +2661,7 @@ declare { <vscale x 8 x i8>, iXLen } @llvm.riscv.vleff.nxv8i8(
   ptr,
   iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vleff_v_nxv8i8_nxv8i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 8 x i8> @intrinsic_vleff_v_nxv8i8_nxv8i8(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv8i8_nxv8i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -2684,7 +2684,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 8 x i8>, iXLen } %a, 0
   %c = extractvalue { <vscale x 8 x i8>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 8 x i8> %b
 }
 
@@ -2695,7 +2695,7 @@ declare { <vscale x 8 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv8i8(
   iXLen,
   iXLen);
 
-define <vscale x 8 x i8> @intrinsic_vleff_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 8 x i8> @intrinsic_vleff_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv8i8_nxv8i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -2719,7 +2719,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 8 x i8>, iXLen } %a, 0
   %c = extractvalue { <vscale x 8 x i8>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 8 x i8> %b
 }
@@ -2729,7 +2729,7 @@ declare { <vscale x 16 x i8>, iXLen } @llvm.riscv.vleff.nxv16i8(
   ptr,
   iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vleff_v_nxv16i8_nxv16i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 16 x i8> @intrinsic_vleff_v_nxv16i8_nxv16i8(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv16i8_nxv16i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -2752,7 +2752,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 16 x i8>, iXLen } %a, 0
   %c = extractvalue { <vscale x 16 x i8>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 16 x i8> %b
 }
 
@@ -2763,7 +2763,7 @@ declare { <vscale x 16 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv16i8(
   iXLen,
   iXLen);
 
-define <vscale x 16 x i8> @intrinsic_vleff_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 16 x i8> @intrinsic_vleff_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv16i8_nxv16i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -2787,7 +2787,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 16 x i8>, iXLen } %a, 0
   %c = extractvalue { <vscale x 16 x i8>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 16 x i8> %b
 }
@@ -2797,7 +2797,7 @@ declare { <vscale x 32 x i8>, iXLen } @llvm.riscv.vleff.nxv32i8(
   ptr,
   iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vleff_v_nxv32i8_nxv32i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 32 x i8> @intrinsic_vleff_v_nxv32i8_nxv32i8(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv32i8_nxv32i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -2820,7 +2820,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 32 x i8>, iXLen } %a, 0
   %c = extractvalue { <vscale x 32 x i8>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 32 x i8> %b
 }
 
@@ -2831,7 +2831,7 @@ declare { <vscale x 32 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv32i8(
   iXLen,
   iXLen);
 
-define <vscale x 32 x i8> @intrinsic_vleff_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 32 x i8> @intrinsic_vleff_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv32i8_nxv32i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -2855,7 +2855,7 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 32 x i8>, iXLen } %a, 0
   %c = extractvalue { <vscale x 32 x i8>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 32 x i8> %b
 }
@@ -2865,7 +2865,7 @@ declare { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.nxv64i8(
   ptr,
   iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vleff_v_nxv64i8_nxv64i8(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 64 x i8> @intrinsic_vleff_v_nxv64i8_nxv64i8(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_v_nxv64i8_nxv64i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -2888,7 +2888,7 @@ entry:
     iXLen %1)
   %b = extractvalue { <vscale x 64 x i8>, iXLen } %a, 0
   %c = extractvalue { <vscale x 64 x i8>, iXLen } %a, 1
-  store iXLen %c, iXLen* %2
+  store iXLen %c, ptr %2
   ret <vscale x 64 x i8> %b
 }
 
@@ -2899,7 +2899,7 @@ declare { <vscale x 64 x i8>, iXLen } @llvm.riscv.vleff.mask.nxv64i8(
   iXLen,
   iXLen);
 
-define <vscale x 64 x i8> @intrinsic_vleff_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define <vscale x 64 x i8> @intrinsic_vleff_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_v_nxv64i8_nxv64i8:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -2923,13 +2923,13 @@ entry:
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 64 x i8>, iXLen } %a, 0
   %c = extractvalue { <vscale x 64 x i8>, iXLen } %a, 1
-  store iXLen %c, iXLen* %4
+  store iXLen %c, ptr %4
 
   ret <vscale x 64 x i8> %b
 }
 
 ; Test with the VL output unused
-define <vscale x 1 x double> @intrinsic_vleff_dead_vl(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define <vscale x 1 x double> @intrinsic_vleff_dead_vl(ptr %0, iXLen %1, ptr %2) nounwind {
 ; CHECK-LABEL: intrinsic_vleff_dead_vl:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -2962,7 +2962,7 @@ entry:
 }
 
 ; Test with the loaded value unused
-define void @intrinsic_vleff_dead_value(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define void @intrinsic_vleff_dead_value(ptr %0, iXLen %1, ptr %2) nounwind {
 ; RV32-LABEL: intrinsic_vleff_dead_value:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -2984,11 +2984,11 @@ entry:
     ptr %0,
     iXLen %1)
   %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 1
-  store iXLen %b, iXLen* %2
+  store iXLen %b, ptr %2
   ret void
 }
 
-define void @intrinsic_vleff_mask_dead_value(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
+define void @intrinsic_vleff_mask_dead_value(<vscale x 1 x double> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3, ptr %4) nounwind {
 ; RV32-LABEL: intrinsic_vleff_mask_dead_value:
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
@@ -3011,13 +3011,13 @@ entry:
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 1)
   %b = extractvalue { <vscale x 1 x double>, iXLen } %a, 1
-  store iXLen %b, iXLen* %4
+  store iXLen %b, ptr %4
 
   ret void
 }
 
 ; Test with both outputs dead. Make sure the vleff isn't deleted.
-define void @intrinsic_vleff_dead_all(ptr %0, iXLen %1, iXLen* %2) nounwind {
+define void @intrinsic_vleff_dead_all(ptr %0, iXLen %1, ptr %2) nounwind {
 ; CHECK-LABEL: intrinsic_vleff_dead_all:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma


