[llvm] 38cd903 - [RISCV] Convert the XAndesVSIntLoad intrinsic tests to opaque pointers. NFC

Jim Lin via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 9 08:13:20 PDT 2025


Author: Jim Lin
Date: 2025-07-09T23:10:59+08:00
New Revision: 38cd9033987623a00a144eeb304ccacdb27d116a

URL: https://github.com/llvm/llvm-project/commit/38cd9033987623a00a144eeb304ccacdb27d116a
DIFF: https://github.com/llvm/llvm-project/commit/38cd9033987623a00a144eeb304ccacdb27d116a.diff

LOG: [RISCV] Convert the XAndesVSIntLoad intrinsic tests to opaque pointers. NFC
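
The conversion is mechanical: each typed scalable-vector pointer in the
function signatures and intrinsic call operands becomes the opaque ptr
type. For example, from the first test in xandesvsinload-vln8.ll:

    before: %a = call <vscale x 1 x i8> @llvm.riscv.nds.vln.nxv1i8(<vscale x 1 x i8> poison, <vscale x 1 x i8>* %0, iXLen %1)
    after:  %a = call <vscale x 1 x i8> @llvm.riscv.nds.vln.nxv1i8(<vscale x 1 x i8> poison, ptr %0, iXLen %1)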

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/xandesvsinload-vln8.ll
    llvm/test/CodeGen/RISCV/rvv/xandesvsinload-vlnu8.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/xandesvsinload-vln8.ll b/llvm/test/CodeGen/RISCV/rvv/xandesvsinload-vln8.ll
index e90e4e560075a..7675d5cbd00a5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/xandesvsinload-vln8.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/xandesvsinload-vln8.ll
@@ -4,7 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zve64x,+xandesvsintload \
 ; RUN:   -verify-machineinstrs -target-abi=lp64 | FileCheck %s
 
-define <vscale x 1 x i8> @intrinsic_nds_vln_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, iXLen %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_nds_vln_v_nxv1i8_nxv1i8(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vln_v_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -13,13 +13,13 @@ define <vscale x 1 x i8> @intrinsic_nds_vln_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.nds.vln.nxv1i8(
     <vscale x 1 x i8> poison,
-    <vscale x 1 x i8>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_nds_vln_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_nds_vln_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vln_mask_v_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -28,14 +28,14 @@ define <vscale x 1 x i8> @intrinsic_nds_vln_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.nds.vln.mask.nxv1i8(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_nds_vln_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, iXLen %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_nds_vln_v_nxv2i8_nxv2i8(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vln_v_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -44,13 +44,13 @@ define <vscale x 2 x i8> @intrinsic_nds_vln_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.nds.vln.nxv2i8(
     <vscale x 2 x i8> poison,
-    <vscale x 2 x i8>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_nds_vln_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_nds_vln_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vln_mask_v_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -59,14 +59,14 @@ define <vscale x 2 x i8> @intrinsic_nds_vln_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.nds.vln.mask.nxv2i8(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i1> %2,
     iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_nds_vln_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, iXLen %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_nds_vln_v_nxv4i8_nxv4i8(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vln_v_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -75,13 +75,13 @@ define <vscale x 4 x i8> @intrinsic_nds_vln_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.nds.vln.nxv4i8(
     <vscale x 4 x i8> poison,
-    <vscale x 4 x i8>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_nds_vln_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_nds_vln_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vln_mask_v_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -90,14 +90,14 @@ define <vscale x 4 x i8> @intrinsic_nds_vln_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.nds.vln.mask.nxv4i8(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i1> %2,
     iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_nds_vln_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, iXLen %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_nds_vln_v_nxv8i8_nxv8i8(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vln_v_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -106,13 +106,13 @@ define <vscale x 8 x i8> @intrinsic_nds_vln_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.nds.vln.nxv8i8(
     <vscale x 8 x i8> poison,
-    <vscale x 8 x i8>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_nds_vln_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_nds_vln_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vln_mask_v_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -121,14 +121,14 @@ define <vscale x 8 x i8> @intrinsic_nds_vln_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.nds.vln.mask.nxv8i8(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i1> %2,
     iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_nds_vln_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, iXLen %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_nds_vln_v_nxv16i8_nxv16i8(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vln_v_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -137,13 +137,13 @@ define <vscale x 16 x i8> @intrinsic_nds_vln_v_nxv16i8_nxv16i8(<vscale x 16 x i8
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.nds.vln.nxv16i8(
     <vscale x 16 x i8> poison,
-    <vscale x 16 x i8>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_nds_vln_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_nds_vln_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vln_mask_v_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -152,14 +152,14 @@ define <vscale x 16 x i8> @intrinsic_nds_vln_mask_v_nxv16i8_nxv16i8(<vscale x 16
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.nds.vln.mask.nxv16i8(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     <vscale x 16 x i1> %2,
     iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_nds_vln_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, iXLen %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_nds_vln_v_nxv32i8_nxv32i8(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vln_v_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -168,13 +168,13 @@ define <vscale x 32 x i8> @intrinsic_nds_vln_v_nxv32i8_nxv32i8(<vscale x 32 x i8
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.nds.vln.nxv32i8(
     <vscale x 32 x i8> poison,
-    <vscale x 32 x i8>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_nds_vln_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_nds_vln_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vln_mask_v_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -183,14 +183,14 @@ define <vscale x 32 x i8> @intrinsic_nds_vln_mask_v_nxv32i8_nxv32i8(<vscale x 32
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.nds.vln.mask.nxv32i8(
     <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
+    ptr %1,
     <vscale x 32 x i1> %2,
     iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 64 x i8> @intrinsic_nds_vln_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, iXLen %1) nounwind {
+define <vscale x 64 x i8> @intrinsic_nds_vln_v_nxv64i8_nxv64i8(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vln_v_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -199,13 +199,13 @@ define <vscale x 64 x i8> @intrinsic_nds_vln_v_nxv64i8_nxv64i8(<vscale x 64 x i8
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.nds.vln.nxv64i8(
     <vscale x 64 x i8> poison,
-    <vscale x 64 x i8>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 64 x i8> %a
 }
 
-define <vscale x 64 x i8> @intrinsic_nds_vln_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
+define <vscale x 64 x i8> @intrinsic_nds_vln_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vln_mask_v_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -214,7 +214,7 @@ define <vscale x 64 x i8> @intrinsic_nds_vln_mask_v_nxv64i8_nxv64i8(<vscale x 64
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.nds.vln.mask.nxv64i8(
     <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
+    ptr %1,
     <vscale x 64 x i1> %2,
     iXLen %3, iXLen 1)
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/xandesvsinload-vlnu8.ll b/llvm/test/CodeGen/RISCV/rvv/xandesvsinload-vlnu8.ll
index 363d57b56b031..adfd9f521f682 100644
--- a/llvm/test/CodeGen/RISCV/rvv/xandesvsinload-vlnu8.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/xandesvsinload-vlnu8.ll
@@ -4,7 +4,7 @@
 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zve64x,+xandesvsintload \
 ; RUN:   -verify-machineinstrs -target-abi=lp64 | FileCheck %s
 
-define <vscale x 1 x i8> @intrinsic_nds_vlnu_v_nxv1i8_nxv1i8(<vscale x 1 x i8>* %0, iXLen %1) nounwind {
+define <vscale x 1 x i8> @intrinsic_nds_vlnu_v_nxv1i8_nxv1i8(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vlnu_v_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -13,13 +13,13 @@ define <vscale x 1 x i8> @intrinsic_nds_vlnu_v_nxv1i8_nxv1i8(<vscale x 1 x i8>*
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.nds.vlnu.nxv1i8(
     <vscale x 1 x i8> poison,
-    <vscale x 1 x i8>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 1 x i8> @intrinsic_nds_vlnu_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
+define <vscale x 1 x i8> @intrinsic_nds_vlnu_mask_v_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, ptr %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vlnu_mask_v_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
@@ -28,14 +28,14 @@ define <vscale x 1 x i8> @intrinsic_nds_vlnu_mask_v_nxv1i8_nxv1i8(<vscale x 1 x
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.nds.vlnu.mask.nxv1i8(
     <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
+    ptr %1,
     <vscale x 1 x i1> %2,
     iXLen %3, iXLen 1)
 
   ret <vscale x 1 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_nds_vlnu_v_nxv2i8_nxv2i8(<vscale x 2 x i8>* %0, iXLen %1) nounwind {
+define <vscale x 2 x i8> @intrinsic_nds_vlnu_v_nxv2i8_nxv2i8(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vlnu_v_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -44,13 +44,13 @@ define <vscale x 2 x i8> @intrinsic_nds_vlnu_v_nxv2i8_nxv2i8(<vscale x 2 x i8>*
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.nds.vlnu.nxv2i8(
     <vscale x 2 x i8> poison,
-    <vscale x 2 x i8>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 2 x i8> @intrinsic_nds_vlnu_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
+define <vscale x 2 x i8> @intrinsic_nds_vlnu_mask_v_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, ptr %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vlnu_mask_v_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
@@ -59,14 +59,14 @@ define <vscale x 2 x i8> @intrinsic_nds_vlnu_mask_v_nxv2i8_nxv2i8(<vscale x 2 x
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.nds.vlnu.mask.nxv2i8(
     <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
+    ptr %1,
     <vscale x 2 x i1> %2,
     iXLen %3, iXLen 1)
 
   ret <vscale x 2 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_nds_vlnu_v_nxv4i8_nxv4i8(<vscale x 4 x i8>* %0, iXLen %1) nounwind {
+define <vscale x 4 x i8> @intrinsic_nds_vlnu_v_nxv4i8_nxv4i8(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vlnu_v_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -75,13 +75,13 @@ define <vscale x 4 x i8> @intrinsic_nds_vlnu_v_nxv4i8_nxv4i8(<vscale x 4 x i8>*
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.nds.vlnu.nxv4i8(
     <vscale x 4 x i8> poison,
-    <vscale x 4 x i8>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 4 x i8> @intrinsic_nds_vlnu_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
+define <vscale x 4 x i8> @intrinsic_nds_vlnu_mask_v_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, ptr %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vlnu_mask_v_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
@@ -90,14 +90,14 @@ define <vscale x 4 x i8> @intrinsic_nds_vlnu_mask_v_nxv4i8_nxv4i8(<vscale x 4 x
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.nds.vlnu.mask.nxv4i8(
     <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
+    ptr %1,
     <vscale x 4 x i1> %2,
     iXLen %3, iXLen 1)
 
   ret <vscale x 4 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_nds_vlnu_v_nxv8i8_nxv8i8(<vscale x 8 x i8>* %0, iXLen %1) nounwind {
+define <vscale x 8 x i8> @intrinsic_nds_vlnu_v_nxv8i8_nxv8i8(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vlnu_v_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -106,13 +106,13 @@ define <vscale x 8 x i8> @intrinsic_nds_vlnu_v_nxv8i8_nxv8i8(<vscale x 8 x i8>*
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.nds.vlnu.nxv8i8(
     <vscale x 8 x i8> poison,
-    <vscale x 8 x i8>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 8 x i8> @intrinsic_nds_vlnu_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+define <vscale x 8 x i8> @intrinsic_nds_vlnu_mask_v_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, ptr %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vlnu_mask_v_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
@@ -121,14 +121,14 @@ define <vscale x 8 x i8> @intrinsic_nds_vlnu_mask_v_nxv8i8_nxv8i8(<vscale x 8 x
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.nds.vlnu.mask.nxv8i8(
     <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
+    ptr %1,
     <vscale x 8 x i1> %2,
     iXLen %3, iXLen 1)
 
   ret <vscale x 8 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_nds_vlnu_v_nxv16i8_nxv16i8(<vscale x 16 x i8>* %0, iXLen %1) nounwind {
+define <vscale x 16 x i8> @intrinsic_nds_vlnu_v_nxv16i8_nxv16i8(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vlnu_v_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -137,13 +137,13 @@ define <vscale x 16 x i8> @intrinsic_nds_vlnu_v_nxv16i8_nxv16i8(<vscale x 16 x i
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.nds.vlnu.nxv16i8(
     <vscale x 16 x i8> poison,
-    <vscale x 16 x i8>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 16 x i8> @intrinsic_nds_vlnu_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8>* %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+define <vscale x 16 x i8> @intrinsic_nds_vlnu_mask_v_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, ptr %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vlnu_mask_v_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
@@ -152,14 +152,14 @@ define <vscale x 16 x i8> @intrinsic_nds_vlnu_mask_v_nxv16i8_nxv16i8(<vscale x 1
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.nds.vlnu.mask.nxv16i8(
     <vscale x 16 x i8> %0,
-    <vscale x 16 x i8>* %1,
+    ptr %1,
     <vscale x 16 x i1> %2,
     iXLen %3, iXLen 1)
 
   ret <vscale x 16 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_nds_vlnu_v_nxv32i8_nxv32i8(<vscale x 32 x i8>* %0, iXLen %1) nounwind {
+define <vscale x 32 x i8> @intrinsic_nds_vlnu_v_nxv32i8_nxv32i8(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vlnu_v_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -168,13 +168,13 @@ define <vscale x 32 x i8> @intrinsic_nds_vlnu_v_nxv32i8_nxv32i8(<vscale x 32 x i
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.nds.vlnu.nxv32i8(
     <vscale x 32 x i8> poison,
-    <vscale x 32 x i8>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 32 x i8> @intrinsic_nds_vlnu_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8>* %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+define <vscale x 32 x i8> @intrinsic_nds_vlnu_mask_v_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, ptr %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vlnu_mask_v_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
@@ -183,14 +183,14 @@ define <vscale x 32 x i8> @intrinsic_nds_vlnu_mask_v_nxv32i8_nxv32i8(<vscale x 3
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.nds.vlnu.mask.nxv32i8(
     <vscale x 32 x i8> %0,
-    <vscale x 32 x i8>* %1,
+    ptr %1,
     <vscale x 32 x i1> %2,
     iXLen %3, iXLen 1)
 
   ret <vscale x 32 x i8> %a
 }
 
-define <vscale x 64 x i8> @intrinsic_nds_vlnu_v_nxv64i8_nxv64i8(<vscale x 64 x i8>* %0, iXLen %1) nounwind {
+define <vscale x 64 x i8> @intrinsic_nds_vlnu_v_nxv64i8_nxv64i8(ptr %0, iXLen %1) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vlnu_v_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
@@ -199,13 +199,13 @@ define <vscale x 64 x i8> @intrinsic_nds_vlnu_v_nxv64i8_nxv64i8(<vscale x 64 x i
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.nds.vlnu.nxv64i8(
     <vscale x 64 x i8> poison,
-    <vscale x 64 x i8>* %0,
+    ptr %0,
     iXLen %1)
 
   ret <vscale x 64 x i8> %a
 }
 
-define <vscale x 64 x i8> @intrinsic_nds_vlnu_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8>* %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
+define <vscale x 64 x i8> @intrinsic_nds_vlnu_mask_v_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, ptr %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_nds_vlnu_mask_v_nxv64i8_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
@@ -214,7 +214,7 @@ define <vscale x 64 x i8> @intrinsic_nds_vlnu_mask_v_nxv64i8_nxv64i8(<vscale x 6
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.nds.vlnu.mask.nxv64i8(
     <vscale x 64 x i8> %0,
-    <vscale x 64 x i8>* %1,
+    ptr %1,
     <vscale x 64 x i1> %2,
     iXLen %3, iXLen 1)
 
