[llvm] 1fdbdb8 - [riscv] Convert a set of tests to opaque pointers

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Thu Dec 14 08:57:21 PST 2023


Author: Philip Reames
Date: 2023-12-14T08:57:13-08:00
New Revision: 1fdbdb84a1934e8cdff6c9b9bf7ba75301387dd2

URL: https://github.com/llvm/llvm-project/commit/1fdbdb84a1934e8cdff6c9b9bf7ba75301387dd2
DIFF: https://github.com/llvm/llvm-project/commit/1fdbdb84a1934e8cdff6c9b9bf7ba75301387dd2.diff

LOG: [riscv] Convert a set of tests to opaque pointers
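
Each test previously spelled out typed pointers (e.g. <vscale x 4 x i16>*) in the
function signatures and in the load/store instructions; the conversion replaces
them with the opaque ptr type while leaving the CHECK lines, and thus the expected
generated code, untouched. A representative before/after excerpt from the diff below:

-define void @vadd_vint16m1(<vscale x 4 x i16> *%pc, <vscale x 4 x i16> *%pa, <vscale x 4 x i16> *%pb) nounwind {
+define void @vadd_vint16m1(ptr %pc, ptr %pa, ptr %pb) nounwind {
-  %va = load <vscale x 4 x i16>, <vscale x 4 x i16>* %pa
+  %va = load <vscale x 4 x i16>, ptr %pa
-  store <vscale x 4 x i16> %vc, <vscale x 4 x i16> *%pc
+  store <vscale x 4 x i16> %vc, ptr %pc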

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll
    llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll
    llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll
    llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll
index 4bff79ba935dae..a40bbe72004346 100644
--- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll
@@ -4,7 +4,7 @@
 ; RUN: llc -mtriple riscv64 -mattr=+v %s -o - \
 ; RUN:     -verify-machineinstrs | FileCheck %s
 
-define void @vadd_vint16m1(<vscale x 4 x i16> *%pc, <vscale x 4 x i16> *%pa, <vscale x 4 x i16> *%pb) nounwind {
+define void @vadd_vint16m1(ptr %pc, ptr %pa, ptr %pb) nounwind {
 ; CHECK-LABEL: vadd_vint16m1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl1re16.v v8, (a1)
@@ -13,14 +13,14 @@ define void @vadd_vint16m1(<vscale x 4 x i16> *%pc, <vscale x 4 x i16> *%pa, <vs
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vs1r.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 4 x i16>, <vscale x 4 x i16>* %pa
-  %vb = load <vscale x 4 x i16>, <vscale x 4 x i16>* %pb
+  %va = load <vscale x 4 x i16>, ptr %pa
+  %vb = load <vscale x 4 x i16>, ptr %pb
   %vc = add <vscale x 4 x i16> %va, %vb
-  store <vscale x 4 x i16> %vc, <vscale x 4 x i16> *%pc
+  store <vscale x 4 x i16> %vc, ptr %pc
   ret void
 }
 
-define void @vadd_vint16m2(<vscale x 8 x i16> *%pc, <vscale x 8 x i16> *%pa, <vscale x 8 x i16> *%pb) nounwind {
+define void @vadd_vint16m2(ptr %pc, ptr %pa, ptr %pb) nounwind {
 ; CHECK-LABEL: vadd_vint16m2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2re16.v v8, (a1)
@@ -29,14 +29,14 @@ define void @vadd_vint16m2(<vscale x 8 x i16> *%pc, <vscale x 8 x i16> *%pa, <vs
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
 ; CHECK-NEXT:    vs2r.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 8 x i16>, <vscale x 8 x i16>* %pa
-  %vb = load <vscale x 8 x i16>, <vscale x 8 x i16>* %pb
+  %va = load <vscale x 8 x i16>, ptr %pa
+  %vb = load <vscale x 8 x i16>, ptr %pb
   %vc = add <vscale x 8 x i16> %va, %vb
-  store <vscale x 8 x i16> %vc, <vscale x 8 x i16> *%pc
+  store <vscale x 8 x i16> %vc, ptr %pc
   ret void
 }
 
-define void @vadd_vint16m4(<vscale x 16 x i16> *%pc, <vscale x 16 x i16> *%pa, <vscale x 16 x i16> *%pb) nounwind {
+define void @vadd_vint16m4(ptr %pc, ptr %pa, ptr %pb) nounwind {
 ; CHECK-LABEL: vadd_vint16m4:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl4re16.v v8, (a1)
@@ -45,14 +45,14 @@ define void @vadd_vint16m4(<vscale x 16 x i16> *%pc, <vscale x 16 x i16> *%pa, <
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
 ; CHECK-NEXT:    vs4r.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 16 x i16>, <vscale x 16 x i16>* %pa
-  %vb = load <vscale x 16 x i16>, <vscale x 16 x i16>* %pb
+  %va = load <vscale x 16 x i16>, ptr %pa
+  %vb = load <vscale x 16 x i16>, ptr %pb
   %vc = add <vscale x 16 x i16> %va, %vb
-  store <vscale x 16 x i16> %vc, <vscale x 16 x i16> *%pc
+  store <vscale x 16 x i16> %vc, ptr %pc
   ret void
 }
 
-define void @vadd_vint16m8(<vscale x 32 x i16> *%pc, <vscale x 32 x i16> *%pa, <vscale x 32 x i16> *%pb) nounwind {
+define void @vadd_vint16m8(ptr %pc, ptr %pa, ptr %pb) nounwind {
 ; CHECK-LABEL: vadd_vint16m8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl8re16.v v8, (a1)
@@ -61,14 +61,14 @@ define void @vadd_vint16m8(<vscale x 32 x i16> *%pc, <vscale x 32 x i16> *%pa, <
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    vs8r.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 32 x i16>, <vscale x 32 x i16>* %pa
-  %vb = load <vscale x 32 x i16>, <vscale x 32 x i16>* %pb
+  %va = load <vscale x 32 x i16>, ptr %pa
+  %vb = load <vscale x 32 x i16>, ptr %pb
   %vc = add <vscale x 32 x i16> %va, %vb
-  store <vscale x 32 x i16> %vc, <vscale x 32 x i16> *%pc
+  store <vscale x 32 x i16> %vc, ptr %pc
   ret void
 }
 
-define void @vadd_vint16mf2(<vscale x 2 x i16> *%pc, <vscale x 2 x i16> *%pa, <vscale x 2 x i16> *%pb) nounwind {
+define void @vadd_vint16mf2(ptr %pc, ptr %pa, ptr %pb) nounwind {
 ; CHECK-LABEL: vadd_vint16mf2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e16, mf2, ta, ma
@@ -77,14 +77,14 @@ define void @vadd_vint16mf2(<vscale x 2 x i16> *%pc, <vscale x 2 x i16> *%pa, <v
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 2 x i16>, <vscale x 2 x i16>* %pa
-  %vb = load <vscale x 2 x i16>, <vscale x 2 x i16>* %pb
+  %va = load <vscale x 2 x i16>, ptr %pa
+  %vb = load <vscale x 2 x i16>, ptr %pb
   %vc = add <vscale x 2 x i16> %va, %vb
-  store <vscale x 2 x i16> %vc, <vscale x 2 x i16> *%pc
+  store <vscale x 2 x i16> %vc, ptr %pc
   ret void
 }
 
-define void @vadd_vint16mf4(<vscale x 1 x i16> *%pc, <vscale x 1 x i16> *%pa, <vscale x 1 x i16> *%pb) nounwind {
+define void @vadd_vint16mf4(ptr %pc, ptr %pa, ptr %pb) nounwind {
 ; CHECK-LABEL: vadd_vint16mf4:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e16, mf4, ta, ma
@@ -93,9 +93,9 @@ define void @vadd_vint16mf4(<vscale x 1 x i16> *%pc, <vscale x 1 x i16> *%pa, <v
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 1 x i16>, <vscale x 1 x i16>* %pa
-  %vb = load <vscale x 1 x i16>, <vscale x 1 x i16>* %pb
+  %va = load <vscale x 1 x i16>, ptr %pa
+  %vb = load <vscale x 1 x i16>, ptr %pb
   %vc = add <vscale x 1 x i16> %va, %vb
-  store <vscale x 1 x i16> %vc, <vscale x 1 x i16> *%pc
+  store <vscale x 1 x i16> %vc, ptr %pc
   ret void
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll
index a312ce74c2ee3e..3845b073f83de0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll
@@ -4,7 +4,7 @@
 ; RUN: llc -mtriple riscv64 -mattr=+v %s -o - \
 ; RUN:     -verify-machineinstrs | FileCheck %s
 
-define void @vadd_vint32m1(<vscale x 2 x i32> *%pc, <vscale x 2 x i32> *%pa, <vscale x 2 x i32> *%pb) nounwind {
+define void @vadd_vint32m1(ptr %pc, ptr %pa, ptr %pb) nounwind {
 ; CHECK-LABEL: vadd_vint32m1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl1re32.v v8, (a1)
@@ -13,14 +13,14 @@ define void @vadd_vint32m1(<vscale x 2 x i32> *%pc, <vscale x 2 x i32> *%pa, <vs
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vs1r.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 2 x i32>, <vscale x 2 x i32>* %pa
-  %vb = load <vscale x 2 x i32>, <vscale x 2 x i32>* %pb
+  %va = load <vscale x 2 x i32>, ptr %pa
+  %vb = load <vscale x 2 x i32>, ptr %pb
   %vc = add <vscale x 2 x i32> %va, %vb
-  store <vscale x 2 x i32> %vc, <vscale x 2 x i32> *%pc
+  store <vscale x 2 x i32> %vc, ptr %pc
   ret void
 }
 
-define void @vadd_vint32m2(<vscale x 4 x i32> *%pc, <vscale x 4 x i32> *%pa, <vscale x 4 x i32> *%pb) nounwind {
+define void @vadd_vint32m2(ptr %pc, ptr %pa, ptr %pb) nounwind {
 ; CHECK-LABEL: vadd_vint32m2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2re32.v v8, (a1)
@@ -29,14 +29,14 @@ define void @vadd_vint32m2(<vscale x 4 x i32> *%pc, <vscale x 4 x i32> *%pa, <vs
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
 ; CHECK-NEXT:    vs2r.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 4 x i32>, <vscale x 4 x i32>* %pa
-  %vb = load <vscale x 4 x i32>, <vscale x 4 x i32>* %pb
+  %va = load <vscale x 4 x i32>, ptr %pa
+  %vb = load <vscale x 4 x i32>, ptr %pb
   %vc = add <vscale x 4 x i32> %va, %vb
-  store <vscale x 4 x i32> %vc, <vscale x 4 x i32> *%pc
+  store <vscale x 4 x i32> %vc, ptr %pc
   ret void
 }
 
-define void @vadd_vint32m4(<vscale x 8 x i32> *%pc, <vscale x 8 x i32> *%pa, <vscale x 8 x i32> *%pb) nounwind {
+define void @vadd_vint32m4(ptr %pc, ptr %pa, ptr %pb) nounwind {
 ; CHECK-LABEL: vadd_vint32m4:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl4re32.v v8, (a1)
@@ -45,14 +45,14 @@ define void @vadd_vint32m4(<vscale x 8 x i32> *%pc, <vscale x 8 x i32> *%pa, <vs
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
 ; CHECK-NEXT:    vs4r.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 8 x i32>, <vscale x 8 x i32>* %pa
-  %vb = load <vscale x 8 x i32>, <vscale x 8 x i32>* %pb
+  %va = load <vscale x 8 x i32>, ptr %pa
+  %vb = load <vscale x 8 x i32>, ptr %pb
   %vc = add <vscale x 8 x i32> %va, %vb
-  store <vscale x 8 x i32> %vc, <vscale x 8 x i32> *%pc
+  store <vscale x 8 x i32> %vc, ptr %pc
   ret void
 }
 
-define void @vadd_vint32m8(<vscale x 16 x i32> *%pc, <vscale x 16 x i32> *%pa, <vscale x 16 x i32> *%pb) nounwind {
+define void @vadd_vint32m8(ptr %pc, ptr %pa, ptr %pb) nounwind {
 ; CHECK-LABEL: vadd_vint32m8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl8re32.v v8, (a1)
@@ -61,14 +61,14 @@ define void @vadd_vint32m8(<vscale x 16 x i32> *%pc, <vscale x 16 x i32> *%pa, <
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    vs8r.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 16 x i32>, <vscale x 16 x i32>* %pa
-  %vb = load <vscale x 16 x i32>, <vscale x 16 x i32>* %pb
+  %va = load <vscale x 16 x i32>, ptr %pa
+  %vb = load <vscale x 16 x i32>, ptr %pb
   %vc = add <vscale x 16 x i32> %va, %vb
-  store <vscale x 16 x i32> %vc, <vscale x 16 x i32> *%pc
+  store <vscale x 16 x i32> %vc, ptr %pc
   ret void
 }
 
-define void @vadd_vint32mf2(<vscale x 1 x i32> *%pc, <vscale x 1 x i32> *%pa, <vscale x 1 x i32> *%pb) nounwind {
+define void @vadd_vint32mf2(ptr %pc, ptr %pa, ptr %pb) nounwind {
 ; CHECK-LABEL: vadd_vint32mf2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e32, mf2, ta, ma
@@ -77,9 +77,9 @@ define void @vadd_vint32mf2(<vscale x 1 x i32> *%pc, <vscale x 1 x i32> *%pa, <v
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 1 x i32>, <vscale x 1 x i32>* %pa
-  %vb = load <vscale x 1 x i32>, <vscale x 1 x i32>* %pb
+  %va = load <vscale x 1 x i32>, ptr %pa
+  %vb = load <vscale x 1 x i32>, ptr %pb
   %vc = add <vscale x 1 x i32> %va, %vb
-  store <vscale x 1 x i32> %vc, <vscale x 1 x i32> *%pc
+  store <vscale x 1 x i32> %vc, ptr %pc
   ret void
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll
index 2105197eded158..ecc1e16f42e3a5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll
@@ -4,7 +4,7 @@
 ; RUN: llc -mtriple riscv64 -mattr=+v %s -o - \
 ; RUN:     -verify-machineinstrs | FileCheck %s
 
-define void @vadd_vint64m1(<vscale x 1 x i64> *%pc, <vscale x 1 x i64> *%pa, <vscale x 1 x i64> *%pb) nounwind {
+define void @vadd_vint64m1(ptr %pc, ptr %pa, ptr %pb) nounwind {
 ; CHECK-LABEL: vadd_vint64m1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl1re64.v v8, (a1)
@@ -13,14 +13,14 @@ define void @vadd_vint64m1(<vscale x 1 x i64> *%pc, <vscale x 1 x i64> *%pa, <vs
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vs1r.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 1 x i64>, <vscale x 1 x i64>* %pa
-  %vb = load <vscale x 1 x i64>, <vscale x 1 x i64>* %pb
+  %va = load <vscale x 1 x i64>, ptr %pa
+  %vb = load <vscale x 1 x i64>, ptr %pb
   %vc = add <vscale x 1 x i64> %va, %vb
-  store <vscale x 1 x i64> %vc, <vscale x 1 x i64> *%pc
+  store <vscale x 1 x i64> %vc, ptr %pc
   ret void
 }
 
-define void @vadd_vint64m2(<vscale x 2 x i64> *%pc, <vscale x 2 x i64> *%pa, <vscale x 2 x i64> *%pb) nounwind {
+define void @vadd_vint64m2(ptr %pc, ptr %pa, ptr %pb) nounwind {
 ; CHECK-LABEL: vadd_vint64m2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2re64.v v8, (a1)
@@ -29,14 +29,14 @@ define void @vadd_vint64m2(<vscale x 2 x i64> *%pc, <vscale x 2 x i64> *%pa, <vs
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
 ; CHECK-NEXT:    vs2r.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 2 x i64>, <vscale x 2 x i64>* %pa
-  %vb = load <vscale x 2 x i64>, <vscale x 2 x i64>* %pb
+  %va = load <vscale x 2 x i64>, ptr %pa
+  %vb = load <vscale x 2 x i64>, ptr %pb
   %vc = add <vscale x 2 x i64> %va, %vb
-  store <vscale x 2 x i64> %vc, <vscale x 2 x i64> *%pc
+  store <vscale x 2 x i64> %vc, ptr %pc
   ret void
 }
 
-define void @vadd_vint64m4(<vscale x 4 x i64> *%pc, <vscale x 4 x i64> *%pa, <vscale x 4 x i64> *%pb) nounwind {
+define void @vadd_vint64m4(ptr %pc, ptr %pa, ptr %pb) nounwind {
 ; CHECK-LABEL: vadd_vint64m4:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl4re64.v v8, (a1)
@@ -45,14 +45,14 @@ define void @vadd_vint64m4(<vscale x 4 x i64> *%pc, <vscale x 4 x i64> *%pa, <vs
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
 ; CHECK-NEXT:    vs4r.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 4 x i64>, <vscale x 4 x i64>* %pa
-  %vb = load <vscale x 4 x i64>, <vscale x 4 x i64>* %pb
+  %va = load <vscale x 4 x i64>, ptr %pa
+  %vb = load <vscale x 4 x i64>, ptr %pb
   %vc = add <vscale x 4 x i64> %va, %vb
-  store <vscale x 4 x i64> %vc, <vscale x 4 x i64> *%pc
+  store <vscale x 4 x i64> %vc, ptr %pc
   ret void
 }
 
-define void @vadd_vint64m8(<vscale x 8 x i64> *%pc, <vscale x 8 x i64> *%pa, <vscale x 8 x i64> *%pb) nounwind {
+define void @vadd_vint64m8(ptr %pc, ptr %pa, ptr %pb) nounwind {
 ; CHECK-LABEL: vadd_vint64m8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl8re64.v v8, (a1)
@@ -61,9 +61,9 @@ define void @vadd_vint64m8(<vscale x 8 x i64> *%pc, <vscale x 8 x i64> *%pa, <vs
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    vs8r.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 8 x i64>, <vscale x 8 x i64>* %pa
-  %vb = load <vscale x 8 x i64>, <vscale x 8 x i64>* %pb
+  %va = load <vscale x 8 x i64>, ptr %pa
+  %vb = load <vscale x 8 x i64>, ptr %pb
   %vc = add <vscale x 8 x i64> %va, %vb
-  store <vscale x 8 x i64> %vc, <vscale x 8 x i64> *%pc
+  store <vscale x 8 x i64> %vc, ptr %pc
   ret void
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll
index 2a1231249803a2..246364c9547f1a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll
@@ -4,7 +4,7 @@
 ; RUN: llc -mtriple riscv64 -mattr=+v %s -o - \
 ; RUN:     -verify-machineinstrs | FileCheck %s
 
-define void @vadd_vint8m1(<vscale x 8 x i8> *%pc, <vscale x 8 x i8> *%pa, <vscale x 8 x i8> *%pb) nounwind {
+define void @vadd_vint8m1(ptr %pc, ptr %pa, ptr %pb) nounwind {
 ; CHECK-LABEL: vadd_vint8m1:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl1r.v v8, (a1)
@@ -13,14 +13,14 @@ define void @vadd_vint8m1(<vscale x 8 x i8> *%pc, <vscale x 8 x i8> *%pa, <vscal
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vs1r.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 8 x i8>, <vscale x 8 x i8>* %pa
-  %vb = load <vscale x 8 x i8>, <vscale x 8 x i8>* %pb
+  %va = load <vscale x 8 x i8>, ptr %pa
+  %vb = load <vscale x 8 x i8>, ptr %pb
   %vc = add <vscale x 8 x i8> %va, %vb
-  store <vscale x 8 x i8> %vc, <vscale x 8 x i8> *%pc
+  store <vscale x 8 x i8> %vc, ptr %pc
   ret void
 }
 
-define void @vadd_vint8m2(<vscale x 16 x i8> *%pc, <vscale x 16 x i8> *%pa, <vscale x 16 x i8> *%pb) nounwind {
+define void @vadd_vint8m2(ptr %pc, ptr %pa, ptr %pb) nounwind {
 ; CHECK-LABEL: vadd_vint8m2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl2r.v v8, (a1)
@@ -29,14 +29,14 @@ define void @vadd_vint8m2(<vscale x 16 x i8> *%pc, <vscale x 16 x i8> *%pa, <vsc
 ; CHECK-NEXT:    vadd.vv v8, v8, v10
 ; CHECK-NEXT:    vs2r.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 16 x i8>, <vscale x 16 x i8>* %pa
-  %vb = load <vscale x 16 x i8>, <vscale x 16 x i8>* %pb
+  %va = load <vscale x 16 x i8>, ptr %pa
+  %vb = load <vscale x 16 x i8>, ptr %pb
   %vc = add <vscale x 16 x i8> %va, %vb
-  store <vscale x 16 x i8> %vc, <vscale x 16 x i8> *%pc
+  store <vscale x 16 x i8> %vc, ptr %pc
   ret void
 }
 
-define void @vadd_vint8m4(<vscale x 32 x i8> *%pc, <vscale x 32 x i8> *%pa, <vscale x 32 x i8> *%pb) nounwind {
+define void @vadd_vint8m4(ptr %pc, ptr %pa, ptr %pb) nounwind {
 ; CHECK-LABEL: vadd_vint8m4:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl4r.v v8, (a1)
@@ -45,14 +45,14 @@ define void @vadd_vint8m4(<vscale x 32 x i8> *%pc, <vscale x 32 x i8> *%pa, <vsc
 ; CHECK-NEXT:    vadd.vv v8, v8, v12
 ; CHECK-NEXT:    vs4r.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 32 x i8>, <vscale x 32 x i8>* %pa
-  %vb = load <vscale x 32 x i8>, <vscale x 32 x i8>* %pb
+  %va = load <vscale x 32 x i8>, ptr %pa
+  %vb = load <vscale x 32 x i8>, ptr %pb
   %vc = add <vscale x 32 x i8> %va, %vb
-  store <vscale x 32 x i8> %vc, <vscale x 32 x i8> *%pc
+  store <vscale x 32 x i8> %vc, ptr %pc
   ret void
 }
 
-define void @vadd_vint8m8(<vscale x 64 x i8> *%pc, <vscale x 64 x i8> *%pa, <vscale x 64 x i8> *%pb) nounwind {
+define void @vadd_vint8m8(ptr %pc, ptr %pa, ptr %pb) nounwind {
 ; CHECK-LABEL: vadd_vint8m8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vl8r.v v8, (a1)
@@ -61,14 +61,14 @@ define void @vadd_vint8m8(<vscale x 64 x i8> *%pc, <vscale x 64 x i8> *%pa, <vsc
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
 ; CHECK-NEXT:    vs8r.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 64 x i8>, <vscale x 64 x i8>* %pa
-  %vb = load <vscale x 64 x i8>, <vscale x 64 x i8>* %pb
+  %va = load <vscale x 64 x i8>, ptr %pa
+  %vb = load <vscale x 64 x i8>, ptr %pb
   %vc = add <vscale x 64 x i8> %va, %vb
-  store <vscale x 64 x i8> %vc, <vscale x 64 x i8> *%pc
+  store <vscale x 64 x i8> %vc, ptr %pc
   ret void
 }
 
-define void @vadd_vint8mf2(<vscale x 4 x i8> *%pc, <vscale x 4 x i8> *%pa, <vscale x 4 x i8> *%pb) nounwind {
+define void @vadd_vint8mf2(ptr %pc, ptr %pa, ptr %pb) nounwind {
 ; CHECK-LABEL: vadd_vint8mf2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e8, mf2, ta, ma
@@ -77,14 +77,14 @@ define void @vadd_vint8mf2(<vscale x 4 x i8> *%pc, <vscale x 4 x i8> *%pa, <vsca
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 4 x i8>, <vscale x 4 x i8>* %pa
-  %vb = load <vscale x 4 x i8>, <vscale x 4 x i8>* %pb
+  %va = load <vscale x 4 x i8>, ptr %pa
+  %vb = load <vscale x 4 x i8>, ptr %pb
   %vc = add <vscale x 4 x i8> %va, %vb
-  store <vscale x 4 x i8> %vc, <vscale x 4 x i8> *%pc
+  store <vscale x 4 x i8> %vc, ptr %pc
   ret void
 }
 
-define void @vadd_vint8mf4(<vscale x 2 x i8> *%pc, <vscale x 2 x i8> *%pa, <vscale x 2 x i8> *%pb) nounwind {
+define void @vadd_vint8mf4(ptr %pc, ptr %pa, ptr %pb) nounwind {
 ; CHECK-LABEL: vadd_vint8mf4:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e8, mf4, ta, ma
@@ -93,14 +93,14 @@ define void @vadd_vint8mf4(<vscale x 2 x i8> *%pc, <vscale x 2 x i8> *%pa, <vsca
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 2 x i8>, <vscale x 2 x i8>* %pa
-  %vb = load <vscale x 2 x i8>, <vscale x 2 x i8>* %pb
+  %va = load <vscale x 2 x i8>, ptr %pa
+  %vb = load <vscale x 2 x i8>, ptr %pb
   %vc = add <vscale x 2 x i8> %va, %vb
-  store <vscale x 2 x i8> %vc, <vscale x 2 x i8> *%pc
+  store <vscale x 2 x i8> %vc, ptr %pc
   ret void
 }
 
-define void @vadd_vint8mf8(<vscale x 1 x i8> *%pc, <vscale x 1 x i8> *%pa, <vscale x 1 x i8> *%pb) nounwind {
+define void @vadd_vint8mf8(ptr %pc, ptr %pa, ptr %pb) nounwind {
 ; CHECK-LABEL: vadd_vint8mf8:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e8, mf8, ta, ma
@@ -109,9 +109,9 @@ define void @vadd_vint8mf8(<vscale x 1 x i8> *%pc, <vscale x 1 x i8> *%pa, <vsca
 ; CHECK-NEXT:    vadd.vv v8, v8, v9
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
-  %va = load <vscale x 1 x i8>, <vscale x 1 x i8>* %pa
-  %vb = load <vscale x 1 x i8>, <vscale x 1 x i8>* %pb
+  %va = load <vscale x 1 x i8>, ptr %pa
+  %vb = load <vscale x 1 x i8>, ptr %pb
   %vc = add <vscale x 1 x i8> %va, %vb
-  store <vscale x 1 x i8> %vc, <vscale x 1 x i8> *%pc
+  store <vscale x 1 x i8> %vc, ptr %pc
   ret void
 }
