[llvm] e9a4f2c - [RISCV] Convert test to opaque pointers. NFC
Luke Lau via llvm-commits
llvm-commits at lists.llvm.org
Mon Nov 4 23:48:28 PST 2024
Author: Luke Lau
Date: 2024-11-05T15:48:18+08:00
New Revision: e9a4f2c30a108c51eaa18f4ba2e803079aa7b8e1
URL: https://github.com/llvm/llvm-project/commit/e9a4f2c30a108c51eaa18f4ba2e803079aa7b8e1
DIFF: https://github.com/llvm/llvm-project/commit/e9a4f2c30a108c51eaa18f4ba2e803079aa7b8e1.diff
LOG: [RISCV] Convert test to opaque pointers. NFC
Added:
Modified:
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll
Removed:
################################################################################
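(Context, not part of the commit message: the conversion below is purely mechanical. Every typed pointer such as <1 x half>* becomes the opaque ptr type, and the pointee type now lives only on the load/store/call that uses it, so generated code is unchanged, hence the NFC tag. A minimal sketch of the pattern, using a hypothetical @example function rather than one from the test:

    ; Before: typed pointers carry the pointee type
    define void @example(<1 x half>* %p) {
      %v = load <1 x half>, <1 x half>* %p
      ret void
    }

    ; After: opaque pointers; the pointee type appears only on the operation
    define void @example(ptr %p) {
      %v = load <1 x half>, ptr %p
      ret void
    }
)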
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll
index 80110b3eef4dd8..683ead4f1c308f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-fp.ll
@@ -2,7 +2,7 @@
; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
-define void @masked_store_v1f16(<1 x half>* %val_ptr, <1 x half>* %a, <1 x half>* %m_ptr) nounwind {
+define void @masked_store_v1f16(ptr %val_ptr, ptr %a, ptr %m_ptr) nounwind {
; CHECK-LABEL: masked_store_v1f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
@@ -12,15 +12,15 @@ define void @masked_store_v1f16(<1 x half>* %val_ptr, <1 x half>* %a, <1 x half>
; CHECK-NEXT: vmfeq.vf v0, v8, fa5
; CHECK-NEXT: vse16.v v9, (a1), v0.t
; CHECK-NEXT: ret
- %m = load <1 x half>, <1 x half>* %m_ptr
+ %m = load <1 x half>, ptr %m_ptr
%mask = fcmp oeq <1 x half> %m, zeroinitializer
- %val = load <1 x half>, <1 x half>* %val_ptr
- call void @llvm.masked.store.v1f16.p0v1f16(<1 x half> %val, <1 x half>* %a, i32 8, <1 x i1> %mask)
+ %val = load <1 x half>, ptr %val_ptr
+ call void @llvm.masked.store.v1f16.p0v1f16(<1 x half> %val, ptr %a, i32 8, <1 x i1> %mask)
ret void
}
-declare void @llvm.masked.store.v1f16.p0v1f16(<1 x half>, <1 x half>*, i32, <1 x i1>)
+declare void @llvm.masked.store.v1f16.p0v1f16(<1 x half>, ptr, i32, <1 x i1>)
-define void @masked_store_v1f32(<1 x float>* %val_ptr, <1 x float>* %a, <1 x float>* %m_ptr) nounwind {
+define void @masked_store_v1f32(ptr %val_ptr, ptr %a, ptr %m_ptr) nounwind {
; CHECK-LABEL: masked_store_v1f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
@@ -30,15 +30,15 @@ define void @masked_store_v1f32(<1 x float>* %val_ptr, <1 x float>* %a, <1 x flo
; CHECK-NEXT: vmfeq.vf v0, v8, fa5
; CHECK-NEXT: vse32.v v9, (a1), v0.t
; CHECK-NEXT: ret
- %m = load <1 x float>, <1 x float>* %m_ptr
+ %m = load <1 x float>, ptr %m_ptr
%mask = fcmp oeq <1 x float> %m, zeroinitializer
- %val = load <1 x float>, <1 x float>* %val_ptr
- call void @llvm.masked.store.v1f32.p0v1f32(<1 x float> %val, <1 x float>* %a, i32 8, <1 x i1> %mask)
+ %val = load <1 x float>, ptr %val_ptr
+ call void @llvm.masked.store.v1f32.p0v1f32(<1 x float> %val, ptr %a, i32 8, <1 x i1> %mask)
ret void
}
-declare void @llvm.masked.store.v1f32.p0v1f32(<1 x float>, <1 x float>*, i32, <1 x i1>)
+declare void @llvm.masked.store.v1f32.p0v1f32(<1 x float>, ptr, i32, <1 x i1>)
-define void @masked_store_v1f64(<1 x double>* %val_ptr, <1 x double>* %a, <1 x double>* %m_ptr) nounwind {
+define void @masked_store_v1f64(ptr %val_ptr, ptr %a, ptr %m_ptr) nounwind {
; RV32-LABEL: masked_store_v1f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
@@ -58,15 +58,15 @@ define void @masked_store_v1f64(<1 x double>* %val_ptr, <1 x double>* %a, <1 x d
; RV64-NEXT: vmfeq.vf v0, v8, fa5
; RV64-NEXT: vse64.v v9, (a1), v0.t
; RV64-NEXT: ret
- %m = load <1 x double>, <1 x double>* %m_ptr
+ %m = load <1 x double>, ptr %m_ptr
%mask = fcmp oeq <1 x double> %m, zeroinitializer
- %val = load <1 x double>, <1 x double>* %val_ptr
- call void @llvm.masked.store.v1f64.p0v1f64(<1 x double> %val, <1 x double>* %a, i32 8, <1 x i1> %mask)
+ %val = load <1 x double>, ptr %val_ptr
+ call void @llvm.masked.store.v1f64.p0v1f64(<1 x double> %val, ptr %a, i32 8, <1 x i1> %mask)
ret void
}
-declare void @llvm.masked.store.v1f64.p0v1f64(<1 x double>, <1 x double>*, i32, <1 x i1>)
+declare void @llvm.masked.store.v1f64.p0v1f64(<1 x double>, ptr, i32, <1 x i1>)
-define void @masked_store_v2f16(<2 x half>* %val_ptr, <2 x half>* %a, <2 x half>* %m_ptr) nounwind {
+define void @masked_store_v2f16(ptr %val_ptr, ptr %a, ptr %m_ptr) nounwind {
; CHECK-LABEL: masked_store_v2f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
@@ -76,15 +76,15 @@ define void @masked_store_v2f16(<2 x half>* %val_ptr, <2 x half>* %a, <2 x half>
; CHECK-NEXT: vmfeq.vf v0, v8, fa5
; CHECK-NEXT: vse16.v v9, (a1), v0.t
; CHECK-NEXT: ret
- %m = load <2 x half>, <2 x half>* %m_ptr
+ %m = load <2 x half>, ptr %m_ptr
%mask = fcmp oeq <2 x half> %m, zeroinitializer
- %val = load <2 x half>, <2 x half>* %val_ptr
- call void @llvm.masked.store.v2f16.p0v2f16(<2 x half> %val, <2 x half>* %a, i32 8, <2 x i1> %mask)
+ %val = load <2 x half>, ptr %val_ptr
+ call void @llvm.masked.store.v2f16.p0v2f16(<2 x half> %val, ptr %a, i32 8, <2 x i1> %mask)
ret void
}
-declare void @llvm.masked.store.v2f16.p0v2f16(<2 x half>, <2 x half>*, i32, <2 x i1>)
+declare void @llvm.masked.store.v2f16.p0v2f16(<2 x half>, ptr, i32, <2 x i1>)
-define void @masked_store_v2f32(<2 x float>* %val_ptr, <2 x float>* %a, <2 x float>* %m_ptr) nounwind {
+define void @masked_store_v2f32(ptr %val_ptr, ptr %a, ptr %m_ptr) nounwind {
; CHECK-LABEL: masked_store_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
@@ -94,15 +94,15 @@ define void @masked_store_v2f32(<2 x float>* %val_ptr, <2 x float>* %a, <2 x flo
; CHECK-NEXT: vmfeq.vf v0, v8, fa5
; CHECK-NEXT: vse32.v v9, (a1), v0.t
; CHECK-NEXT: ret
- %m = load <2 x float>, <2 x float>* %m_ptr
+ %m = load <2 x float>, ptr %m_ptr
%mask = fcmp oeq <2 x float> %m, zeroinitializer
- %val = load <2 x float>, <2 x float>* %val_ptr
- call void @llvm.masked.store.v2f32.p0v2f32(<2 x float> %val, <2 x float>* %a, i32 8, <2 x i1> %mask)
+ %val = load <2 x float>, ptr %val_ptr
+ call void @llvm.masked.store.v2f32.p0v2f32(<2 x float> %val, ptr %a, i32 8, <2 x i1> %mask)
ret void
}
-declare void @llvm.masked.store.v2f32.p0v2f32(<2 x float>, <2 x float>*, i32, <2 x i1>)
+declare void @llvm.masked.store.v2f32.p0v2f32(<2 x float>, ptr, i32, <2 x i1>)
-define void @masked_store_v2f64(<2 x double>* %val_ptr, <2 x double>* %a, <2 x double>* %m_ptr) nounwind {
+define void @masked_store_v2f64(ptr %val_ptr, ptr %a, ptr %m_ptr) nounwind {
; RV32-LABEL: masked_store_v2f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
@@ -122,15 +122,15 @@ define void @masked_store_v2f64(<2 x double>* %val_ptr, <2 x double>* %a, <2 x d
; RV64-NEXT: vmfeq.vf v0, v8, fa5
; RV64-NEXT: vse64.v v9, (a1), v0.t
; RV64-NEXT: ret
- %m = load <2 x double>, <2 x double>* %m_ptr
+ %m = load <2 x double>, ptr %m_ptr
%mask = fcmp oeq <2 x double> %m, zeroinitializer
- %val = load <2 x double>, <2 x double>* %val_ptr
- call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %val, <2 x double>* %a, i32 8, <2 x i1> %mask)
+ %val = load <2 x double>, ptr %val_ptr
+ call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> %val, ptr %a, i32 8, <2 x i1> %mask)
ret void
}
-declare void @llvm.masked.store.v2f64.p0v2f64(<2 x double>, <2 x double>*, i32, <2 x i1>)
+declare void @llvm.masked.store.v2f64.p0v2f64(<2 x double>, ptr, i32, <2 x i1>)
-define void @masked_store_v4f16(<4 x half>* %val_ptr, <4 x half>* %a, <4 x half>* %m_ptr) nounwind {
+define void @masked_store_v4f16(ptr %val_ptr, ptr %a, ptr %m_ptr) nounwind {
; CHECK-LABEL: masked_store_v4f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
@@ -140,15 +140,15 @@ define void @masked_store_v4f16(<4 x half>* %val_ptr, <4 x half>* %a, <4 x half>
; CHECK-NEXT: vmfeq.vf v0, v8, fa5
; CHECK-NEXT: vse16.v v9, (a1), v0.t
; CHECK-NEXT: ret
- %m = load <4 x half>, <4 x half>* %m_ptr
+ %m = load <4 x half>, ptr %m_ptr
%mask = fcmp oeq <4 x half> %m, zeroinitializer
- %val = load <4 x half>, <4 x half>* %val_ptr
- call void @llvm.masked.store.v4f16.p0v4f16(<4 x half> %val, <4 x half>* %a, i32 8, <4 x i1> %mask)
+ %val = load <4 x half>, ptr %val_ptr
+ call void @llvm.masked.store.v4f16.p0v4f16(<4 x half> %val, ptr %a, i32 8, <4 x i1> %mask)
ret void
}
-declare void @llvm.masked.store.v4f16.p0v4f16(<4 x half>, <4 x half>*, i32, <4 x i1>)
+declare void @llvm.masked.store.v4f16.p0v4f16(<4 x half>, ptr, i32, <4 x i1>)
-define void @masked_store_v4f32(<4 x float>* %val_ptr, <4 x float>* %a, <4 x float>* %m_ptr) nounwind {
+define void @masked_store_v4f32(ptr %val_ptr, ptr %a, ptr %m_ptr) nounwind {
; CHECK-LABEL: masked_store_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
@@ -158,15 +158,15 @@ define void @masked_store_v4f32(<4 x float>* %val_ptr, <4 x float>* %a, <4 x flo
; CHECK-NEXT: vmfeq.vf v0, v8, fa5
; CHECK-NEXT: vse32.v v9, (a1), v0.t
; CHECK-NEXT: ret
- %m = load <4 x float>, <4 x float>* %m_ptr
+ %m = load <4 x float>, ptr %m_ptr
%mask = fcmp oeq <4 x float> %m, zeroinitializer
- %val = load <4 x float>, <4 x float>* %val_ptr
- call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %val, <4 x float>* %a, i32 8, <4 x i1> %mask)
+ %val = load <4 x float>, ptr %val_ptr
+ call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %val, ptr %a, i32 8, <4 x i1> %mask)
ret void
}
-declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>)
+declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, ptr, i32, <4 x i1>)
-define void @masked_store_v4f64(<4 x double>* %val_ptr, <4 x double>* %a, <4 x double>* %m_ptr) nounwind {
+define void @masked_store_v4f64(ptr %val_ptr, ptr %a, ptr %m_ptr) nounwind {
; RV32-LABEL: masked_store_v4f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
@@ -186,15 +186,15 @@ define void @masked_store_v4f64(<4 x double>* %val_ptr, <4 x double>* %a, <4 x d
; RV64-NEXT: vmfeq.vf v0, v8, fa5
; RV64-NEXT: vse64.v v10, (a1), v0.t
; RV64-NEXT: ret
- %m = load <4 x double>, <4 x double>* %m_ptr
+ %m = load <4 x double>, ptr %m_ptr
%mask = fcmp oeq <4 x double> %m, zeroinitializer
- %val = load <4 x double>, <4 x double>* %val_ptr
- call void @llvm.masked.store.v4f64.p0v4f64(<4 x double> %val, <4 x double>* %a, i32 8, <4 x i1> %mask)
+ %val = load <4 x double>, ptr %val_ptr
+ call void @llvm.masked.store.v4f64.p0v4f64(<4 x double> %val, ptr %a, i32 8, <4 x i1> %mask)
ret void
}
-declare void @llvm.masked.store.v4f64.p0v4f64(<4 x double>, <4 x double>*, i32, <4 x i1>)
+declare void @llvm.masked.store.v4f64.p0v4f64(<4 x double>, ptr, i32, <4 x i1>)
-define void @masked_store_v8f16(<8 x half>* %val_ptr, <8 x half>* %a, <8 x half>* %m_ptr) nounwind {
+define void @masked_store_v8f16(ptr %val_ptr, ptr %a, ptr %m_ptr) nounwind {
; CHECK-LABEL: masked_store_v8f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
@@ -204,15 +204,15 @@ define void @masked_store_v8f16(<8 x half>* %val_ptr, <8 x half>* %a, <8 x half>
; CHECK-NEXT: vmfeq.vf v0, v8, fa5
; CHECK-NEXT: vse16.v v9, (a1), v0.t
; CHECK-NEXT: ret
- %m = load <8 x half>, <8 x half>* %m_ptr
+ %m = load <8 x half>, ptr %m_ptr
%mask = fcmp oeq <8 x half> %m, zeroinitializer
- %val = load <8 x half>, <8 x half>* %val_ptr
- call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %val, <8 x half>* %a, i32 8, <8 x i1> %mask)
+ %val = load <8 x half>, ptr %val_ptr
+ call void @llvm.masked.store.v8f16.p0v8f16(<8 x half> %val, ptr %a, i32 8, <8 x i1> %mask)
ret void
}
-declare void @llvm.masked.store.v8f16.p0v8f16(<8 x half>, <8 x half>*, i32, <8 x i1>)
+declare void @llvm.masked.store.v8f16.p0v8f16(<8 x half>, ptr, i32, <8 x i1>)
-define void @masked_store_v8f32(<8 x float>* %val_ptr, <8 x float>* %a, <8 x float>* %m_ptr) nounwind {
+define void @masked_store_v8f32(ptr %val_ptr, ptr %a, ptr %m_ptr) nounwind {
; CHECK-LABEL: masked_store_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
@@ -222,15 +222,15 @@ define void @masked_store_v8f32(<8 x float>* %val_ptr, <8 x float>* %a, <8 x flo
; CHECK-NEXT: vmfeq.vf v0, v8, fa5
; CHECK-NEXT: vse32.v v10, (a1), v0.t
; CHECK-NEXT: ret
- %m = load <8 x float>, <8 x float>* %m_ptr
+ %m = load <8 x float>, ptr %m_ptr
%mask = fcmp oeq <8 x float> %m, zeroinitializer
- %val = load <8 x float>, <8 x float>* %val_ptr
- call void @llvm.masked.store.v8f32.p0v8f32(<8 x float> %val, <8 x float>* %a, i32 8, <8 x i1> %mask)
+ %val = load <8 x float>, ptr %val_ptr
+ call void @llvm.masked.store.v8f32.p0v8f32(<8 x float> %val, ptr %a, i32 8, <8 x i1> %mask)
ret void
}
-declare void @llvm.masked.store.v8f32.p0v8f32(<8 x float>, <8 x float>*, i32, <8 x i1>)
+declare void @llvm.masked.store.v8f32.p0v8f32(<8 x float>, ptr, i32, <8 x i1>)
-define void @masked_store_v8f64(<8 x double>* %val_ptr, <8 x double>* %a, <8 x double>* %m_ptr) nounwind {
+define void @masked_store_v8f64(ptr %val_ptr, ptr %a, ptr %m_ptr) nounwind {
; RV32-LABEL: masked_store_v8f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
@@ -250,15 +250,15 @@ define void @masked_store_v8f64(<8 x double>* %val_ptr, <8 x double>* %a, <8 x d
; RV64-NEXT: vmfeq.vf v0, v8, fa5
; RV64-NEXT: vse64.v v12, (a1), v0.t
; RV64-NEXT: ret
- %m = load <8 x double>, <8 x double>* %m_ptr
+ %m = load <8 x double>, ptr %m_ptr
%mask = fcmp oeq <8 x double> %m, zeroinitializer
- %val = load <8 x double>, <8 x double>* %val_ptr
- call void @llvm.masked.store.v8f64.p0v8f64(<8 x double> %val, <8 x double>* %a, i32 8, <8 x i1> %mask)
+ %val = load <8 x double>, ptr %val_ptr
+ call void @llvm.masked.store.v8f64.p0v8f64(<8 x double> %val, ptr %a, i32 8, <8 x i1> %mask)
ret void
}
-declare void @llvm.masked.store.v8f64.p0v8f64(<8 x double>, <8 x double>*, i32, <8 x i1>)
+declare void @llvm.masked.store.v8f64.p0v8f64(<8 x double>, ptr, i32, <8 x i1>)
-define void @masked_store_v16f16(<16 x half>* %val_ptr, <16 x half>* %a, <16 x half>* %m_ptr) nounwind {
+define void @masked_store_v16f16(ptr %val_ptr, ptr %a, ptr %m_ptr) nounwind {
; CHECK-LABEL: masked_store_v16f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
@@ -268,15 +268,15 @@ define void @masked_store_v16f16(<16 x half>* %val_ptr, <16 x half>* %a, <16 x h
; CHECK-NEXT: vmfeq.vf v0, v8, fa5
; CHECK-NEXT: vse16.v v10, (a1), v0.t
; CHECK-NEXT: ret
- %m = load <16 x half>, <16 x half>* %m_ptr
+ %m = load <16 x half>, ptr %m_ptr
%mask = fcmp oeq <16 x half> %m, zeroinitializer
- %val = load <16 x half>, <16 x half>* %val_ptr
- call void @llvm.masked.store.v16f16.p0v16f16(<16 x half> %val, <16 x half>* %a, i32 8, <16 x i1> %mask)
+ %val = load <16 x half>, ptr %val_ptr
+ call void @llvm.masked.store.v16f16.p0v16f16(<16 x half> %val, ptr %a, i32 8, <16 x i1> %mask)
ret void
}
-declare void @llvm.masked.store.v16f16.p0v16f16(<16 x half>, <16 x half>*, i32, <16 x i1>)
+declare void @llvm.masked.store.v16f16.p0v16f16(<16 x half>, ptr, i32, <16 x i1>)
-define void @masked_store_v16f32(<16 x float>* %val_ptr, <16 x float>* %a, <16 x float>* %m_ptr) nounwind {
+define void @masked_store_v16f32(ptr %val_ptr, ptr %a, ptr %m_ptr) nounwind {
; CHECK-LABEL: masked_store_v16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
@@ -286,15 +286,15 @@ define void @masked_store_v16f32(<16 x float>* %val_ptr, <16 x float>* %a, <16 x
; CHECK-NEXT: vmfeq.vf v0, v8, fa5
; CHECK-NEXT: vse32.v v12, (a1), v0.t
; CHECK-NEXT: ret
- %m = load <16 x float>, <16 x float>* %m_ptr
+ %m = load <16 x float>, ptr %m_ptr
%mask = fcmp oeq <16 x float> %m, zeroinitializer
- %val = load <16 x float>, <16 x float>* %val_ptr
- call void @llvm.masked.store.v16f32.p0v16f32(<16 x float> %val, <16 x float>* %a, i32 8, <16 x i1> %mask)
+ %val = load <16 x float>, ptr %val_ptr
+ call void @llvm.masked.store.v16f32.p0v16f32(<16 x float> %val, ptr %a, i32 8, <16 x i1> %mask)
ret void
}
-declare void @llvm.masked.store.v16f32.p0v16f32(<16 x float>, <16 x float>*, i32, <16 x i1>)
+declare void @llvm.masked.store.v16f32.p0v16f32(<16 x float>, ptr, i32, <16 x i1>)
-define void @masked_store_v16f64(<16 x double>* %val_ptr, <16 x double>* %a, <16 x double>* %m_ptr) nounwind {
+define void @masked_store_v16f64(ptr %val_ptr, ptr %a, ptr %m_ptr) nounwind {
; RV32-LABEL: masked_store_v16f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
@@ -314,15 +314,15 @@ define void @masked_store_v16f64(<16 x double>* %val_ptr, <16 x double>* %a, <16
; RV64-NEXT: vmfeq.vf v0, v8, fa5
; RV64-NEXT: vse64.v v16, (a1), v0.t
; RV64-NEXT: ret
- %m = load <16 x double>, <16 x double>* %m_ptr
+ %m = load <16 x double>, ptr %m_ptr
%mask = fcmp oeq <16 x double> %m, zeroinitializer
- %val = load <16 x double>, <16 x double>* %val_ptr
- call void @llvm.masked.store.v16f64.p0v16f64(<16 x double> %val, <16 x double>* %a, i32 8, <16 x i1> %mask)
+ %val = load <16 x double>, ptr %val_ptr
+ call void @llvm.masked.store.v16f64.p0v16f64(<16 x double> %val, ptr %a, i32 8, <16 x i1> %mask)
ret void
}
-declare void @llvm.masked.store.v16f64.p0v16f64(<16 x double>, <16 x double>*, i32, <16 x i1>)
+declare void @llvm.masked.store.v16f64.p0v16f64(<16 x double>, ptr, i32, <16 x i1>)
-define void @masked_store_v32f16(<32 x half>* %val_ptr, <32 x half>* %a, <32 x half>* %m_ptr) nounwind {
+define void @masked_store_v32f16(ptr %val_ptr, ptr %a, ptr %m_ptr) nounwind {
; CHECK-LABEL: masked_store_v32f16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a3, 32
@@ -333,15 +333,15 @@ define void @masked_store_v32f16(<32 x half>* %val_ptr, <32 x half>* %a, <32 x h
; CHECK-NEXT: vmfeq.vf v0, v8, fa5
; CHECK-NEXT: vse16.v v12, (a1), v0.t
; CHECK-NEXT: ret
- %m = load <32 x half>, <32 x half>* %m_ptr
+ %m = load <32 x half>, ptr %m_ptr
%mask = fcmp oeq <32 x half> %m, zeroinitializer
- %val = load <32 x half>, <32 x half>* %val_ptr
- call void @llvm.masked.store.v32f16.p0v32f16(<32 x half> %val, <32 x half>* %a, i32 8, <32 x i1> %mask)
+ %val = load <32 x half>, ptr %val_ptr
+ call void @llvm.masked.store.v32f16.p0v32f16(<32 x half> %val, ptr %a, i32 8, <32 x i1> %mask)
ret void
}
-declare void @llvm.masked.store.v32f16.p0v32f16(<32 x half>, <32 x half>*, i32, <32 x i1>)
+declare void @llvm.masked.store.v32f16.p0v32f16(<32 x half>, ptr, i32, <32 x i1>)
-define void @masked_store_v32f32(<32 x float>* %val_ptr, <32 x float>* %a, <32 x float>* %m_ptr) nounwind {
+define void @masked_store_v32f32(ptr %val_ptr, ptr %a, ptr %m_ptr) nounwind {
; CHECK-LABEL: masked_store_v32f32:
; CHECK: # %bb.0:
; CHECK-NEXT: li a3, 32
@@ -352,15 +352,15 @@ define void @masked_store_v32f32(<32 x float>* %val_ptr, <32 x float>* %a, <32 x
; CHECK-NEXT: vmfeq.vf v0, v8, fa5
; CHECK-NEXT: vse32.v v16, (a1), v0.t
; CHECK-NEXT: ret
- %m = load <32 x float>, <32 x float>* %m_ptr
+ %m = load <32 x float>, ptr %m_ptr
%mask = fcmp oeq <32 x float> %m, zeroinitializer
- %val = load <32 x float>, <32 x float>* %val_ptr
- call void @llvm.masked.store.v32f32.p0v32f32(<32 x float> %val, <32 x float>* %a, i32 8, <32 x i1> %mask)
+ %val = load <32 x float>, ptr %val_ptr
+ call void @llvm.masked.store.v32f32.p0v32f32(<32 x float> %val, ptr %a, i32 8, <32 x i1> %mask)
ret void
}
-declare void @llvm.masked.store.v32f32.p0v32f32(<32 x float>, <32 x float>*, i32, <32 x i1>)
+declare void @llvm.masked.store.v32f32.p0v32f32(<32 x float>, ptr, i32, <32 x i1>)
-define void @masked_store_v32f64(<32 x double>* %val_ptr, <32 x double>* %a, <32 x double>* %m_ptr) nounwind {
+define void @masked_store_v32f64(ptr %val_ptr, ptr %a, ptr %m_ptr) nounwind {
; RV32-LABEL: masked_store_v32f64:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
@@ -440,15 +440,15 @@ define void @masked_store_v32f64(<32 x double>* %val_ptr, <32 x double>* %a, <32
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
- %m = load <32 x double>, <32 x double>* %m_ptr
+ %m = load <32 x double>, ptr %m_ptr
%mask = fcmp oeq <32 x double> %m, zeroinitializer
- %val = load <32 x double>, <32 x double>* %val_ptr
- call void @llvm.masked.store.v32f32.p0v32f64(<32 x double> %val, <32 x double>* %a, i32 8, <32 x i1> %mask)
+ %val = load <32 x double>, ptr %val_ptr
+ call void @llvm.masked.store.v32f32.p0v32f64(<32 x double> %val, ptr %a, i32 8, <32 x i1> %mask)
ret void
}
-declare void @llvm.masked.store.v32f32.p0v32f64(<32 x double>, <32 x double>*, i32, <32 x i1>)
+declare void @llvm.masked.store.v32f32.p0v32f64(<32 x double>, ptr, i32, <32 x i1>)
-define void @masked_store_v64f16(<64 x half>* %val_ptr, <64 x half>* %a, <64 x half>* %m_ptr) nounwind {
+define void @masked_store_v64f16(ptr %val_ptr, ptr %a, ptr %m_ptr) nounwind {
; CHECK-LABEL: masked_store_v64f16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a3, 64
@@ -459,15 +459,15 @@ define void @masked_store_v64f16(<64 x half>* %val_ptr, <64 x half>* %a, <64 x h
; CHECK-NEXT: vmfeq.vf v0, v8, fa5
; CHECK-NEXT: vse16.v v16, (a1), v0.t
; CHECK-NEXT: ret
- %m = load <64 x half>, <64 x half>* %m_ptr
+ %m = load <64 x half>, ptr %m_ptr
%mask = fcmp oeq <64 x half> %m, zeroinitializer
- %val = load <64 x half>, <64 x half>* %val_ptr
- call void @llvm.masked.store.v64f16.p0v64f16(<64 x half> %val, <64 x half>* %a, i32 8, <64 x i1> %mask)
+ %val = load <64 x half>, ptr %val_ptr
+ call void @llvm.masked.store.v64f16.p0v64f16(<64 x half> %val, ptr %a, i32 8, <64 x i1> %mask)
ret void
}
-declare void @llvm.masked.store.v64f16.p0v64f16(<64 x half>, <64 x half>*, i32, <64 x i1>)
+declare void @llvm.masked.store.v64f16.p0v64f16(<64 x half>, ptr, i32, <64 x i1>)
-define void @masked_store_v64f32(<64 x float>* %val_ptr, <64 x float>* %a, <64 x float>* %m_ptr) nounwind {
+define void @masked_store_v64f32(ptr %val_ptr, ptr %a, ptr %m_ptr) nounwind {
; CHECK-LABEL: masked_store_v64f32:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
@@ -508,15 +508,15 @@ define void @masked_store_v64f32(<64 x float>* %val_ptr, <64 x float>* %a, <64 x
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
- %m = load <64 x float>, <64 x float>* %m_ptr
+ %m = load <64 x float>, ptr %m_ptr
%mask = fcmp oeq <64 x float> %m, zeroinitializer
- %val = load <64 x float>, <64 x float>* %val_ptr
- call void @llvm.masked.store.v64f16.p0v64f32(<64 x float> %val, <64 x float>* %a, i32 8, <64 x i1> %mask)
+ %val = load <64 x float>, ptr %val_ptr
+ call void @llvm.masked.store.v64f16.p0v64f32(<64 x float> %val, ptr %a, i32 8, <64 x i1> %mask)
ret void
}
-declare void @llvm.masked.store.v64f16.p0v64f32(<64 x float>, <64 x float>*, i32, <64 x i1>)
+declare void @llvm.masked.store.v64f16.p0v64f32(<64 x float>, ptr, i32, <64 x i1>)
-define void @masked_store_v128f16(<128 x half>* %val_ptr, <128 x half>* %a, <128 x half>* %m_ptr) nounwind {
+define void @masked_store_v128f16(ptr %val_ptr, ptr %a, ptr %m_ptr) nounwind {
; CHECK-LABEL: masked_store_v128f16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
@@ -557,10 +557,10 @@ define void @masked_store_v128f16(<128 x half>* %val_ptr, <128 x half>* %a, <128
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
- %m = load <128 x half>, <128 x half>* %m_ptr
+ %m = load <128 x half>, ptr %m_ptr
%mask = fcmp oeq <128 x half> %m, zeroinitializer
- %val = load <128 x half>, <128 x half>* %val_ptr
- call void @llvm.masked.store.v128f16.p0v128f16(<128 x half> %val, <128 x half>* %a, i32 8, <128 x i1> %mask)
+ %val = load <128 x half>, ptr %val_ptr
+ call void @llvm.masked.store.v128f16.p0v128f16(<128 x half> %val, ptr %a, i32 8, <128 x i1> %mask)
ret void
}
-declare void @llvm.masked.store.v128f16.p0v128f16(<128 x half>, <128 x half>*, i32, <128 x i1>)
+declare void @llvm.masked.store.v128f16.p0v128f16(<128 x half>, ptr, i32, <128 x i1>)
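(One note on the declarations above: the intrinsic names keep their legacy pointer-mangled suffixes such as .p0v1f16. Under opaque pointers the canonical suffix is just .p0, and the IR parser auto-upgrades the old names, so keeping them is harmless here. For illustration, a hypothetical remangled form of the first declaration would be:

    declare void @llvm.masked.store.v1f16.p0(<1 x half>, ptr, i32, <1 x i1>)
)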