[llvm] InferAddressSpaces: Handle prefetch intrinsic (PR #101982)

Matt Arsenault via llvm-commits <llvm-commits@lists.llvm.org>
Mon Aug 5 06:42:26 PDT 2024


https://github.com/arsenm created https://github.com/llvm/llvm-project/pull/101982

Teach InferAddressSpaces to rewrite the pointer operand of llvm.prefetch: when the flat pointer passed to the intrinsic can be traced back to a specific address space, the addrspacecast is bypassed and the prefetch is emitted directly on the original address space.
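
As a minimal before/after sketch of the rewrite (mirroring the prefetch_shared_to_flat test added in the second patch):

    ; Before: prefetch through an addrspacecast to the flat address space
    %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr
    tail call void @llvm.prefetch.p0(ptr %cast, i32 0, i32 0, i32 1)

    ; After infer-address-spaces: the cast is bypassed and the intrinsic
    ; is remangled for the inferred address space
    tail call void @llvm.prefetch.p3(ptr addrspace(3) %group.ptr, i32 0, i32 0, i32 1)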

From a2ad627bea1ba8aeba91d39555497692b8f96385 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault@amd.com>
Date: Mon, 5 Aug 2024 10:40:34 +0400
Subject: [PATCH 1/2] AMDGPU: Regenerate test checks

---
 .../AMDGPU/promote-kernel-arguments.ll        | 38 +++++++++----------
 1 file changed, 19 insertions(+), 19 deletions(-)

diff --git a/llvm/test/CodeGen/AMDGPU/promote-kernel-arguments.ll b/llvm/test/CodeGen/AMDGPU/promote-kernel-arguments.ll
index c9686e661fe14..8247f883661af 100644
--- a/llvm/test/CodeGen/AMDGPU/promote-kernel-arguments.ll
+++ b/llvm/test/CodeGen/AMDGPU/promote-kernel-arguments.ll
@@ -11,9 +11,9 @@ define amdgpu_kernel void @ptr_nest_3(ptr addrspace(1) nocapture readonly %Arg)
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[I:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[P1:%.*]] = getelementptr inbounds ptr, ptr addrspace(1) [[ARG:%.*]], i32 [[I]]
-; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr addrspace(1) [[P1]], align 8, !amdgpu.noclobber !0
+; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr addrspace(1) [[P1]], align 8, !amdgpu.noclobber [[META0:![0-9]+]]
 ; CHECK-NEXT:    [[P2_GLOBAL:%.*]] = addrspacecast ptr [[P2]] to ptr addrspace(1)
-; CHECK-NEXT:    [[P3:%.*]] = load ptr, ptr addrspace(1) [[P2_GLOBAL]], align 8, !amdgpu.noclobber !0
+; CHECK-NEXT:    [[P3:%.*]] = load ptr, ptr addrspace(1) [[P2_GLOBAL]], align 8, !amdgpu.noclobber [[META0]]
 ; CHECK-NEXT:    [[P3_GLOBAL:%.*]] = addrspacecast ptr [[P3]] to ptr addrspace(1)
 ; CHECK-NEXT:    store float 0.000000e+00, ptr addrspace(1) [[P3_GLOBAL]], align 4
 ; CHECK-NEXT:    ret void
@@ -36,7 +36,7 @@ define amdgpu_kernel void @ptr_bitcast(ptr nocapture readonly %Arg) {
 ; CHECK-NEXT:    [[ARG_GLOBAL:%.*]] = addrspacecast ptr [[ARG:%.*]] to ptr addrspace(1)
 ; CHECK-NEXT:    [[I:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[P1:%.*]] = getelementptr inbounds ptr, ptr addrspace(1) [[ARG_GLOBAL]], i32 [[I]]
-; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr addrspace(1) [[P1]], align 8, !amdgpu.noclobber !0
+; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr addrspace(1) [[P1]], align 8, !amdgpu.noclobber [[META0]]
 ; CHECK-NEXT:    [[P2_GLOBAL:%.*]] = addrspacecast ptr [[P2]] to ptr addrspace(1)
 ; CHECK-NEXT:    store i32 0, ptr addrspace(1) [[P2_GLOBAL]], align 4
 ; CHECK-NEXT:    ret void
@@ -57,7 +57,7 @@ entry:
 define amdgpu_kernel void @ptr_in_struct(ptr addrspace(1) nocapture readonly %Arg) {
 ; CHECK-LABEL: @ptr_in_struct(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[P1:%.*]] = load ptr, ptr addrspace(1) [[ARG:%.*]], align 8, !amdgpu.noclobber !0
+; CHECK-NEXT:    [[P1:%.*]] = load ptr, ptr addrspace(1) [[ARG:%.*]], align 8, !amdgpu.noclobber [[META0]]
 ; CHECK-NEXT:    [[P1_GLOBAL:%.*]] = addrspacecast ptr [[P1]] to ptr addrspace(1)
 ; CHECK-NEXT:    [[ID:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr addrspace(1) [[P1_GLOBAL]], i32 [[ID]]
@@ -86,9 +86,9 @@ define amdgpu_kernel void @flat_ptr_arg(ptr nocapture readonly noalias %Arg, ptr
 ; CHECK-NEXT:    [[I:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[IDXPROM:%.*]] = zext i32 [[I]] to i64
 ; CHECK-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds ptr, ptr addrspace(1) [[ARG_GLOBAL]], i64 [[IDXPROM]]
-; CHECK-NEXT:    [[I1:%.*]] = load ptr, ptr addrspace(1) [[ARRAYIDX10]], align 8, !amdgpu.noclobber !0
+; CHECK-NEXT:    [[I1:%.*]] = load ptr, ptr addrspace(1) [[ARRAYIDX10]], align 8, !amdgpu.noclobber [[META0]]
 ; CHECK-NEXT:    [[I1_GLOBAL:%.*]] = addrspacecast ptr [[I1]] to ptr addrspace(1)
-; CHECK-NEXT:    [[I2:%.*]] = load float, ptr addrspace(1) [[I1_GLOBAL]], align 4, !amdgpu.noclobber !0
+; CHECK-NEXT:    [[I2:%.*]] = load float, ptr addrspace(1) [[I1_GLOBAL]], align 4, !amdgpu.noclobber [[META0]]
 ; CHECK-NEXT:    [[ARRAYIDX512:%.*]] = getelementptr inbounds [4 x float], ptr addrspace(3) @LDS, i32 0, i32 [[X:%.*]]
 ; CHECK-NEXT:    store float [[I2]], ptr addrspace(3) [[ARRAYIDX512]], align 4
 ; CHECK-NEXT:    [[ARRAYIDX3_1:%.*]] = getelementptr inbounds float, ptr addrspace(1) [[I1_GLOBAL]], i64 1
@@ -110,7 +110,7 @@ define amdgpu_kernel void @flat_ptr_arg(ptr nocapture readonly noalias %Arg, ptr
 ; CHECK-NEXT:    [[ARRAYIDX711:%.*]] = getelementptr inbounds [4 x float], ptr addrspace(3) @LDS, i32 0, i32 [[SUB]]
 ; CHECK-NEXT:    [[I6:%.*]] = load float, ptr addrspace(3) [[ARRAYIDX711]], align 4
 ; CHECK-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds ptr, ptr addrspace(1) [[OUT_GLOBAL]], i64 [[IDXPROM]]
-; CHECK-NEXT:    [[I7:%.*]] = load ptr, ptr addrspace(1) [[ARRAYIDX11]], align 8, !amdgpu.noclobber !0
+; CHECK-NEXT:    [[I7:%.*]] = load ptr, ptr addrspace(1) [[ARRAYIDX11]], align 8, !amdgpu.noclobber [[META0]]
 ; CHECK-NEXT:    [[I7_GLOBAL:%.*]] = addrspacecast ptr [[I7]] to ptr addrspace(1)
 ; CHECK-NEXT:    [[IDXPROM8:%.*]] = sext i32 [[X]] to i64
 ; CHECK-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds float, ptr addrspace(1) [[I7_GLOBAL]], i64 [[IDXPROM8]]
@@ -161,9 +161,9 @@ define amdgpu_kernel void @global_ptr_arg(ptr addrspace(1) nocapture readonly %A
 ; CHECK-NEXT:    [[I:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[IDXPROM:%.*]] = zext i32 [[I]] to i64
 ; CHECK-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds ptr, ptr addrspace(1) [[ARG:%.*]], i64 [[IDXPROM]]
-; CHECK-NEXT:    [[I1:%.*]] = load ptr, ptr addrspace(1) [[ARRAYIDX10]], align 8, !amdgpu.noclobber !0
+; CHECK-NEXT:    [[I1:%.*]] = load ptr, ptr addrspace(1) [[ARRAYIDX10]], align 8, !amdgpu.noclobber [[META0]]
 ; CHECK-NEXT:    [[I1_GLOBAL:%.*]] = addrspacecast ptr [[I1]] to ptr addrspace(1)
-; CHECK-NEXT:    [[I2:%.*]] = load float, ptr addrspace(1) [[I1_GLOBAL]], align 4, !amdgpu.noclobber !0
+; CHECK-NEXT:    [[I2:%.*]] = load float, ptr addrspace(1) [[I1_GLOBAL]], align 4, !amdgpu.noclobber [[META0]]
 ; CHECK-NEXT:    [[ARRAYIDX512:%.*]] = getelementptr inbounds [4 x float], ptr addrspace(3) @LDS, i32 0, i32 [[X:%.*]]
 ; CHECK-NEXT:    store float [[I2]], ptr addrspace(3) [[ARRAYIDX512]], align 4
 ; CHECK-NEXT:    [[ARRAYIDX3_1:%.*]] = getelementptr inbounds float, ptr addrspace(1) [[I1_GLOBAL]], i64 1
@@ -276,7 +276,7 @@ define amdgpu_kernel void @global_ptr_arg_clobbered_after_load(ptr addrspace(1)
 ; CHECK-NEXT:    [[I:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[IDXPROM:%.*]] = zext i32 [[I]] to i64
 ; CHECK-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds ptr, ptr addrspace(1) [[ARG:%.*]], i64 [[IDXPROM]]
-; CHECK-NEXT:    [[I1:%.*]] = load ptr, ptr addrspace(1) [[ARRAYIDX10]], align 8, !amdgpu.noclobber !0
+; CHECK-NEXT:    [[I1:%.*]] = load ptr, ptr addrspace(1) [[ARRAYIDX10]], align 8, !amdgpu.noclobber [[META0]]
 ; CHECK-NEXT:    [[I1_GLOBAL:%.*]] = addrspacecast ptr [[I1]] to ptr addrspace(1)
 ; CHECK-NEXT:    [[ARRAYIDX11:%.*]] = getelementptr inbounds ptr, ptr addrspace(1) [[ARRAYIDX10]], i32 [[X:%.*]]
 ; CHECK-NEXT:    store ptr null, ptr addrspace(1) [[ARRAYIDX11]], align 4
@@ -319,9 +319,9 @@ define amdgpu_kernel void @ptr_nest_3_barrier(ptr addrspace(1) nocapture readonl
 ; CHECK-NEXT:    [[I:%.*]] = tail call i32 @llvm.amdgcn.workitem.id.x()
 ; CHECK-NEXT:    [[P1:%.*]] = getelementptr inbounds ptr, ptr addrspace(1) [[ARG:%.*]], i32 [[I]]
 ; CHECK-NEXT:    tail call void @llvm.amdgcn.s.barrier()
-; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr addrspace(1) [[P1]], align 8, !amdgpu.noclobber !0
+; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr addrspace(1) [[P1]], align 8, !amdgpu.noclobber [[META0]]
 ; CHECK-NEXT:    [[P2_GLOBAL:%.*]] = addrspacecast ptr [[P2]] to ptr addrspace(1)
-; CHECK-NEXT:    [[P3:%.*]] = load ptr, ptr addrspace(1) [[P2_GLOBAL]], align 8, !amdgpu.noclobber !0
+; CHECK-NEXT:    [[P3:%.*]] = load ptr, ptr addrspace(1) [[P2_GLOBAL]], align 8, !amdgpu.noclobber [[META0]]
 ; CHECK-NEXT:    [[P3_GLOBAL:%.*]] = addrspacecast ptr [[P3]] to ptr addrspace(1)
 ; CHECK-NEXT:    store float 0.000000e+00, ptr addrspace(1) [[P3_GLOBAL]], align 4
 ; CHECK-NEXT:    ret void
@@ -345,7 +345,7 @@ define amdgpu_kernel void @flat_ptr_nest_2(ptr nocapture readonly %Arg, i32 %i)
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[ARG_GLOBAL:%.*]] = addrspacecast ptr [[ARG:%.*]] to ptr addrspace(1)
 ; CHECK-NEXT:    [[P1:%.*]] = getelementptr inbounds ptr, ptr addrspace(1) [[ARG_GLOBAL]], i32 [[I:%.*]]
-; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr addrspace(1) [[P1]], align 8, !amdgpu.noclobber !0
+; CHECK-NEXT:    [[P2:%.*]] = load ptr, ptr addrspace(1) [[P1]], align 8, !amdgpu.noclobber [[META0]]
 ; CHECK-NEXT:    [[P2_GLOBAL:%.*]] = addrspacecast ptr [[P2]] to ptr addrspace(1)
 ; CHECK-NEXT:    store float 0.000000e+00, ptr addrspace(1) [[P2_GLOBAL]], align 4
 ; CHECK-NEXT:    ret void
@@ -366,8 +366,8 @@ define amdgpu_kernel void @const_ptr_nest_3(ptr addrspace(4) nocapture readonly
 ; CHECK-LABEL: @const_ptr_nest_3(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[P1:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[ARG:%.*]], i32 [[I:%.*]]
-; CHECK-NEXT:    [[P2:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[P1]], align 8, !amdgpu.noclobber !0
-; CHECK-NEXT:    [[P3:%.*]] = load ptr, ptr addrspace(4) [[P2]], align 8, !amdgpu.noclobber !0
+; CHECK-NEXT:    [[P2:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[P1]], align 8, !amdgpu.noclobber [[META0]]
+; CHECK-NEXT:    [[P3:%.*]] = load ptr, ptr addrspace(4) [[P2]], align 8, !amdgpu.noclobber [[META0]]
 ; CHECK-NEXT:    [[TMP0:%.*]] = addrspacecast ptr [[P3]] to ptr addrspace(1)
 ; CHECK-NEXT:    store float 0.000000e+00, ptr addrspace(1) [[TMP0]], align 4
 ; CHECK-NEXT:    ret void
@@ -389,8 +389,8 @@ define amdgpu_kernel void @cast_from_const_const_ptr_nest_3(ptr addrspace(4) noc
 ; CHECK-LABEL: @cast_from_const_const_ptr_nest_3(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[P1:%.*]] = getelementptr inbounds ptr addrspace(4), ptr addrspace(4) [[ARG:%.*]], i32 [[I:%.*]]
-; CHECK-NEXT:    [[P2:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[P1]], align 8, !amdgpu.noclobber !0
-; CHECK-NEXT:    [[P3:%.*]] = load ptr, ptr addrspace(4) [[P2]], align 8, !amdgpu.noclobber !0
+; CHECK-NEXT:    [[P2:%.*]] = load ptr addrspace(4), ptr addrspace(4) [[P1]], align 8, !amdgpu.noclobber [[META0]]
+; CHECK-NEXT:    [[P3:%.*]] = load ptr, ptr addrspace(4) [[P2]], align 8, !amdgpu.noclobber [[META0]]
 ; CHECK-NEXT:    [[P3_GLOBAL:%.*]] = addrspacecast ptr [[P3]] to ptr addrspace(1)
 ; CHECK-NEXT:    store float 0.000000e+00, ptr addrspace(1) [[P3_GLOBAL]], align 4
 ; CHECK-NEXT:    ret void
@@ -457,8 +457,8 @@ define amdgpu_kernel void @cast_changing_pointee_type(ptr addrspace(1) nocapture
 ; CHECK-LABEL: @cast_changing_pointee_type(
 ; CHECK-NEXT:  entry:
 ; CHECK-NEXT:    [[P1:%.*]] = getelementptr inbounds ptr addrspace(1), ptr addrspace(1) [[ARG:%.*]], i32 [[I:%.*]]
-; CHECK-NEXT:    [[P2:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[P1]], align 8, !amdgpu.noclobber !0
-; CHECK-NEXT:    [[P3:%.*]] = load ptr, ptr addrspace(1) [[P2]], align 8, !amdgpu.noclobber !0
+; CHECK-NEXT:    [[P2:%.*]] = load ptr addrspace(1), ptr addrspace(1) [[P1]], align 8, !amdgpu.noclobber [[META0]]
+; CHECK-NEXT:    [[P3:%.*]] = load ptr, ptr addrspace(1) [[P2]], align 8, !amdgpu.noclobber [[META0]]
 ; CHECK-NEXT:    [[P3_GLOBAL:%.*]] = addrspacecast ptr [[P3]] to ptr addrspace(1)
 ; CHECK-NEXT:    store float 0.000000e+00, ptr addrspace(1) [[P3_GLOBAL]], align 4
 ; CHECK-NEXT:    ret void

From 5955b3a654062ebb4bbf5f399c1bf7168ec49228 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault@amd.com>
Date: Mon, 5 Aug 2024 11:25:58 +0400
Subject: [PATCH 2/2] InferAddressSpaces: Handle prefetch intrinsic

---
 .../Transforms/Scalar/InferAddressSpaces.cpp  |  8 +++
 .../InferAddressSpaces/AMDGPU/prefetch.ll     | 59 +++++++++++++++++++
 2 files changed, 67 insertions(+)
 create mode 100644 llvm/test/Transforms/InferAddressSpaces/AMDGPU/prefetch.ll

diff --git a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
index 830c15249582c..87b885447cc02 100644
--- a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -401,6 +401,13 @@ bool InferAddressSpacesImpl::rewriteIntrinsicOperands(IntrinsicInst *II,
     II->setCalledFunction(NewDecl);
     return true;
   }
+  case Intrinsic::prefetch: {
+    Function *NewDecl =
+        Intrinsic::getDeclaration(M, II->getIntrinsicID(), {NewV->getType()});
+    II->setArgOperand(0, NewV);
+    II->setCalledFunction(NewDecl);
+    return true;
+  }
   default: {
     Value *Rewrite = TTI->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
     if (!Rewrite)
@@ -423,6 +430,7 @@ void InferAddressSpacesImpl::collectRewritableIntrinsicOperands(
                                                  PostorderStack, Visited);
     break;
   case Intrinsic::masked_gather:
+  case Intrinsic::prefetch:
     appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(0),
                                                  PostorderStack, Visited);
     break;
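
Since llvm.prefetch is overloaded on the address space of its pointer operand, the new case in rewriteIntrinsicOperands re-declares the intrinsic for the inferred pointer type and swaps in the rewritten pointer, while the collectRewritableIntrinsicOperands change queues operand 0 as a flat address expression for inference. Concretely, the two mangled declarations involved in the shared-memory test below would be:

    declare void @llvm.prefetch.p0(ptr, i32, i32, i32)
    declare void @llvm.prefetch.p3(ptr addrspace(3), i32, i32, i32)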
diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/prefetch.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/prefetch.ll
new file mode 100644
index 0000000000000..e2c12c4c37141
--- /dev/null
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/prefetch.ll
@@ -0,0 +1,59 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=infer-address-spaces %s | FileCheck %s
+
+define void @prefetch_shared_to_flat(ptr addrspace(3) %group.ptr) {
+; CHECK-LABEL: define void @prefetch_shared_to_flat(
+; CHECK-SAME: ptr addrspace(3) [[GROUP_PTR:%.*]]) {
+; CHECK-NEXT:    tail call void @llvm.prefetch.p3(ptr addrspace(3) [[GROUP_PTR]], i32 0, i32 0, i32 1)
+; CHECK-NEXT:    ret void
+;
+  %cast = addrspacecast ptr addrspace(3) %group.ptr to ptr
+  tail call void @llvm.prefetch.p0(ptr %cast, i32 0, i32 0, i32 1)
+  ret void
+}
+
+define void @prefetch_global_to_flat(ptr addrspace(1) %global.ptr) {
+; CHECK-LABEL: define void @prefetch_global_to_flat(
+; CHECK-SAME: ptr addrspace(1) [[GLOBAL_PTR:%.*]]) {
+; CHECK-NEXT:    tail call void @llvm.prefetch.p1(ptr addrspace(1) [[GLOBAL_PTR]], i32 0, i32 0, i32 1)
+; CHECK-NEXT:    ret void
+;
+  %cast = addrspacecast ptr addrspace(1) %global.ptr to ptr
+  tail call void @llvm.prefetch.p0(ptr addrspace(0) %cast, i32 0, i32 0, i32 1)
+  ret void
+}
+
+define void @prefetch_constant_to_flat(ptr addrspace(4) %const.ptr) {
+; CHECK-LABEL: define void @prefetch_constant_to_flat(
+; CHECK-SAME: ptr addrspace(4) [[CONST_PTR:%.*]]) {
+; CHECK-NEXT:    tail call void @llvm.prefetch.p4(ptr addrspace(4) [[CONST_PTR]], i32 0, i32 0, i32 1)
+; CHECK-NEXT:    ret void
+;
+  %cast = addrspacecast ptr addrspace(4) %const.ptr to ptr
+  tail call void @llvm.prefetch.p0(ptr %cast, i32 0, i32 0, i32 1)
+  ret void
+}
+
+define void @prefetch_flat_to_shared(ptr %flat.ptr) {
+; CHECK-LABEL: define void @prefetch_flat_to_shared(
+; CHECK-SAME: ptr [[FLAT_PTR:%.*]]) {
+; CHECK-NEXT:    [[CAST:%.*]] = addrspacecast ptr [[FLAT_PTR]] to ptr addrspace(3)
+; CHECK-NEXT:    tail call void @llvm.prefetch.p3(ptr addrspace(3) [[CAST]], i32 0, i32 0, i32 1)
+; CHECK-NEXT:    ret void
+;
+  %cast = addrspacecast ptr %flat.ptr to ptr addrspace(3)
+  tail call void @llvm.prefetch.p3(ptr addrspace(3) %cast, i32 0, i32 0, i32 1)
+  ret void
+}
+
+define void @prefetch_flat_to_global(ptr %flat.ptr) {
+; CHECK-LABEL: define void @prefetch_flat_to_global(
+; CHECK-SAME: ptr [[FLAT_PTR:%.*]]) {
+; CHECK-NEXT:    [[CAST:%.*]] = addrspacecast ptr [[FLAT_PTR]] to ptr addrspace(1)
+; CHECK-NEXT:    tail call void @llvm.prefetch.p1(ptr addrspace(1) [[CAST]], i32 0, i32 0, i32 1)
+; CHECK-NEXT:    ret void
+;
+  %cast = addrspacecast ptr %flat.ptr to ptr addrspace(1)
+  tail call void @llvm.prefetch.p1(ptr addrspace(1) %cast, i32 0, i32 0, i32 1)
+  ret void
+}
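
For reference, the trailing i32 arguments in these calls follow the LangRef definition of llvm.prefetch: rw=0 requests a read prefetch, locality=0 is the least temporal hint, and cache type=1 selects the data cache.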


