[llvm] 4600e21 - [AArch64][SVE] Drop "argmemonly" from gather/scatter with vector base.
    Eli Friedman via llvm-commits
    llvm-commits at lists.llvm.org
    Fri Sep 25 16:01:26 PDT 2020

Author: Eli Friedman
Date: 2020-09-25T16:01:05-07:00
New Revision: 4600e210514281d2ac049e1c46d3f10bd17bf25c
URL: https://github.com/llvm/llvm-project/commit/4600e210514281d2ac049e1c46d3f10bd17bf25c
DIFF: https://github.com/llvm/llvm-project/commit/4600e210514281d2ac049e1c46d3f10bd17bf25c.diff
LOG: [AArch64][SVE] Drop "argmemonly" from gather/scatter with vector base.
The intrinsics don't have any pointer arguments; their addresses arrive in
a vector register. "argmemonly" promises that a call only touches memory
reachable from its pointer arguments, so with no pointer arguments present,
optimizations conclude these intrinsics don't read or write memory at all.
Differential Revision: https://reviews.llvm.org/D88186
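
A hedged illustration (not from the patch itself): these intrinsics take
their base addresses in a vector of i64, so there is no pointer-typed
argument for "argmemonly" to refer to. Under the old attributes, a call
shaped like the one in the test below was modeled as touching no memory at
all, letting passes such as LICM move it past stores that may alias the
gathered addresses:

  ; %ptr_vec carries the base addresses as <vscale x 2 x i64>; no pointer args.
  %v = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> %pred, <vscale x 2 x i64> %ptr_vec, i64 0)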
Added: 
    
Modified: 
    llvm/include/llvm/IR/IntrinsicsAArch64.td
    llvm/test/Transforms/LICM/AArch64/sve-load-hoist.ll
Removed: 
    
################################################################################
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td
index c1b780be17c6..248b4c429d6e 100644
--- a/llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -1300,7 +1300,7 @@ class AdvSIMD_GatherLoad_VS_Intrinsic
                   llvm_anyvector_ty,
                   llvm_i64_ty
                 ],
-                [IntrReadMem, IntrArgMemOnly]>;
+                [IntrReadMem]>;
 
 class AdvSIMD_ScatterStore_SV_64b_Offsets_Intrinsic
     : Intrinsic<[],
@@ -1329,7 +1329,7 @@ class AdvSIMD_ScatterStore_VS_Intrinsic
                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                  llvm_anyvector_ty, llvm_i64_ty
                ],
-               [IntrWriteMem, IntrArgMemOnly]>;
+               [IntrWriteMem]>;
 
 
 class SVE_gather_prf_SV
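
(Aside, an illustrative sketch rather than part of the patch: on the
emitted intrinsic declarations, IntrReadMem/IntrWriteMem correspond to the
readonly/writeonly attributes and IntrArgMemOnly to argmemonly. Assuming
the usual intrinsic defaults, the gather's attribute set changes roughly
from

  attributes #0 = { argmemonly nounwind readonly }

to

  attributes #0 = { nounwind readonly }

With no pointer arguments to anchor it, the first set amounts to claiming
the call reads no memory at all.)
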
diff --git a/llvm/test/Transforms/LICM/AArch64/sve-load-hoist.ll b/llvm/test/Transforms/LICM/AArch64/sve-load-hoist.ll
index b0fcdb7d8dfc..c6c39a644665 100644
--- a/llvm/test/Transforms/LICM/AArch64/sve-load-hoist.ll
+++ b/llvm/test/Transforms/LICM/AArch64/sve-load-hoist.ll
@@ -26,5 +26,54 @@ for.end:
   ret void
 }
 
+define void @no_hoist_gather(<vscale x 2 x i32>* %out_ptr, <vscale x 2 x i32>* %in_ptr, <vscale x 2 x i64> %ptr_vec, i64 %n, <vscale x 2 x i1> %pred) {
+; CHECK-LABEL: @no_hoist_gather(
+; CHECK: entry:
+; CHECK-NOT: llvm.aarch64.sve.ld1.gather.scalar.offset
+; CHECK: for.body:
+; CHECK: llvm.aarch64.sve.ld1.gather.scalar.offset
+entry:
+  br label %for.body
+
+for.body:
+  %i = phi i64 [0, %entry], [%inc, %for.body]
+  %gather = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> %pred, <vscale x 2 x i64> %ptr_vec, i64 0)
+  %in_ptr_gep = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %in_ptr, i64 %i
+  %in_ptr_load = load <vscale x 2 x i32>, <vscale x 2 x i32>* %in_ptr_gep, align 8
+  %sum = add <vscale x 2 x i32> %gather, %in_ptr_load
+  %out_ptr_gep = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %out_ptr, i64 %i
+  store  <vscale x 2 x i32> %sum, <vscale x 2 x i32>* %out_ptr_gep, align 8
+  %inc = add nuw nsw i64 %i, 1
+  %cmp = icmp ult i64 %inc, %n
+  br i1 %cmp, label %for.body, label %for.end
+
+for.end:
+  ret void
+}
+
+define void @no_hoist_scatter(<vscale x 2 x i32>* %out_ptr, <vscale x 2 x i32>* %in_ptr, <vscale x 2 x i64> %ptr_vec, i64 %n, <vscale x 2 x i1> %pred) {
+; CHECK-LABEL: @no_hoist_scatter(
+; CHECK: entry:
+; CHECK-NOT: load
+; CHECK: for.body:
+; CHECK: load
+entry:
+  br label %for.body
+
+for.body:
+  %i = phi i64 [0, %entry], [%inc, %for.body]
+  %in_ptr_load = load <vscale x 2 x i32>, <vscale x 2 x i32>* %in_ptr, align 8
+  call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i32> %in_ptr_load, <vscale x 2 x i1> %pred, <vscale x 2 x i64> %ptr_vec, i64 %i)
+  %inc = add nuw nsw i64 %i, 1
+  %cmp = icmp ult i64 %inc, %n
+  br i1 %cmp, label %for.body, label %for.end
+
+for.end:
+  ret void
+}
+
 declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture) nounwind readonly
 
+declare void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i32>, <vscale x 2 x i1>, <vscale x 2 x i64>, i64)
+
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)
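
(For reference, a sketch of how these tests are exercised; the file's RUN
line is outside this hunk, so the exact flags below are an assumption:

  opt -mtriple=aarch64-linux-gnu -mattr=+sve -licm -S \
      llvm/test/Transforms/LICM/AArch64/sve-load-hoist.ll \
    | FileCheck llvm/test/Transforms/LICM/AArch64/sve-load-hoist.ll

With argmemonly gone, the gather may read and the scatter may write
arbitrary memory, so LICM keeps the gather inside for.body in
@no_hoist_gather and the load inside for.body in @no_hoist_scatter, as the
CHECK lines require.)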