[llvm] [LowerMemIntrinsics] Lower llvm.memmove to wide memory accesses (PR #100122)

Fabian Ritter via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 25 02:28:52 PDT 2024


================
@@ -1783,30 +1909,51 @@ define amdgpu_kernel void @memmove_volatile(ptr addrspace(1) %dst, ptr addrspace
 ;
 ; ALL-LABEL: @memmove_volatile(
 ; ALL-NEXT:    [[COMPARE_SRC_DST:%.*]] = icmp ult ptr addrspace(1) [[SRC:%.*]], [[DST:%.*]]
-; ALL-NEXT:    [[COMPARE_N_TO_0:%.*]] = icmp eq i64 64, 0
-; ALL-NEXT:    br i1 [[COMPARE_SRC_DST]], label [[COPY_BACKWARDS:%.*]], label [[COPY_FORWARD:%.*]]
-; ALL:       copy_backwards:
-; ALL-NEXT:    br i1 [[COMPARE_N_TO_0]], label [[MEMMOVE_DONE:%.*]], label [[COPY_BACKWARDS_LOOP:%.*]]
-; ALL:       copy_backwards_loop:
-; ALL-NEXT:    [[TMP1:%.*]] = phi i64 [ [[INDEX_PTR:%.*]], [[COPY_BACKWARDS_LOOP]] ], [ 64, [[COPY_BACKWARDS]] ]
-; ALL-NEXT:    [[INDEX_PTR]] = sub i64 [[TMP1]], 1
-; ALL-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[SRC]], i64 [[INDEX_PTR]]
+; ALL-NEXT:    br i1 [[COMPARE_SRC_DST]], label [[MEMMOVE_COPY_BACKWARDS:%.*]], label [[MEMMOVE_COPY_FORWARD:%.*]]
+; ALL:       memmove_copy_backwards:
+; ALL-NEXT:    br i1 true, label [[MEMMOVE_BWD_MIDDLE:%.*]], label [[MEMMOVE_BWD_RESIDUAL_LOOP:%.*]]
+; ALL:       memmove_bwd_residual_loop:
+; ALL-NEXT:    [[TMP1:%.*]] = phi i64 [ [[BWD_RESIDUAL_INDEX:%.*]], [[MEMMOVE_BWD_RESIDUAL_LOOP]] ], [ 64, [[MEMMOVE_COPY_BACKWARDS]] ]
+; ALL-NEXT:    [[BWD_RESIDUAL_INDEX]] = sub i64 [[TMP1]], 1
+; ALL-NEXT:    [[TMP2:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[SRC]], i64 [[BWD_RESIDUAL_INDEX]]
 ; ALL-NEXT:    [[ELEMENT:%.*]] = load volatile i8, ptr addrspace(1) [[TMP2]], align 1
-; ALL-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[DST]], i64 [[INDEX_PTR]]
+; ALL-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[DST]], i64 [[BWD_RESIDUAL_INDEX]]
 ; ALL-NEXT:    store volatile i8 [[ELEMENT]], ptr addrspace(1) [[TMP3]], align 1
-; ALL-NEXT:    [[TMP4:%.*]] = icmp eq i64 [[INDEX_PTR]], 0
-; ALL-NEXT:    br i1 [[TMP4]], label [[MEMMOVE_DONE]], label [[COPY_BACKWARDS_LOOP]]
-; ALL:       copy_forward:
-; ALL-NEXT:    br i1 [[COMPARE_N_TO_0]], label [[MEMMOVE_DONE]], label [[COPY_FORWARD_LOOP:%.*]]
-; ALL:       copy_forward_loop:
-; ALL-NEXT:    [[INDEX_PTR1:%.*]] = phi i64 [ [[INDEX_INCREMENT:%.*]], [[COPY_FORWARD_LOOP]] ], [ 0, [[COPY_FORWARD]] ]
-; ALL-NEXT:    [[TMP5:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[SRC]], i64 [[INDEX_PTR1]]
-; ALL-NEXT:    [[ELEMENT2:%.*]] = load volatile i8, ptr addrspace(1) [[TMP5]], align 1
-; ALL-NEXT:    [[TMP6:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[DST]], i64 [[INDEX_PTR1]]
-; ALL-NEXT:    store volatile i8 [[ELEMENT2]], ptr addrspace(1) [[TMP6]], align 1
-; ALL-NEXT:    [[INDEX_INCREMENT]] = add i64 [[INDEX_PTR1]], 1
-; ALL-NEXT:    [[TMP7:%.*]] = icmp eq i64 [[INDEX_INCREMENT]], 64
-; ALL-NEXT:    br i1 [[TMP7]], label [[MEMMOVE_DONE]], label [[COPY_FORWARD_LOOP]]
+; ALL-NEXT:    [[TMP4:%.*]] = icmp eq i64 [[BWD_RESIDUAL_INDEX]], 64
+; ALL-NEXT:    br i1 [[TMP4]], label [[MEMMOVE_BWD_MIDDLE]], label [[MEMMOVE_BWD_RESIDUAL_LOOP]]
+; ALL:       memmove_bwd_middle:
+; ALL-NEXT:    br i1 false, label [[MEMMOVE_DONE:%.*]], label [[MEMMOVE_BWD_MAIN_LOOP:%.*]]
+; ALL:       memmove_bwd_main_loop:
+; ALL-NEXT:    [[TMP5:%.*]] = phi i64 [ [[BWD_MAIN_INDEX:%.*]], [[MEMMOVE_BWD_MAIN_LOOP]] ], [ 4, [[MEMMOVE_BWD_MIDDLE]] ]
+; ALL-NEXT:    [[BWD_MAIN_INDEX]] = sub i64 [[TMP5]], 1
+; ALL-NEXT:    [[TMP6:%.*]] = getelementptr inbounds <4 x i32>, ptr addrspace(1) [[SRC]], i64 [[BWD_MAIN_INDEX]]
+; ALL-NEXT:    [[ELEMENT1:%.*]] = load volatile <4 x i32>, ptr addrspace(1) [[TMP6]], align 1
+; ALL-NEXT:    [[TMP7:%.*]] = getelementptr inbounds <4 x i32>, ptr addrspace(1) [[DST]], i64 [[BWD_MAIN_INDEX]]
+; ALL-NEXT:    store volatile <4 x i32> [[ELEMENT1]], ptr addrspace(1) [[TMP7]], align 1
+; ALL-NEXT:    [[TMP8:%.*]] = icmp eq i64 [[BWD_MAIN_INDEX]], 0
+; ALL-NEXT:    br i1 [[TMP8]], label [[MEMMOVE_DONE]], label [[MEMMOVE_BWD_MAIN_LOOP]]
+; ALL:       memmove_copy_forward:
+; ALL-NEXT:    br i1 false, label [[MEMMOVE_FWD_MIDDLE:%.*]], label [[MEMMOVE_FWD_MAIN_LOOP:%.*]]
+; ALL:       memmove_fwd_main_loop:
+; ALL-NEXT:    [[FWD_MAIN_INDEX:%.*]] = phi i64 [ [[TMP11:%.*]], [[MEMMOVE_FWD_MAIN_LOOP]] ], [ 0, [[MEMMOVE_COPY_FORWARD]] ]
+; ALL-NEXT:    [[TMP9:%.*]] = getelementptr inbounds <4 x i32>, ptr addrspace(1) [[SRC]], i64 [[FWD_MAIN_INDEX]]
+; ALL-NEXT:    [[ELEMENT2:%.*]] = load volatile <4 x i32>, ptr addrspace(1) [[TMP9]], align 1
+; ALL-NEXT:    [[TMP10:%.*]] = getelementptr inbounds <4 x i32>, ptr addrspace(1) [[DST]], i64 [[FWD_MAIN_INDEX]]
+; ALL-NEXT:    store volatile <4 x i32> [[ELEMENT2]], ptr addrspace(1) [[TMP10]], align 1
+; ALL-NEXT:    [[TMP11]] = add i64 [[FWD_MAIN_INDEX]], 1
+; ALL-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[TMP11]], 4
+; ALL-NEXT:    br i1 [[TMP12]], label [[MEMMOVE_FWD_MIDDLE]], label [[MEMMOVE_FWD_MAIN_LOOP]]
+; ALL:       memmove_fwd_middle:
+; ALL-NEXT:    br i1 true, label [[MEMMOVE_DONE]], label [[MEMMOVE_FWD_RESIDUAL_LOOP:%.*]]
+; ALL:       memmove_fwd_residual_loop:
+; ALL-NEXT:    [[FWD_RESIDUAL_INDEX:%.*]] = phi i64 [ [[TMP15:%.*]], [[MEMMOVE_FWD_RESIDUAL_LOOP]] ], [ 64, [[MEMMOVE_FWD_MIDDLE]] ]
+; ALL-NEXT:    [[TMP13:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[SRC]], i64 [[FWD_RESIDUAL_INDEX]]
+; ALL-NEXT:    [[ELEMENT3:%.*]] = load volatile i8, ptr addrspace(1) [[TMP13]], align 1
+; ALL-NEXT:    [[TMP14:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[DST]], i64 [[FWD_RESIDUAL_INDEX]]
+; ALL-NEXT:    store volatile i8 [[ELEMENT3]], ptr addrspace(1) [[TMP14]], align 1
+; ALL-NEXT:    [[TMP15]] = add i64 [[FWD_RESIDUAL_INDEX]], 1
+; ALL-NEXT:    [[TMP16:%.*]] = icmp eq i64 [[TMP15]], 64
+; ALL-NEXT:    br i1 [[TMP16]], label [[MEMMOVE_DONE]], label [[MEMMOVE_FWD_RESIDUAL_LOOP]]
 ; ALL:       memmove_done:
 ; ALL-NEXT:    ret void
 ;
----------------
ritter-x2a wrote:

The emitted assembly for AS0<->1 and AS0<->5 does not use flat instructions for the global/private accesses, so the address-space information seems to be recovered. I still think it would be better to use the original address spaces for the accesses here; otherwise the address-space-dependent choice of lowering types doesn't make much sense.
47abbd7 implements that.
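
For readers skimming the CHECK lines above, here is a rough C model of the control flow the new lowering produces for this test: a forward/backward selection on the pointer comparison, a main loop of wide accesses (16 bytes here, matching the <4 x i32> loads/stores), and a byte-wise residual loop. The names (memmove_lowered, WIDE) are made up for illustration and are not part of the patch; each fixed-size memcpy pair below stands in for one wide load/store of the IR.

```c
#include <stddef.h>
#include <string.h>

#define WIDE 16 /* bytes per wide access, like <4 x i32> */

static void memmove_lowered(unsigned char *dst, const unsigned char *src,
                            size_t n) {
  size_t main_bytes = n - n % WIDE; /* bytes covered by the wide main loop */

  if (src < dst) { /* memmove_copy_backwards */
    /* memmove_bwd_residual_loop: trailing bytes, highest address first */
    for (size_t i = n; i > main_bytes;) {
      --i;
      dst[i] = src[i];
    }
    /* memmove_bwd_main_loop: wide chunks, last chunk first */
    for (size_t i = main_bytes / WIDE; i > 0;) {
      --i;
      unsigned char tmp[WIDE];
      memcpy(tmp, src + i * WIDE, WIDE); /* wide load  */
      memcpy(dst + i * WIDE, tmp, WIDE); /* wide store */
    }
  } else { /* memmove_copy_forward */
    /* memmove_fwd_main_loop: wide chunks, first chunk first */
    for (size_t i = 0; i < main_bytes / WIDE; ++i) {
      unsigned char tmp[WIDE];
      memcpy(tmp, src + i * WIDE, WIDE); /* wide load  */
      memcpy(dst + i * WIDE, tmp, WIDE); /* wide store */
    }
    /* memmove_fwd_residual_loop: trailing bytes, lowest address first */
    for (size_t i = main_bytes; i < n; ++i)
      dst[i] = src[i];
  }
}
```

In the test above n is 64, which is a multiple of the wide size, so the residual loops are dead and the branches guarding them fold to the `br i1 true`/`br i1 false` constants visible in the CHECK lines.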

https://github.com/llvm/llvm-project/pull/100122

