[llvm] [CodeGen][PreISelIntrinsicLowering] Add VP-based lowering for memcpy/memmove/memset (PR #165585)

David Del Río via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 4 03:58:28 PST 2025


================
@@ -0,0 +1,174 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
+; RUN: opt -mtriple=riscv64-linux-gnu -mattr=+v -passes=pre-isel-intrinsic-lowering \
+; RUN:   -force-mem-intrinsic-expansion -S < %s | FileCheck %s
+
+define void @memcpy1024_i64(ptr %dst, ptr %src) {
+; CHECK-LABEL: define void @memcpy1024_i64(
+; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    br label %[[MEMCPY_VEC_LOOP:.*]]
+; CHECK:       [[MEMCPY_VEC_LOOP]]:
+; CHECK-NEXT:    [[SRC_F:%.*]] = phi ptr [ [[SRC]], %[[ENTRY]] ], [ [[TMP2:%.*]], %[[MEMCPY_VEC_LOOP]] ]
+; CHECK-NEXT:    [[DST_F:%.*]] = phi ptr [ [[DST]], %[[ENTRY]] ], [ [[TMP3:%.*]], %[[MEMCPY_VEC_LOOP]] ]
+; CHECK-NEXT:    [[LEN_F:%.*]] = phi i64 [ 1024, %[[ENTRY]] ], [ [[TMP5:%.*]], %[[MEMCPY_VEC_LOOP]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[LEN_F]], i32 8, i1 true)
+; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr [[SRC_F]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP0]])
+; CHECK-NEXT:    call void @llvm.vp.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP1]], ptr [[DST_F]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP0]])
+; CHECK-NEXT:    [[TMP2]] = getelementptr i8, ptr [[SRC_F]], i32 [[TMP0]]
+; CHECK-NEXT:    [[TMP3]] = getelementptr i8, ptr [[DST_F]], i32 [[TMP0]]
+; CHECK-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP0]] to i64
+; CHECK-NEXT:    [[TMP5]] = sub i64 [[LEN_F]], [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp ugt i64 [[TMP5]], 0
+; CHECK-NEXT:    br i1 [[TMP6]], label %[[MEMCPY_VEC_LOOP]], label %[[MEMCPY_VEC_EXIT:.*]]
+; CHECK:       [[MEMCPY_VEC_EXIT]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 1024, i1 false)
+  ret void
+}
+
+define void @memcpy1024_i32(ptr %dst, ptr %src) {
+; CHECK-LABEL: define void @memcpy1024_i32(
+; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    br label %[[MEMCPY_VEC_LOOP:.*]]
+; CHECK:       [[MEMCPY_VEC_LOOP]]:
+; CHECK-NEXT:    [[SRC_F:%.*]] = phi ptr [ [[SRC]], %[[ENTRY]] ], [ [[TMP2:%.*]], %[[MEMCPY_VEC_LOOP]] ]
+; CHECK-NEXT:    [[DST_F:%.*]] = phi ptr [ [[DST]], %[[ENTRY]] ], [ [[TMP3:%.*]], %[[MEMCPY_VEC_LOOP]] ]
+; CHECK-NEXT:    [[LEN_F:%.*]] = phi i32 [ 1024, %[[ENTRY]] ], [ [[TMP4:%.*]], %[[MEMCPY_VEC_LOOP]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[LEN_F]], i32 8, i1 true)
+; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr [[SRC_F]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP0]])
+; CHECK-NEXT:    call void @llvm.vp.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP1]], ptr [[DST_F]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP0]])
+; CHECK-NEXT:    [[TMP2]] = getelementptr i8, ptr [[SRC_F]], i32 [[TMP0]]
+; CHECK-NEXT:    [[TMP3]] = getelementptr i8, ptr [[DST_F]], i32 [[TMP0]]
+; CHECK-NEXT:    [[TMP4]] = sub i32 [[LEN_F]], [[TMP0]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ugt i32 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label %[[MEMCPY_VEC_LOOP]], label %[[MEMCPY_VEC_EXIT:.*]]
+; CHECK:       [[MEMCPY_VEC_EXIT]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %src, i32 1024, i1 false)
+  ret void
+}
+
+define void @memset1024(ptr %dst, i8 %value) {
+; CHECK-LABEL: define void @memset1024(
+; CHECK-SAME: ptr [[DST:%.*]], i8 [[VALUE:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    br label %[[MEMSET_VEC_LOOP:.*]]
+; CHECK:       [[MEMSET_VEC_LOOP]]:
+; CHECK-NEXT:    [[SRC_F:%.*]] = phi ptr [ [[DST]], %[[ENTRY]] ], [ [[TMP2:%.*]], %[[MEMSET_VEC_LOOP]] ]
+; CHECK-NEXT:    [[LEN_F:%.*]] = phi i64 [ 1024, %[[ENTRY]] ], [ [[TMP4:%.*]], %[[MEMSET_VEC_LOOP]] ]
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[LEN_F]], i32 8, i1 true)
+; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 8 x i8> @llvm.experimental.vp.splat.nxv8i8(i8 [[VALUE]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP0]])
+; CHECK-NEXT:    call void @llvm.vp.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP1]], ptr [[SRC_F]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP0]])
+; CHECK-NEXT:    [[TMP2]] = getelementptr i8, ptr [[SRC_F]], i32 [[TMP0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = zext i32 [[TMP0]] to i64
+; CHECK-NEXT:    [[TMP4]] = sub i64 [[LEN_F]], [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp ugt i64 [[TMP4]], 0
+; CHECK-NEXT:    br i1 [[TMP5]], label %[[MEMSET_VEC_LOOP]], label %[[MEMSET_VEC_EXIT:.*]]
+; CHECK:       [[MEMSET_VEC_EXIT]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  call void @llvm.memset.p0.i64(ptr %dst, i8 %value, i64 1024, i1 false)
+  ret void
+}
+
+define void @memmove1024_i64(ptr %dst, ptr %src) {
+; CHECK-LABEL: define void @memmove1024_i64(
+; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[COMPARE_ADDR:%.*]] = icmp ult ptr [[SRC]], [[DST]]
+; CHECK-NEXT:    br i1 [[COMPARE_ADDR]], label %[[VEC_BACKWARD:.*]], label %[[VEC_FORWARD:.*]]
+; CHECK:       [[VEC_BACKWARD]]:
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr i8, ptr [[SRC]], i64 1024
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr i8, ptr [[DST]], i64 1024
+; CHECK-NEXT:    br label %[[VEC_BACKWARD_LOOP:.*]]
+; CHECK:       [[VEC_BACKWARD_LOOP]]:
+; CHECK-NEXT:    [[SRC_B:%.*]] = phi ptr [ [[TMP0]], %[[VEC_BACKWARD]] ], [ [[TMP4:%.*]], %[[VEC_BACKWARD_LOOP]] ]
+; CHECK-NEXT:    [[DST_B:%.*]] = phi ptr [ [[TMP1]], %[[VEC_BACKWARD]] ], [ [[TMP6:%.*]], %[[VEC_BACKWARD_LOOP]] ]
+; CHECK-NEXT:    [[LEN_B:%.*]] = phi i64 [ 1024, %[[VEC_BACKWARD]] ], [ [[TMP9:%.*]], %[[VEC_BACKWARD_LOOP]] ]
+; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[LEN_B]], i32 8, i1 true)
+; CHECK-NEXT:    [[TMP3:%.*]] = sub i32 0, [[TMP2]]
+; CHECK-NEXT:    [[TMP4]] = getelementptr i8, ptr [[SRC_B]], i32 [[TMP3]]
+; CHECK-NEXT:    [[TMP5:%.*]] = sub i32 0, [[TMP2]]
+; CHECK-NEXT:    [[TMP6]] = getelementptr i8, ptr [[DST_B]], i32 [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr [[TMP4]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP2]])
+; CHECK-NEXT:    call void @llvm.vp.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP7]], ptr [[TMP6]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP2]])
+; CHECK-NEXT:    [[TMP8:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT:    [[TMP9]] = sub i64 [[LEN_B]], [[TMP8]]
+; CHECK-NEXT:    [[TMP10:%.*]] = icmp ugt i64 [[TMP9]], 0
+; CHECK-NEXT:    br i1 [[TMP10]], label %[[VEC_BACKWARD_LOOP]], label %[[VEC_DONE:.*]]
+; CHECK:       [[VEC_FORWARD]]:
+; CHECK-NEXT:    [[SRC_F:%.*]] = phi ptr [ [[SRC]], %[[ENTRY]] ], [ [[TMP13:%.*]], %[[VEC_FORWARD]] ]
+; CHECK-NEXT:    [[DST_F:%.*]] = phi ptr [ [[DST]], %[[ENTRY]] ], [ [[TMP14:%.*]], %[[VEC_FORWARD]] ]
+; CHECK-NEXT:    [[LEN_F:%.*]] = phi i64 [ 1024, %[[ENTRY]] ], [ [[TMP16:%.*]], %[[VEC_FORWARD]] ]
+; CHECK-NEXT:    [[TMP11:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[LEN_F]], i32 8, i1 true)
+; CHECK-NEXT:    [[TMP12:%.*]] = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr [[SRC_F]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP11]])
+; CHECK-NEXT:    call void @llvm.vp.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP12]], ptr [[DST_F]], <vscale x 8 x i1> splat (i1 true), i32 [[TMP11]])
+; CHECK-NEXT:    [[TMP13]] = getelementptr i8, ptr [[SRC_F]], i32 [[TMP11]]
+; CHECK-NEXT:    [[TMP14]] = getelementptr i8, ptr [[DST_F]], i32 [[TMP11]]
+; CHECK-NEXT:    [[TMP15:%.*]] = zext i32 [[TMP11]] to i64
+; CHECK-NEXT:    [[TMP16]] = sub i64 [[LEN_F]], [[TMP15]]
+; CHECK-NEXT:    [[TMP17:%.*]] = icmp ugt i64 [[TMP16]], 0
+; CHECK-NEXT:    br i1 [[TMP17]], label %[[VEC_FORWARD]], label %[[VEC_DONE]]
+; CHECK:       [[VEC_DONE]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  call void @llvm.memmove.p0.p0.i64(ptr %dst, ptr %src, i64 1024, i1 false)
+  ret void
+}
+
+define void @memmove1024_i32(ptr %dst, ptr %src) {
+; CHECK-LABEL: define void @memmove1024_i32(
+; CHECK-SAME: ptr [[DST:%.*]], ptr [[SRC:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[COMPARE_ADDR:%.*]] = icmp ult ptr [[SRC]], [[DST]]
----------------
dadra-oc wrote:

Yeah, the forward copy is the most performant one, but depending on the overlap we will need to do a backward copy for correctness.

https://github.com/llvm/llvm-project/pull/165585


More information about the llvm-commits mailing list