[llvm] 84feca6 - [MemCpyOpt] Add tests from D40802 (NFC)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Fri Oct 2 11:28:50 PDT 2020


Author: Nikita Popov
Date: 2020-10-02T20:28:38+02:00
New Revision: 84feca6a84d90c5c0b8ecbcffc68e8e4b1285f32

URL: https://github.com/llvm/llvm-project/commit/84feca6a84d90c5c0b8ecbcffc68e8e4b1285f32
DIFF: https://github.com/llvm/llvm-project/commit/84feca6a84d90c5c0b8ecbcffc68e8e4b1285f32.diff

LOG: [MemCpyOpt] Add tests from D40802 (NFC)

Even though that patch didn't stick, we should retain the test
coverage.

Added: 
    llvm/test/Transforms/MemCpyOpt/memcpy-invoke-memcpy.ll
    llvm/test/Transforms/MemCpyOpt/merge-into-memset.ll
    llvm/test/Transforms/MemCpyOpt/mixed-sizes.ll
    llvm/test/Transforms/MemCpyOpt/nonlocal-memcpy-memcpy.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/MemCpyOpt/memcpy-invoke-memcpy.ll b/llvm/test/Transforms/MemCpyOpt/memcpy-invoke-memcpy.ll
new file mode 100644
index 000000000000..6a2529d03430
--- /dev/null
+++ b/llvm/test/Transforms/MemCpyOpt/memcpy-invoke-memcpy.ll
@@ -0,0 +1,76 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -memcpyopt -S | FileCheck %s
+
+; Test memcpy-memcpy dependencies across invoke edges.
+
+; Test that memcpyopt works across the non-unwind edge of an invoke.
+; TODO: Not supported yet.
+
+define hidden void @test_normal(i8* noalias %dst, i8* %src) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+; CHECK-LABEL: @test_normal(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TEMP:%.*]] = alloca i8, i32 64, align 1
+; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TEMP]], i8* nonnull align 8 [[SRC:%.*]], i64 64, i1 false)
+; CHECK-NEXT:    invoke void @invoke_me()
+; CHECK-NEXT:    to label [[TRY_CONT:%.*]] unwind label [[LPAD:%.*]]
+; CHECK:       lpad:
+; CHECK-NEXT:    [[TMP0:%.*]] = landingpad { i8*, i32 }
+; CHECK-NEXT:    catch i8* null
+; CHECK-NEXT:    ret void
+; CHECK:       try.cont:
+; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[DST:%.*]], i8* align 8 [[TEMP]], i64 64, i1 false)
+; CHECK-NEXT:    ret void
+;
+entry:
+  %temp = alloca i8, i32 64
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %temp, i8* nonnull align 8 %src, i64 64, i1 false)
+  invoke void @invoke_me()
+  to label %try.cont unwind label %lpad
+
+lpad:
+  landingpad { i8*, i32 }
+  catch i8* null
+  ret void
+
+try.cont:
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %dst, i8* align 8 %temp, i64 64, i1 false)
+  ret void
+}
+
+; Test that memcpyopt works across the unwind edge of an invoke.
+; TODO: Not supported yet.
+
+define hidden void @test_unwind(i8* noalias %dst, i8* %src) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+; CHECK-LABEL: @test_unwind(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TEMP:%.*]] = alloca i8, i32 64, align 1
+; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TEMP]], i8* nonnull align 8 [[SRC:%.*]], i64 64, i1 false)
+; CHECK-NEXT:    invoke void @invoke_me()
+; CHECK-NEXT:    to label [[TRY_CONT:%.*]] unwind label [[LPAD:%.*]]
+; CHECK:       lpad:
+; CHECK-NEXT:    [[TMP0:%.*]] = landingpad { i8*, i32 }
+; CHECK-NEXT:    catch i8* null
+; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[DST:%.*]], i8* align 8 [[TEMP]], i64 64, i1 false)
+; CHECK-NEXT:    ret void
+; CHECK:       try.cont:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %temp = alloca i8, i32 64
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %temp, i8* nonnull align 8 %src, i64 64, i1 false)
+  invoke void @invoke_me()
+  to label %try.cont unwind label %lpad
+
+lpad:
+  landingpad { i8*, i32 }
+  catch i8* null
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %dst, i8* align 8 %temp, i64 64, i1 false)
+  ret void
+
+try.cont:
+  ret void
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1)
+declare i32 @__gxx_personality_v0(...)
+declare void @invoke_me() readnone

diff --git a/llvm/test/Transforms/MemCpyOpt/merge-into-memset.ll b/llvm/test/Transforms/MemCpyOpt/merge-into-memset.ll
new file mode 100644
index 000000000000..af3fe4155f91
--- /dev/null
+++ b/llvm/test/Transforms/MemCpyOpt/merge-into-memset.ll
@@ -0,0 +1,42 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -memcpyopt -S | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+; Don't delete the memcpy in %if.then, even though it depends on an instruction
+; which will be deleted.
+
+define void @foo(i1 %c, i8* %d, i8* %e, i8* %f) {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP:%.*]] = alloca [50 x i8], align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast [50 x i8]* [[TMP]] to i8*
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds i8, i8* [[TMP4]], i64 1
+; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* nonnull [[D:%.*]], i8 0, i64 10, i1 false)
+; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP4]], i8 0, i64 11, i1 false)
+; CHECK-NEXT:    br i1 [[C:%.*]], label [[IF_THEN:%.*]], label [[EXIT:%.*]]
+; CHECK:       if.then:
+; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[F:%.*]], i8* nonnull align 8 [[TMP4]], i64 30, i1 false)
+; CHECK-NEXT:    br label [[EXIT]]
+; CHECK:       exit:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %tmp = alloca [50 x i8], align 8
+  %tmp4 = bitcast [50 x i8]* %tmp to i8*
+  %tmp1 = getelementptr inbounds i8, i8* %tmp4, i64 1
+  call void @llvm.memset.p0i8.i64(i8* nonnull %d, i8 0, i64 10, i1 false)
+  store i8 0, i8* %tmp4, align 8
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull %tmp1, i8* nonnull %d, i64 10, i1 false)
+  br i1 %c, label %if.then, label %exit
+
+if.then:
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %f, i8* nonnull align 8 %tmp4, i64 30, i1 false)
+  br label %exit
+
+exit:
+  ret void
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1)
+declare void @llvm.memset.p0i8.i64(i8*, i8, i64, i1)

diff --git a/llvm/test/Transforms/MemCpyOpt/mixed-sizes.ll b/llvm/test/Transforms/MemCpyOpt/mixed-sizes.ll
new file mode 100644
index 000000000000..eb4a86fe5286
--- /dev/null
+++ b/llvm/test/Transforms/MemCpyOpt/mixed-sizes.ll
@@ -0,0 +1,54 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -memcpyopt -S | FileCheck %s
+; Handle memcpy-memcpy dependencies of differing sizes correctly.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+; Don't delete the second memcpy, even though there's an earlier
+; memcpy with a larger size from the same address.
+
+define i32 @foo(i1 %z) {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[A:%.*]] = alloca [10 x i32], align 4
+; CHECK-NEXT:    [[S:%.*]] = alloca [10 x i32], align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast [10 x i32]* [[A]] to i8*
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast [10 x i32]* [[S]] to i8*
+; CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* nonnull align 16 [[TMP1]], i8 0, i64 40, i1 false)
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i64 0, i64 0
+; CHECK-NEXT:    store i32 1, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr [10 x i32], [10 x i32]* [[S]], i64 0, i64 1
+; CHECK-NEXT:    [[SCEVGEP7:%.*]] = bitcast i32* [[SCEVGEP]] to i8*
+; CHECK-NEXT:    br i1 [[Z:%.*]], label [[FOR_BODY3_LR_PH:%.*]], label [[FOR_INC7_1:%.*]]
+; CHECK:       for.body3.lr.ph:
+; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 [[SCEVGEP7]], i64 17179869180, i1 false)
+; CHECK-NEXT:    br label [[FOR_INC7_1]]
+; CHECK:       for.inc7.1:
+; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 [[SCEVGEP7]], i64 4, i1 false)
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    ret i32 [[TMP2]]
+;
+entry:
+  %a = alloca [10 x i32]
+  %s = alloca [10 x i32]
+  %0 = bitcast [10 x i32]* %a to i8*
+  %1 = bitcast [10 x i32]* %s to i8*
+  call void @llvm.memset.p0i8.i64(i8* nonnull align 16 %1, i8 0, i64 40, i1 false)
+  %arrayidx = getelementptr inbounds [10 x i32], [10 x i32]* %a, i64 0, i64 0
+  store i32 1, i32* %arrayidx
+  %scevgep = getelementptr [10 x i32], [10 x i32]* %s, i64 0, i64 1
+  %scevgep7 = bitcast i32* %scevgep to i8*
+  br i1 %z, label %for.body3.lr.ph, label %for.inc7.1
+
+for.body3.lr.ph:                                  ; preds = %entry
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %scevgep7, i64 17179869180, i1 false)
+  br label %for.inc7.1
+
+for.inc7.1:
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %scevgep7, i64 4, i1 false)
+  %2 = load i32, i32* %arrayidx
+  ret i32 %2
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1)
+declare void @llvm.memset.p0i8.i64(i8*, i8, i64, i1)

diff --git a/llvm/test/Transforms/MemCpyOpt/nonlocal-memcpy-memcpy.ll b/llvm/test/Transforms/MemCpyOpt/nonlocal-memcpy-memcpy.ll
new file mode 100644
index 000000000000..f682b71e8c30
--- /dev/null
+++ b/llvm/test/Transforms/MemCpyOpt/nonlocal-memcpy-memcpy.ll
@@ -0,0 +1,172 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -memcpyopt -S | FileCheck %s
+
+; Test whether memcpy-memcpy dependence is optimized across
+; basic blocks (conditional branches and invokes).
+; TODO: This is not supported yet.
+
+%struct.s = type { i32, i32 }
+
+@s_foo = private unnamed_addr constant %struct.s { i32 1, i32 2 }, align 4
+@s_baz = private unnamed_addr constant %struct.s { i32 1, i32 2 }, align 4
+@i = external constant i8*
+
+declare void @qux()
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i1)
+declare void @__cxa_throw(i8*, i8*, i8*)
+declare i32 @__gxx_personality_v0(...)
+declare i8* @__cxa_begin_catch(i8*)
+
+; A simple partial redundancy. Test that the second memcpy is optimized
+; to copy directly from the original source rather than from the temporary.
+
+define void @wobble(i8* noalias %dst, i8* %src, i1 %some_condition) {
+; CHECK-LABEL: @wobble(
+; CHECK-NEXT:  bb:
+; CHECK-NEXT:    [[TEMP:%.*]] = alloca i8, i32 64, align 1
+; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TEMP]], i8* nonnull align 8 [[SRC:%.*]], i64 64, i1 false)
+; CHECK-NEXT:    br i1 [[SOME_CONDITION:%.*]], label [[MORE:%.*]], label [[OUT:%.*]]
+; CHECK:       out:
+; CHECK-NEXT:    call void @qux()
+; CHECK-NEXT:    unreachable
+; CHECK:       more:
+; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[DST:%.*]], i8* align 8 [[TEMP]], i64 64, i1 false)
+; CHECK-NEXT:    ret void
+;
+bb:
+  %temp = alloca i8, i32 64
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %temp, i8* nonnull align 8 %src, i64 64, i1 false)
+  br i1 %some_condition, label %more, label %out
+
+out:
+  call void @qux()
+  unreachable
+
+more:
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %dst, i8* align 8 %temp, i64 64, i1 false)
+  ret void
+}
+
+; A CFG triangle with a partial redundancy targeting an alloca. Test that the
+; memcpy inside the triangle is optimized to copy directly from the original
+; source rather than from the temporary.
+
+define i32 @foo(i1 %t3) {
+; CHECK-LABEL: @foo(
+; CHECK-NEXT:  bb:
+; CHECK-NEXT:    [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 4
+; CHECK-NEXT:    [[T:%.*]] = alloca [[STRUCT_S]], align 4
+; CHECK-NEXT:    [[S1:%.*]] = bitcast %struct.s* [[S]] to i8*
+; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[S1]], i8* align 4 bitcast (%struct.s* @s_foo to i8*), i64 8, i1 false)
+; CHECK-NEXT:    br i1 [[T3:%.*]], label [[BB4:%.*]], label [[BB7:%.*]]
+; CHECK:       bb4:
+; CHECK-NEXT:    [[T5:%.*]] = bitcast %struct.s* [[T]] to i8*
+; CHECK-NEXT:    [[S6:%.*]] = bitcast %struct.s* [[S]] to i8*
+; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[T5]], i8* align 4 [[S6]], i64 8, i1 false)
+; CHECK-NEXT:    br label [[BB7]]
+; CHECK:       bb7:
+; CHECK-NEXT:    [[T8:%.*]] = getelementptr [[STRUCT_S]], %struct.s* [[T]], i32 0, i32 0
+; CHECK-NEXT:    [[T9:%.*]] = load i32, i32* [[T8]], align 4
+; CHECK-NEXT:    [[T10:%.*]] = getelementptr [[STRUCT_S]], %struct.s* [[T]], i32 0, i32 1
+; CHECK-NEXT:    [[T11:%.*]] = load i32, i32* [[T10]], align 4
+; CHECK-NEXT:    [[T12:%.*]] = add i32 [[T9]], [[T11]]
+; CHECK-NEXT:    ret i32 [[T12]]
+;
+bb:
+  %s = alloca %struct.s, align 4
+  %t = alloca %struct.s, align 4
+  %s1 = bitcast %struct.s* %s to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %s1, i8* align 4 bitcast (%struct.s* @s_foo to i8*), i64 8, i1 false)
+  br i1 %t3, label %bb4, label %bb7
+
+bb4:                                              ; preds = %bb
+  %t5 = bitcast %struct.s* %t to i8*
+  %s6 = bitcast %struct.s* %s to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %t5, i8* align 4 %s6, i64 8, i1 false)
+  br label %bb7
+
+bb7:                                              ; preds = %bb4, %bb
+  %t8 = getelementptr %struct.s, %struct.s* %t, i32 0, i32 0
+  %t9 = load i32, i32* %t8, align 4
+  %t10 = getelementptr %struct.s, %struct.s* %t, i32 0, i32 1
+  %t11 = load i32, i32* %t10, align 4
+  %t12 = add i32 %t9, %t11
+  ret i32 %t12
+}
+
+; A CFG diamond with an invoke on one side, and a partially redundant memcpy
+; into an alloca on the other. Test that the memcpy inside the diamond is
+; optimized to copy directly from the original source rather than from the
+; temporary. This more complex test represents a relatively common usage
+; pattern.
+
+define i32 @baz(i1 %t5) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+; CHECK-LABEL: @baz(
+; CHECK-NEXT:  bb:
+; CHECK-NEXT:    [[S:%.*]] = alloca [[STRUCT_S:%.*]], align 4
+; CHECK-NEXT:    [[T:%.*]] = alloca [[STRUCT_S]], align 4
+; CHECK-NEXT:    [[S3:%.*]] = bitcast %struct.s* [[S]] to i8*
+; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[S3]], i8* align 4 bitcast (%struct.s* @s_baz to i8*), i64 8, i1 false)
+; CHECK-NEXT:    br i1 [[T5:%.*]], label [[BB6:%.*]], label [[BB22:%.*]]
+; CHECK:       bb6:
+; CHECK-NEXT:    invoke void @__cxa_throw(i8* null, i8* bitcast (i8** @i to i8*), i8* null)
+; CHECK-NEXT:    to label [[BB25:%.*]] unwind label [[BB9:%.*]]
+; CHECK:       bb9:
+; CHECK-NEXT:    [[T10:%.*]] = landingpad { i8*, i32 }
+; CHECK-NEXT:    catch i8* null
+; CHECK-NEXT:    br label [[BB13:%.*]]
+; CHECK:       bb13:
+; CHECK-NEXT:    [[T15:%.*]] = call i8* @__cxa_begin_catch(i8* null)
+; CHECK-NEXT:    br label [[BB23:%.*]]
+; CHECK:       bb22:
+; CHECK-NEXT:    [[T23:%.*]] = bitcast %struct.s* [[T]] to i8*
+; CHECK-NEXT:    [[S24:%.*]] = bitcast %struct.s* [[S]] to i8*
+; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[T23]], i8* align 4 [[S24]], i64 8, i1 false)
+; CHECK-NEXT:    br label [[BB23]]
+; CHECK:       bb23:
+; CHECK-NEXT:    [[T17:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.s* [[T]], i32 0, i32 0
+; CHECK-NEXT:    [[T18:%.*]] = load i32, i32* [[T17]], align 4
+; CHECK-NEXT:    [[T19:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.s* [[T]], i32 0, i32 1
+; CHECK-NEXT:    [[T20:%.*]] = load i32, i32* [[T19]], align 4
+; CHECK-NEXT:    [[T21:%.*]] = add nsw i32 [[T18]], [[T20]]
+; CHECK-NEXT:    ret i32 [[T21]]
+; CHECK:       bb25:
+; CHECK-NEXT:    unreachable
+;
+bb:
+  %s = alloca %struct.s, align 4
+  %t = alloca %struct.s, align 4
+  %s3 = bitcast %struct.s* %s to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %s3, i8* align 4 bitcast (%struct.s* @s_baz to i8*), i64 8, i1 false)
+  br i1 %t5, label %bb6, label %bb22
+
+bb6:                                              ; preds = %bb
+  invoke void @__cxa_throw(i8* null, i8* bitcast (i8** @i to i8*), i8* null)
+  to label %bb25 unwind label %bb9
+
+bb9:                                              ; preds = %bb6
+  %t10 = landingpad { i8*, i32 }
+  catch i8* null
+  br label %bb13
+
+bb13:                                             ; preds = %bb9
+  %t15 = call i8* @__cxa_begin_catch(i8* null)
+  br label %bb23
+
+bb22:                                             ; preds = %bb
+  %t23 = bitcast %struct.s* %t to i8*
+  %s24 = bitcast %struct.s* %s to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %t23, i8* align 4 %s24, i64 8, i1 false)
+  br label %bb23
+
+bb23:                                             ; preds = %bb22, %bb13
+  %t17 = getelementptr inbounds %struct.s, %struct.s* %t, i32 0, i32 0
+  %t18 = load i32, i32* %t17, align 4
+  %t19 = getelementptr inbounds %struct.s, %struct.s* %t, i32 0, i32 1
+  %t20 = load i32, i32* %t19, align 4
+  %t21 = add nsw i32 %t18, %t20
+  ret i32 %t21
+
+bb25:                                             ; preds = %bb6
+  unreachable
+}


        


More information about the llvm-commits mailing list