[PATCH] Fix bug in load PRE in GVN.cpp

Justin Bogner mail at justinbogner.com
Thu May 1 16:08:14 PDT 2014


This is obviously correct. LGTM.

Akira Hatanaka <ahatanak at gmail.com> writes:
> This patch fixes GVN::AnalyzeLoadAvailability to pass the phi-translated
> address of the load being processed, rather than the untranslated address,
> to AnalyzeLoadFromClobberingLoad.
>
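> To illustrate what phi translation means here, a minimal standalone C++
> sketch (not the LLVM API; the map-based PHI model and all names are
> invented for this example): translating a pointer into a predecessor block
> replaces a PHI with its incoming value for that block, and availability
> has to be tested against that translated pointer.
>
>   #include <cassert>
>   #include <map>
>   #include <string>
>
>   // Hypothetical model: a PHI as a table from predecessor block to the
>   // incoming pointer value.
>   using PhiMap = std::map<std::string, std::string>;
>
>   std::string phiTranslate(const std::string &Ptr, const PhiMap &Phi,
>                            const std::string &Pred) {
>     auto It = Phi.find(Pred);
>     return It != Phi.end() ? It->second : Ptr; // non-PHIs pass through
>   }
>
>   int main() {
>     // %p = phi i32* [ %a, %pred1 ], [ %b, %pred2 ]
>     PhiMap Phi = {{"pred1", "%a"}, {"pred2", "%b"}};
>     // In %pred1 the load's address is %a, not %p; querying with the
>     // untranslated %p would mis-report availability in that block.
>     assert(phiTranslate("%p", Phi, "pred1") == "%a");
>     assert(phiTranslate("%p", Phi, "pred2") == "%b");
>   }
>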
> Without the fix, GVN::AnalyzeLoadAvailability can incorrectly determine that
> a load value is available or unavailable in a basic block, which causes
> load-PRE to conclude that a load is partially redundant when it isn't (see
> the first function in the test case) or that it isn't partially redundant
> when it is (see the second function).
>
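> As a simplified sketch of the kind of check AnalyzeLoadFromClobberingLoad
> performs (the real code reasons about Value*s and the DataLayout; the plain
> byte addresses and the analyzeOverlap name below are invented stand-ins):
> it yields the byte offset of the queried load within the clobbering load,
> or -1 when the queried bytes aren't covered, so passing the wrong address
> produces the wrong answer.
>
>   #include <cassert>
>   #include <cstdint>
>
>   // Return the forwarding offset of the queried load within the
>   // clobbering load, or -1 when the queried bytes aren't fully covered.
>   int analyzeOverlap(uint64_t DepBase, uint64_t DepSize,
>                      uint64_t LoadBase, uint64_t LoadSize) {
>     if (LoadBase < DepBase || LoadBase + LoadSize > DepBase + DepSize)
>       return -1;
>     return static_cast<int>(LoadBase - DepBase);
>   }
>
>   int main() {
>     assert(analyzeOverlap(100, 8, 104, 4) == 4);  // covered at offset 4
>     assert(analyzeOverlap(100, 8, 96, 4) == -1);  // wrong address: no reuse
>   }
>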
> I tested this patch by running the test suite and didn't see any failing tests.
>
> <rdar://problem/16638765>.
>
> Please review.
>
>
> diff --git a/lib/Transforms/Scalar/GVN.cpp b/lib/Transforms/Scalar/GVN.cpp
> index e7156fd..0253a05 100644
> --- a/lib/Transforms/Scalar/GVN.cpp
> +++ b/lib/Transforms/Scalar/GVN.cpp
> @@ -1421,8 +1421,7 @@ void GVN::AnalyzeLoadAvailability(LoadInst *LI, LoadDepVect &Deps,
>          // If this is a clobber and L is the first instruction in its block, then
>          // we have the first instruction in the entry block.
>          if (DepLI != LI && Address && DL) {
> -          int Offset = AnalyzeLoadFromClobberingLoad(LI->getType(),
> -                                                     LI->getPointerOperand(),
> +          int Offset = AnalyzeLoadFromClobberingLoad(LI->getType(), Address,
>                                                       DepLI, *DL);
>  
>            if (Offset != -1) {
> diff --git a/test/Transforms/GVN/load-pre-nonlocal.ll b/test/Transforms/GVN/load-pre-nonlocal.ll
> new file mode 100644
> index 0000000..7bac1b7
> --- /dev/null
> +++ b/test/Transforms/GVN/load-pre-nonlocal.ll
> @@ -0,0 +1,87 @@
> +; RUN: opt -S -o - -basicaa -domtree -gvn %s | FileCheck %s
> +
> +target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
> +
> +%struct.S1 = type { i32, i32 }
> +
> +@a2 = common global i32* null, align 8
> +@a = common global i32* null, align 8
> +@s1 = common global %struct.S1 zeroinitializer, align 8
> +
> +; Check that GVN doesn't determine %2 is partially redundant.
> +
> +; CHECK-LABEL: define i32 @volatile_load
> +; CHECK: for.body:
> +; CHECK: %2 = load i32*
> +; CHECK: %3 = load volatile i32*
> +; CHECK: for.cond.for.end_crit_edge:
> +
> +define i32 @volatile_load(i32 %n) {
> +entry:
> +  %cmp6 = icmp sgt i32 %n, 0
> +  br i1 %cmp6, label %for.body.lr.ph, label %for.end
> +
> +for.body.lr.ph:
> +  %0 = load i32** @a2, align 8, !tbaa !1
> +  %1 = load i32** @a, align 8, !tbaa !1
> +  br label %for.body
> +
> +for.body:
> +  %indvars.iv = phi i64 [ 0, %for.body.lr.ph ], [ %indvars.iv.next, %for.body ]
> +  %s.09 = phi i32 [ 0, %for.body.lr.ph ], [ %add, %for.body ]
> +  %p.08 = phi i32* [ %0, %for.body.lr.ph ], [ %incdec.ptr, %for.body ]
> +  %2 = load i32* %p.08, align 4, !tbaa !5
> +  %arrayidx = getelementptr inbounds i32* %1, i64 %indvars.iv
> +  store i32 %2, i32* %arrayidx, align 4, !tbaa !5
> +  %3 = load volatile i32* %p.08, align 4, !tbaa !5
> +  %add = add nsw i32 %3, %s.09
> +  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
> +  %incdec.ptr = getelementptr inbounds i32* %p.08, i64 1
> +  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
> +  %exitcond = icmp ne i32 %lftr.wideiv, %n
> +  br i1 %exitcond, label %for.body, label %for.cond.for.end_crit_edge
> +
> +for.cond.for.end_crit_edge:
> +  %add.lcssa = phi i32 [ %add, %for.body ]
> +  br label %for.end
> +
> +for.end:
> +  %s.0.lcssa = phi i32 [ %add.lcssa, %for.cond.for.end_crit_edge ], [ 0, %entry ]
> +  ret i32 %s.0.lcssa
> +}
> +
> +; %1 is partially redundant if %0 can be widened to a 64-bit load.
> +
> +; CHECK-LABEL: define i32 @overaligned_load
> +; CHECK: if.end:
> +; CHECK-NOT: %1 = load i32*
> +
> +define i32 @overaligned_load(i32 %a, i32* nocapture %b) {
> +entry:
> +  %cmp = icmp sgt i32 %a, 0
> +  br i1 %cmp, label %if.then, label %if.else
> +
> +if.then:
> +  %0 = load i32* getelementptr inbounds (%struct.S1* @s1, i64 0, i32 0), align 8, !tbaa !5
> +  br label %if.end
> +
> +if.else:
> +  %arrayidx = getelementptr inbounds i32* %b, i64 2
> +  store i32 10, i32* %arrayidx, align 4, !tbaa !5
> +  br label %if.end
> +
> +if.end:
> +  %i.0 = phi i32 [ %0, %if.then ], [ 0, %if.else ]
> +  %p.0 = phi i32* [ getelementptr inbounds (%struct.S1* @s1, i64 0, i32 0), %if.then ], [ %b, %if.else ]
> +  %add.ptr = getelementptr inbounds i32* %p.0, i64 1
> +  %1 = load i32* %add.ptr, align 4, !tbaa !5
> +  %add1 = add nsw i32 %1, %i.0
> +  ret i32 %add1
> +}
> +
> +!1 = metadata !{metadata !2, metadata !2, i64 0}
> +!2 = metadata !{metadata !"any pointer", metadata !3, i64 0}
> +!3 = metadata !{metadata !"omnipotent char", metadata !4, i64 0}
> +!4 = metadata !{metadata !"Simple C/C++ TBAA"}
> +!5 = metadata !{metadata !6, metadata !6, i64 0}
> +!6 = metadata !{metadata !"int", metadata !3, i64 0}