[llvm] r274666 - [esan|wset] Fix incorrect memory size assert

Derek Bruening via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 6 13:13:53 PDT 2016


Author: bruening
Date: Wed Jul  6 15:13:53 2016
New Revision: 274666

URL: http://llvm.org/viewvc/llvm-project?rev=274666&view=rev
Log:
[esan|wset] Fix incorrect memory size assert

Summary:
Fixes an incorrect assert that fired on 128-bit (16-byte) loads and stores:
TypeSize is measured in bits, so the upper bound must be 128, not 64.
Augments the wset tests to cover this case.
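
For reference, the pattern that tripped the old bound is simply a 16-byte
access: its TypeSize is 128 bits, which exceeded assert(TypeSize <= 64) even
though getMemoryAccessFuncIndex already restricts instrumented accesses to at
most 16 bytes. A minimal IR sketch (it mirrors the @aligned16 test added
below; no RUN line or CHECK lines are shown here):

  define i128 @aligned16(i128* %a) {
  entry:
    ; naturally aligned 128-bit load; TypeSize = 128 bits (16 bytes),
    ; which the working-set fastpath now accepts without asserting
    %tmp1 = load i128, i128* %a, align 16
    ret i128 %tmp1
  }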

Reviewers: aizatsky

Subscribers: vitalybuka, zhaoqin, kcc, eugenis, llvm-commits

Differential Revision: http://reviews.llvm.org/D22062

Modified:
    llvm/trunk/lib/Transforms/Instrumentation/EfficiencySanitizer.cpp
    llvm/trunk/test/Instrumentation/EfficiencySanitizer/working_set_basic.ll
    llvm/trunk/test/Instrumentation/EfficiencySanitizer/working_set_strict.ll

Modified: llvm/trunk/lib/Transforms/Instrumentation/EfficiencySanitizer.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Instrumentation/EfficiencySanitizer.cpp?rev=274666&r1=274665&r2=274666&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Instrumentation/EfficiencySanitizer.cpp (original)
+++ llvm/trunk/lib/Transforms/Instrumentation/EfficiencySanitizer.cpp Wed Jul  6 15:13:53 2016
@@ -671,7 +671,7 @@ bool EfficiencySanitizer::instrumentLoad
       NumFastpaths++;
       return true;
     }
-    if (Alignment == 0 || Alignment >= 8 || (Alignment % TypeSizeBytes) == 0)
+    if (Alignment == 0 || (Alignment % TypeSizeBytes) == 0)
       OnAccessFunc = IsStore ? EsanAlignedStore[Idx] : EsanAlignedLoad[Idx];
     else
       OnAccessFunc = IsStore ? EsanUnalignedStore[Idx] : EsanUnalignedLoad[Idx];
@@ -832,7 +832,7 @@ bool EfficiencySanitizer::instrumentFast
   // getMemoryAccessFuncIndex has already ruled out a size larger than 16
   // and thus larger than a cache line for platforms this tool targets
   // (and our shadow memory setup assumes 64-byte cache lines).
-  assert(TypeSize <= 64);
+  assert(TypeSize <= 128);
   if (!(TypeSize == 8 ||
         (Alignment % (TypeSize / 8)) == 0)) {
     if (ClAssumeIntraCacheLine)

Modified: llvm/trunk/test/Instrumentation/EfficiencySanitizer/working_set_basic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Instrumentation/EfficiencySanitizer/working_set_basic.ll?rev=274666&r1=274665&r2=274666&view=diff
==============================================================================
--- llvm/trunk/test/Instrumentation/EfficiencySanitizer/working_set_basic.ll (original)
+++ llvm/trunk/test/Instrumentation/EfficiencySanitizer/working_set_basic.ll Wed Jul  6 15:13:53 2016
@@ -90,6 +90,27 @@ entry:
 ; CHECK-NEXT:   ret i64 %tmp1
 }
 
+define i128 @aligned16(i128* %a) {
+entry:
+  %tmp1 = load i128, i128* %a, align 16
+  ret i128 %tmp1
+; CHECK:        %0 = ptrtoint i128* %a to i64
+; CHECK-NEXT:   %1 = and i64 %0, 17592186044415
+; CHECK-NEXT:   %2 = add i64 %1, 1337006139375616
+; CHECK-NEXT:   %3 = lshr i64 %2, 6
+; CHECK-NEXT:   %4 = inttoptr i64 %3 to i8*
+; CHECK-NEXT:   %5 = load i8, i8* %4
+; CHECK-NEXT:   %6 = and i8 %5, -127
+; CHECK-NEXT:   %7 = icmp ne i8 %6, -127
+; CHECK-NEXT:   br i1 %7, label %8, label %11
+; CHECK:        %9 = or i8 %5, -127
+; CHECK-NEXT:   %10 = inttoptr i64 %3 to i8*
+; CHECK-NEXT:   store i8 %9, i8* %10
+; CHECK-NEXT:   br label %11
+; CHECK:        %tmp1 = load i128, i128* %a, align 16
+; CHECK-NEXT:   ret i128 %tmp1
+}
+
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ; Not guaranteed to be intra-cache-line, but our defaults are to
 ; assume they are:
@@ -157,6 +178,27 @@ entry:
 ; CHECK-NEXT:   ret i64 %tmp1
 }
 
+define i128 @unaligned16(i128* %a) {
+entry:
+  %tmp1 = load i128, i128* %a, align 8
+  ret i128 %tmp1
+; CHECK:        %0 = ptrtoint i128* %a to i64
+; CHECK-NEXT:   %1 = and i64 %0, 17592186044415
+; CHECK-NEXT:   %2 = add i64 %1, 1337006139375616
+; CHECK-NEXT:   %3 = lshr i64 %2, 6
+; CHECK-NEXT:   %4 = inttoptr i64 %3 to i8*
+; CHECK-NEXT:   %5 = load i8, i8* %4
+; CHECK-NEXT:   %6 = and i8 %5, -127
+; CHECK-NEXT:   %7 = icmp ne i8 %6, -127
+; CHECK-NEXT:   br i1 %7, label %8, label %11
+; CHECK:        %9 = or i8 %5, -127
+; CHECK-NEXT:   %10 = inttoptr i64 %3 to i8*
+; CHECK-NEXT:   store i8 %9, i8* %10
+; CHECK-NEXT:   br label %11
+; CHECK:        %tmp1 = load i128, i128* %a, align 8
+; CHECK-NEXT:   ret i128 %tmp1
+}
+
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ; Ensure that esan converts intrinsics to calls:
 

Modified: llvm/trunk/test/Instrumentation/EfficiencySanitizer/working_set_strict.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Instrumentation/EfficiencySanitizer/working_set_strict.ll?rev=274666&r1=274665&r2=274666&view=diff
==============================================================================
--- llvm/trunk/test/Instrumentation/EfficiencySanitizer/working_set_strict.ll (original)
+++ llvm/trunk/test/Instrumentation/EfficiencySanitizer/working_set_strict.ll Wed Jul  6 15:13:53 2016
@@ -91,6 +91,27 @@ entry:
 ; CHECK-NEXT:   ret i64 %tmp1
 }
 
+define i128 @aligned16(i128* %a) {
+entry:
+  %tmp1 = load i128, i128* %a, align 16
+  ret i128 %tmp1
+; CHECK:        %0 = ptrtoint i128* %a to i64
+; CHECK-NEXT:   %1 = and i64 %0, 17592186044415
+; CHECK-NEXT:   %2 = add i64 %1, 1337006139375616
+; CHECK-NEXT:   %3 = lshr i64 %2, 6
+; CHECK-NEXT:   %4 = inttoptr i64 %3 to i8*
+; CHECK-NEXT:   %5 = load i8, i8* %4
+; CHECK-NEXT:   %6 = and i8 %5, -127
+; CHECK-NEXT:   %7 = icmp ne i8 %6, -127
+; CHECK-NEXT:   br i1 %7, label %8, label %11
+; CHECK:        %9 = or i8 %5, -127
+; CHECK-NEXT:   %10 = inttoptr i64 %3 to i8*
+; CHECK-NEXT:   store i8 %9, i8* %10
+; CHECK-NEXT:   br label %11
+; CHECK:        %tmp1 = load i128, i128* %a, align 16
+; CHECK-NEXT:   ret i128 %tmp1
+}
+
 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
 ; Not guaranteed to be intra-cache-line
 
@@ -123,3 +144,13 @@ entry:
 ; CHECK-NEXT:   %tmp1 = load i64, i64* %a, align 4
 ; CHECK-NEXT:   ret i64 %tmp1
 }
+
+define i128 @unaligned16(i128* %a) {
+entry:
+  %tmp1 = load i128, i128* %a, align 8
+  ret i128 %tmp1
+; CHECK:        %0 = bitcast i128* %a to i8*
+; CHECK-NEXT:   call void @__esan_unaligned_load16(i8* %0)
+; CHECK-NEXT:   %tmp1 = load i128, i128* %a, align 8
+; CHECK-NEXT:   ret i128 %tmp1
+}