[llvm] affc90e - [test][msan] Convert test to autogenerated checks

Vitaly Buka via llvm-commits llvm-commits at lists.llvm.org
Sat Sep 10 13:22:14 PDT 2022


Author: Vitaly Buka
Date: 2022-09-10T13:22:01-07:00
New Revision: affc90ed8d30badf585a93d1b6997e400099075c

URL: https://github.com/llvm/llvm-project/commit/affc90ed8d30badf585a93d1b6997e400099075c
DIFF: https://github.com/llvm/llvm-project/commit/affc90ed8d30badf585a93d1b6997e400099075c.diff

LOG: [test][msan] Convert test to autogenerated checks
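
The hand-written CHECK/ORIGINS/ADDR patterns are replaced with full
assertions emitted by utils/update_test_checks.py, and the ORIGINS run
line no longer shares the CHECK prefix, so each configuration carries
its own fully expanded checks. A likely regeneration command (assumed;
the exact invocation is not recorded in the commit), run from an
llvm-project checkout with opt built in ./build:

  ./llvm/utils/update_test_checks.py --opt-binary=build/bin/opt \
      llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll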

Added: 
    

Modified: 
    llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll b/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll
index 8dd81b4db0134..0cfb3f4538ea8 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll
@@ -1,6 +1,7 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -msan-check-access-address=0 -S -passes=msan 2>&1 | FileCheck %s --implicit-check-not="call void @__msan_warning"
-; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=1 -S -passes=msan 2>&1 | FileCheck %s "--check-prefixes=CHECK,ORIGINS" --implicit-check-not="call void @__msan_warning"
-; RUN: opt < %s -msan-check-access-address=1 -S -passes=msan 2>&1 | FileCheck %s --check-prefix=ADDR --implicit-check-not="call void @__msan_warning"
+; RUN: opt < %s -msan-check-access-address=1 -S -passes=msan 2>&1 | FileCheck %s --check-prefixes=ADDR --implicit-check-not="call void @__msan_warning"
+; RUN: opt < %s -msan-check-access-address=0 -msan-track-origins=1 -S -passes=msan 2>&1 | FileCheck %s --check-prefixes=ORIGINS --implicit-check-not="call void @__msan_warning"
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
@@ -9,112 +10,224 @@ declare void @llvm.masked.store.v4i64.p0v4i64(<4 x i64>, <4 x i64>*, i32, <4 x i
 declare <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>*, i32, <4 x i1>, <4 x double>)
 
 define void @Store(<4 x i64>* %p, <4 x i64> %v, <4 x i1> %mask) sanitize_memory {
+; CHECK-LABEL: @Store(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i64>*), align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint <4 x i64>* [[P:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to <4 x i64>*
+; CHECK-NEXT:    call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> [[TMP0]], <4 x i64>* [[TMP3]], i32 1, <4 x i1> [[MASK:%.*]])
+; CHECK-NEXT:    tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> [[V:%.*]], <4 x i64>* [[P]], i32 1, <4 x i1> [[MASK]])
+; CHECK-NEXT:    ret void
+;
+; ADDR-LABEL: @Store(
+; ADDR-NEXT:  entry:
+; ADDR-NEXT:    [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i64>*), align 8
+; ADDR-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
+; ADDR-NEXT:    [[TMP2:%.*]] = load <4 x i1>, <4 x i1>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <4 x i1>*), align 8
+; ADDR-NEXT:    call void @llvm.donothing()
+; ADDR-NEXT:    [[TMP3:%.*]] = ptrtoint <4 x i64>* [[P:%.*]] to i64
+; ADDR-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
+; ADDR-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to <4 x i64>*
+; ADDR-NEXT:    call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> [[TMP0]], <4 x i64>* [[TMP5]], i32 1, <4 x i1> [[MASK:%.*]])
+; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
+; ADDR-NEXT:    [[TMP6:%.*]] = bitcast <4 x i1> [[TMP2]] to i4
+; ADDR-NEXT:    [[_MSCMP1:%.*]] = icmp ne i4 [[TMP6]], 0
+; ADDR-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
+; ADDR-NEXT:    br i1 [[_MSOR]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0:![0-9]+]]
+; ADDR:       7:
+; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR5:[0-9]+]]
+; ADDR-NEXT:    unreachable
+; ADDR:       8:
+; ADDR-NEXT:    tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> [[V:%.*]], <4 x i64>* [[P]], i32 1, <4 x i1> [[MASK]])
+; ADDR-NEXT:    ret void
+;
+; ORIGINS-LABEL: @Store(
+; ORIGINS-NEXT:  entry:
+; ORIGINS-NEXT:    [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i64>*), align 8
+; ORIGINS-NEXT:    [[TMP1:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 8) to i32*), align 4
+; ORIGINS-NEXT:    call void @llvm.donothing()
+; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint <4 x i64>* [[P:%.*]] to i64
+; ORIGINS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
+; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to <4 x i64>*
+; ORIGINS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
+; ORIGINS-NEXT:    [[TMP6:%.*]] = and i64 [[TMP5]], -4
+; ORIGINS-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to i32*
+; ORIGINS-NEXT:    call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> [[TMP0]], <4 x i64>* [[TMP4]], i32 1, <4 x i1> [[MASK:%.*]])
+; ORIGINS-NEXT:    store i32 [[TMP1]], i32* [[TMP7]], align 4
+; ORIGINS-NEXT:    [[TMP8:%.*]] = getelementptr i32, i32* [[TMP7]], i32 1
+; ORIGINS-NEXT:    store i32 [[TMP1]], i32* [[TMP8]], align 4
+; ORIGINS-NEXT:    [[TMP9:%.*]] = getelementptr i32, i32* [[TMP7]], i32 2
+; ORIGINS-NEXT:    store i32 [[TMP1]], i32* [[TMP9]], align 4
+; ORIGINS-NEXT:    [[TMP10:%.*]] = getelementptr i32, i32* [[TMP7]], i32 3
+; ORIGINS-NEXT:    store i32 [[TMP1]], i32* [[TMP10]], align 4
+; ORIGINS-NEXT:    [[TMP11:%.*]] = getelementptr i32, i32* [[TMP7]], i32 4
+; ORIGINS-NEXT:    store i32 [[TMP1]], i32* [[TMP11]], align 4
+; ORIGINS-NEXT:    [[TMP12:%.*]] = getelementptr i32, i32* [[TMP7]], i32 5
+; ORIGINS-NEXT:    store i32 [[TMP1]], i32* [[TMP12]], align 4
+; ORIGINS-NEXT:    [[TMP13:%.*]] = getelementptr i32, i32* [[TMP7]], i32 6
+; ORIGINS-NEXT:    store i32 [[TMP1]], i32* [[TMP13]], align 4
+; ORIGINS-NEXT:    [[TMP14:%.*]] = getelementptr i32, i32* [[TMP7]], i32 7
+; ORIGINS-NEXT:    store i32 [[TMP1]], i32* [[TMP14]], align 4
+; ORIGINS-NEXT:    tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> [[V:%.*]], <4 x i64>* [[P]], i32 1, <4 x i1> [[MASK]])
+; ORIGINS-NEXT:    ret void
+;
 entry:
   tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> %v, <4 x i64>* %p, i32 1, <4 x i1> %mask)
   ret void
 }
 
-; CHECK-LABEL: @Store(
-; CHECK: %[[A:.*]] = load <4 x i64>, {{.*}}@__msan_param_tls to i64), i64 8)
-; ORIGINS: %[[O:.*]] = load i32, {{.*}}@__msan_param_origin_tls to i64), i64 8)
-; CHECK: %[[B:.*]] = ptrtoint <4 x i64>* %p to i64
-; CHECK: %[[C:.*]] = xor i64 %[[B]], 87960930222080
-; CHECK: %[[D:.*]] = inttoptr i64 %[[C]] to <4 x i64>*
-; ORIGINS: %[[E:.*]] = add i64 %[[C]], 17592186044416
-; ORIGINS: %[[F:.*]] = and i64 %[[E]], -4
-; ORIGINS: %[[G:.*]] = inttoptr i64 %[[F]] to i32*
-; CHECK: call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> %[[A]], <4 x i64>* %[[D]], i32 1, <4 x i1> %mask)
-; ORIGINS: store i32 %[[O]], i32* %[[G]], align 4
-; ORIGINS: getelementptr i32, i32* %[[G]], i32 1
-; ORIGINS: store i32 %[[O]], i32* {{.*}}, align 4
-; ORIGINS: getelementptr i32, i32* %[[G]], i32 2
-; ORIGINS: store i32 %[[O]], i32* {{.*}}, align 4
-; ORIGINS: getelementptr i32, i32* %[[G]], i32 3
-; ORIGINS: store i32 %[[O]], i32* {{.*}}, align 4
-; ORIGINS: getelementptr i32, i32* %[[G]], i32 4
-; ORIGINS: store i32 %[[O]], i32* {{.*}}, align 4
-; ORIGINS: getelementptr i32, i32* %[[G]], i32 5
-; ORIGINS: store i32 %[[O]], i32* {{.*}}, align 4
-; ORIGINS: getelementptr i32, i32* %[[G]], i32 6
-; ORIGINS: store i32 %[[O]], i32* {{.*}}, align 4
-; ORIGINS: getelementptr i32, i32* %[[G]], i32 7
-; ORIGINS: store i32 %[[O]], i32* {{.*}}, align 4
-; CHECK: tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> %v, <4 x i64>* %p, i32 1, <4 x i1> %mask)
-; CHECK: ret void
-
-; ADDR-LABEL: @Store(
-; ADDR: %[[ADDRSHADOW:.*]] = load i64, {{.*}}[100 x i64]* @__msan_param_tls, i32 0, i32 0)
-; ADDR: %[[MASKSHADOW:.*]] = load <4 x i1>, {{.*}}@__msan_param_tls to i64), i64 40)
-
-; ADDR: %[[ADDRBAD:.*]] = icmp ne i64 %[[ADDRSHADOW]], 0
-; ADDR: %[[MASKSHADOWFLAT:.*]] = bitcast <4 x i1> %[[MASKSHADOW]] to i4
-; ADDR: %[[MASKBAD:.*]] = icmp ne i4 %[[MASKSHADOWFLAT]], 0
-; ADDR: %[[OR:.*]] = or i1 %[[ADDRBAD]], %[[MASKBAD]]
-; ADDR: br i1 %[[OR]], label {{.*}}, label {{.*}}
-; ADDR: call void @__msan_warning_noreturn()
-
-; ADDR: tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> %v, <4 x i64>* %p, i32 1, <4 x i1> %mask)
-; ADDR: ret void
-
-
 define <4 x double> @Load(<4 x double>* %p, <4 x double> %v, <4 x i1> %mask) sanitize_memory {
+; CHECK-LABEL: @Load(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i64>*), align 8
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint <4 x double>* [[P:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], 87960930222080
+; CHECK-NEXT:    [[TMP3:%.*]] = inttoptr i64 [[TMP2]] to <4 x i64>*
+; CHECK-NEXT:    [[_MSMASKEDLD:%.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* [[TMP3]], i32 1, <4 x i1> [[MASK:%.*]], <4 x i64> [[TMP0]])
+; CHECK-NEXT:    [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* [[P]], i32 1, <4 x i1> [[MASK]], <4 x double> [[V:%.*]])
+; CHECK-NEXT:    store <4 x i64> [[_MSMASKEDLD]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    ret <4 x double> [[X]]
+;
+; ADDR-LABEL: @Load(
+; ADDR-NEXT:  entry:
+; ADDR-NEXT:    [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i64>*), align 8
+; ADDR-NEXT:    [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
+; ADDR-NEXT:    [[TMP2:%.*]] = load <4 x i1>, <4 x i1>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 40) to <4 x i1>*), align 8
+; ADDR-NEXT:    call void @llvm.donothing()
+; ADDR-NEXT:    [[TMP3:%.*]] = ptrtoint <4 x double>* [[P:%.*]] to i64
+; ADDR-NEXT:    [[TMP4:%.*]] = xor i64 [[TMP3]], 87960930222080
+; ADDR-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to <4 x i64>*
+; ADDR-NEXT:    [[_MSMASKEDLD:%.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* [[TMP5]], i32 1, <4 x i1> [[MASK:%.*]], <4 x i64> [[TMP0]])
+; ADDR-NEXT:    [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
+; ADDR-NEXT:    [[TMP6:%.*]] = bitcast <4 x i1> [[TMP2]] to i4
+; ADDR-NEXT:    [[_MSCMP1:%.*]] = icmp ne i4 [[TMP6]], 0
+; ADDR-NEXT:    [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
+; ADDR-NEXT:    br i1 [[_MSOR]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
+; ADDR:       7:
+; ADDR-NEXT:    call void @__msan_warning_noreturn() #[[ATTR5]]
+; ADDR-NEXT:    unreachable
+; ADDR:       8:
+; ADDR-NEXT:    [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* [[P]], i32 1, <4 x i1> [[MASK]], <4 x double> [[V:%.*]])
+; ADDR-NEXT:    store <4 x i64> [[_MSMASKEDLD]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; ADDR-NEXT:    ret <4 x double> [[X]]
+;
+; ORIGINS-LABEL: @Load(
+; ORIGINS-NEXT:  entry:
+; ORIGINS-NEXT:    [[TMP0:%.*]] = load <4 x i64>, <4 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <4 x i64>*), align 8
+; ORIGINS-NEXT:    [[TMP1:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 8) to i32*), align 4
+; ORIGINS-NEXT:    call void @llvm.donothing()
+; ORIGINS-NEXT:    [[TMP2:%.*]] = ptrtoint <4 x double>* [[P:%.*]] to i64
+; ORIGINS-NEXT:    [[TMP3:%.*]] = xor i64 [[TMP2]], 87960930222080
+; ORIGINS-NEXT:    [[TMP4:%.*]] = inttoptr i64 [[TMP3]] to <4 x i64>*
+; ORIGINS-NEXT:    [[TMP5:%.*]] = add i64 [[TMP3]], 17592186044416
+; ORIGINS-NEXT:    [[TMP6:%.*]] = and i64 [[TMP5]], -4
+; ORIGINS-NEXT:    [[TMP7:%.*]] = inttoptr i64 [[TMP6]] to i32*
+; ORIGINS-NEXT:    [[_MSMASKEDLD:%.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* [[TMP4]], i32 1, <4 x i1> [[MASK:%.*]], <4 x i64> [[TMP0]])
+; ORIGINS-NEXT:    [[TMP8:%.*]] = sub <4 x i1> zeroinitializer, [[MASK]]
+; ORIGINS-NEXT:    [[TMP9:%.*]] = sext <4 x i1> [[TMP8]] to <4 x i64>
+; ORIGINS-NEXT:    [[TMP10:%.*]] = and <4 x i64> [[TMP0]], [[TMP9]]
+; ORIGINS-NEXT:    [[TMP11:%.*]] = extractelement <4 x i64> [[TMP10]], i32 0
+; ORIGINS-NEXT:    [[TMP12:%.*]] = extractelement <4 x i64> [[TMP10]], i32 1
+; ORIGINS-NEXT:    [[TMP13:%.*]] = or i64 [[TMP11]], [[TMP12]]
+; ORIGINS-NEXT:    [[TMP14:%.*]] = extractelement <4 x i64> [[TMP10]], i32 2
+; ORIGINS-NEXT:    [[TMP15:%.*]] = or i64 [[TMP13]], [[TMP14]]
+; ORIGINS-NEXT:    [[TMP16:%.*]] = extractelement <4 x i64> [[TMP10]], i32 3
+; ORIGINS-NEXT:    [[TMP17:%.*]] = or i64 [[TMP15]], [[TMP16]]
+; ORIGINS-NEXT:    [[TMP18:%.*]] = icmp ne i64 [[TMP17]], 0
+; ORIGINS-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP7]], align 4
+; ORIGINS-NEXT:    [[TMP20:%.*]] = select i1 [[TMP18]], i32 [[TMP1]], i32 [[TMP19]]
+; ORIGINS-NEXT:    [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* [[P]], i32 1, <4 x i1> [[MASK]], <4 x double> [[V:%.*]])
+; ORIGINS-NEXT:    store <4 x i64> [[_MSMASKEDLD]], <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; ORIGINS-NEXT:    store i32 [[TMP20]], i32* @__msan_retval_origin_tls, align 4
+; ORIGINS-NEXT:    ret <4 x double> [[X]]
+;
 entry:
   %x = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %p, i32 1, <4 x i1> %mask, <4 x double> %v)
   ret <4 x double> %x
 }
 
-; CHECK-LABEL: @Load(
-; CHECK: %[[A:.*]] = load <4 x i64>, {{.*}}@__msan_param_tls to i64), i64 8)
-; ORIGINS: %[[O:.*]] = load i32, {{.*}}@__msan_param_origin_tls to i64), i64 8)
-; CHECK: %[[B:.*]] = ptrtoint <4 x double>* %p to i64
-; CHECK: %[[C:.*]] = xor i64 %[[B]], 87960930222080
-; CHECK: %[[D:.*]] = inttoptr i64 %[[C]] to <4 x i64>*
-; ORIGINS: %[[E:.*]] = add i64 %[[C]], 17592186044416
-; ORIGINS: %[[F:.*]] = and i64 %[[E]], -4
-; ORIGINS: %[[G:.*]] = inttoptr i64 %[[F]] to i32*
-; CHECK: %[[E:.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* %[[D]], i32 1, <4 x i1> %mask, <4 x i64> %[[A]])
-; ORIGINS: %[[H:.*]] = load i32, i32* %[[G]]
-; ORIGINS: %[[O2:.*]] = select i1 %{{.*}}, i32 %[[O]], i32 %[[H]]
-; CHECK: %[[X:.*]] = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %p, i32 1, <4 x i1> %mask, <4 x double> %v)
-; CHECK: store <4 x i64> %[[E]], {{.*}}@__msan_retval_tls
-; ORIGINS: store i32 %[[O2]], i32* @__msan_retval_origin_tls
-; CHECK: ret <4 x double> %[[X]]
-
-; ADDR-LABEL: @Load(
-; ADDR: %[[ADDRSHADOW:.*]] = load i64, {{.*}}[100 x i64]* @__msan_param_tls, i32 0, i32 0)
-; ADDR: %[[MASKSHADOW:.*]] = load <4 x i1>, {{.*}}@__msan_param_tls to i64), i64 40)
-
-; ADDR: %[[ADDRBAD:.*]] = icmp ne i64 %[[ADDRSHADOW]], 0
-; ADDR: %[[MASKSHADOWFLAT:.*]] = bitcast <4 x i1> %[[MASKSHADOW]] to i4
-; ADDR: %[[MASKBAD:.*]] = icmp ne i4 %[[MASKSHADOWFLAT]], 0
-; ADDR: %[[OR:.*]] = or i1 %[[ADDRBAD]], %[[MASKBAD]]
-; ADDR: br i1 %[[OR]], label {{.*}}, label {{.*}}
-; ADDR: call void @__msan_warning_noreturn()
-
-; ADDR: = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %p, i32 1, <4 x i1> %mask, <4 x double> %v)
-; ADDR: ret <4 x double>
-
 define void @StoreNoSanitize(<4 x i64>* %p, <4 x i64> %v, <4 x i1> %mask) {
+; CHECK-LABEL: @StoreNoSanitize(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[TMP0:%.*]] = ptrtoint <4 x i64>* [[P:%.*]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080
+; CHECK-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to <4 x i64>*
+; CHECK-NEXT:    call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> zeroinitializer, <4 x i64>* [[TMP2]], i32 1, <4 x i1> [[MASK:%.*]])
+; CHECK-NEXT:    tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> [[V:%.*]], <4 x i64>* [[P]], i32 1, <4 x i1> [[MASK]])
+; CHECK-NEXT:    ret void
+;
+; ADDR-LABEL: @StoreNoSanitize(
+; ADDR-NEXT:  entry:
+; ADDR-NEXT:    call void @llvm.donothing()
+; ADDR-NEXT:    [[TMP0:%.*]] = ptrtoint <4 x i64>* [[P:%.*]] to i64
+; ADDR-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080
+; ADDR-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to <4 x i64>*
+; ADDR-NEXT:    call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> zeroinitializer, <4 x i64>* [[TMP2]], i32 1, <4 x i1> [[MASK:%.*]])
+; ADDR-NEXT:    tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> [[V:%.*]], <4 x i64>* [[P]], i32 1, <4 x i1> [[MASK]])
+; ADDR-NEXT:    ret void
+;
+; ORIGINS-LABEL: @StoreNoSanitize(
+; ORIGINS-NEXT:  entry:
+; ORIGINS-NEXT:    call void @llvm.donothing()
+; ORIGINS-NEXT:    [[TMP0:%.*]] = ptrtoint <4 x i64>* [[P:%.*]] to i64
+; ORIGINS-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], 87960930222080
+; ORIGINS-NEXT:    [[TMP2:%.*]] = inttoptr i64 [[TMP1]] to <4 x i64>*
+; ORIGINS-NEXT:    [[TMP3:%.*]] = add i64 [[TMP1]], 17592186044416
+; ORIGINS-NEXT:    [[TMP4:%.*]] = and i64 [[TMP3]], -4
+; ORIGINS-NEXT:    [[TMP5:%.*]] = inttoptr i64 [[TMP4]] to i32*
+; ORIGINS-NEXT:    call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> zeroinitializer, <4 x i64>* [[TMP2]], i32 1, <4 x i1> [[MASK:%.*]])
+; ORIGINS-NEXT:    store i32 0, i32* [[TMP5]], align 4
+; ORIGINS-NEXT:    [[TMP6:%.*]] = getelementptr i32, i32* [[TMP5]], i32 1
+; ORIGINS-NEXT:    store i32 0, i32* [[TMP6]], align 4
+; ORIGINS-NEXT:    [[TMP7:%.*]] = getelementptr i32, i32* [[TMP5]], i32 2
+; ORIGINS-NEXT:    store i32 0, i32* [[TMP7]], align 4
+; ORIGINS-NEXT:    [[TMP8:%.*]] = getelementptr i32, i32* [[TMP5]], i32 3
+; ORIGINS-NEXT:    store i32 0, i32* [[TMP8]], align 4
+; ORIGINS-NEXT:    [[TMP9:%.*]] = getelementptr i32, i32* [[TMP5]], i32 4
+; ORIGINS-NEXT:    store i32 0, i32* [[TMP9]], align 4
+; ORIGINS-NEXT:    [[TMP10:%.*]] = getelementptr i32, i32* [[TMP5]], i32 5
+; ORIGINS-NEXT:    store i32 0, i32* [[TMP10]], align 4
+; ORIGINS-NEXT:    [[TMP11:%.*]] = getelementptr i32, i32* [[TMP5]], i32 6
+; ORIGINS-NEXT:    store i32 0, i32* [[TMP11]], align 4
+; ORIGINS-NEXT:    [[TMP12:%.*]] = getelementptr i32, i32* [[TMP5]], i32 7
+; ORIGINS-NEXT:    store i32 0, i32* [[TMP12]], align 4
+; ORIGINS-NEXT:    tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> [[V:%.*]], <4 x i64>* [[P]], i32 1, <4 x i1> [[MASK]])
+; ORIGINS-NEXT:    ret void
+;
 entry:
   tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> %v, <4 x i64>* %p, i32 1, <4 x i1> %mask)
   ret void
 }
 
-; CHECK-LABEL: @StoreNoSanitize(
-; CHECK: %[[B:.*]] = ptrtoint <4 x i64>* %p to i64
-; CHECK: %[[C:.*]] = xor i64 %[[B]], 87960930222080
-; CHECK: %[[D:.*]] = inttoptr i64 %[[C]] to <4 x i64>*
-; CHECK: call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> zeroinitializer, <4 x i64>* %[[D]], i32 1, <4 x i1> %mask)
-; CHECK: tail call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> %v, <4 x i64>* %p, i32 1, <4 x i1> %mask)
-; CHECK: ret void
-
 define <4 x double> @LoadNoSanitize(<4 x double>* %p, <4 x double> %v, <4 x i1> %mask) {
+; CHECK-LABEL: @LoadNoSanitize(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    call void @llvm.donothing()
+; CHECK-NEXT:    [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* [[P:%.*]], i32 1, <4 x i1> [[MASK:%.*]], <4 x double> [[V:%.*]])
+; CHECK-NEXT:    store <4 x i64> zeroinitializer, <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; CHECK-NEXT:    ret <4 x double> [[X]]
+;
+; ADDR-LABEL: @LoadNoSanitize(
+; ADDR-NEXT:  entry:
+; ADDR-NEXT:    call void @llvm.donothing()
+; ADDR-NEXT:    [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* [[P:%.*]], i32 1, <4 x i1> [[MASK:%.*]], <4 x double> [[V:%.*]])
+; ADDR-NEXT:    store <4 x i64> zeroinitializer, <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; ADDR-NEXT:    ret <4 x double> [[X]]
+;
+; ORIGINS-LABEL: @LoadNoSanitize(
+; ORIGINS-NEXT:  entry:
+; ORIGINS-NEXT:    call void @llvm.donothing()
+; ORIGINS-NEXT:    [[X:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* [[P:%.*]], i32 1, <4 x i1> [[MASK:%.*]], <4 x double> [[V:%.*]])
+; ORIGINS-NEXT:    store <4 x i64> zeroinitializer, <4 x i64>* bitcast ([100 x i64]* @__msan_retval_tls to <4 x i64>*), align 8
+; ORIGINS-NEXT:    store i32 0, i32* @__msan_retval_origin_tls, align 4
+; ORIGINS-NEXT:    ret <4 x double> [[X]]
+;
 entry:
   %x = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %p, i32 1, <4 x i1> %mask, <4 x double> %v)
   ret <4 x double> %x
 }
 
-; CHECK-LABEL: @LoadNoSanitize(
-; CHECK: %[[X:.*]] = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* %p, i32 1, <4 x i1> %mask, <4 x double> %v)
-; CHECK: store <4 x i64> zeroinitializer, {{.*}}@__msan_retval_tls to <4 x i64>*)
-; CHECK: ret <4 x double> %[[X]]

More information about the llvm-commits mailing list