[llvm] 6f3276d - [msan] Check mask and pointers shadow
Vitaly Buka via llvm-commits
llvm-commits at lists.llvm.org
Mon Sep 19 13:10:10 PDT 2022
Author: Vitaly Buka
Date: 2022-09-19T13:09:56-07:00
New Revision: 6f3276d57e265be0996e2f67e2e872401da8f511
URL: https://github.com/llvm/llvm-project/commit/6f3276d57e265be0996e2f67e2e872401da8f511
DIFF: https://github.com/llvm/llvm-project/commit/6f3276d57e265be0996e2f67e2e872401da8f511.diff
LOG: [msan] Check mask and pointers shadow
MSan has a default handler for unknown instructions, which
previously applied to these intrinsics as well. However,
depending on the mask, not all of the pointers or parts of the
passthru value will actually be used. This allows other passes
to insert undef into such arguments. As a result, the default
strict instruction handler can produce false reports.
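To illustrate, here is a minimal, hypothetical IR fragment (not
taken from this commit; the function name is made up). Lane 1 of
the scatter is disabled by the mask, so a pass may legally leave
its pointer undef, and undef carries poisoned shadow by default
(-msan-poison-undef). The strict default handler, which
masked.scatter previously fell through to, checked the shadow of
the whole %ptrs operand and reported a false positive here; the
new handleMaskedScatter checks only the pointer shadow of the
enabled lanes.

  define void @ScatterLane0(<2 x i32> %vals, i32* %p) sanitize_memory {
    %ptrs = insertelement <2 x i32*> undef, i32* %p, i64 0
    call void @llvm.masked.scatter.v2i32.v2p0i32(<2 x i32> %vals,
              <2 x i32*> %ptrs, i32 4, <2 x i1> <i1 true, i1 false>)
    ret void
  }
  declare void @llvm.masked.scatter.v2i32.v2p0i32(<2 x i32>, <2 x i32*>, i32, <2 x i1>)

Running this through opt -passes=msan before and after the change
(with -msan-check-access-address=1) shows the difference.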
Reviewed By: kda, kstoimenov
Differential Revision: https://reviews.llvm.org/D133678
Added:
Modified:
llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index d22f81c2f082..9a73231b4b53 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -3330,19 +3330,71 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
}
void handleMaskedExpandLoad(IntrinsicInst &I) {
- // PassThru can be undef, so default visitInstruction is too strict.
- // TODO: Provide real implementation.
+ IRBuilder<> IRB(&I);
+ Value *Ptr = I.getArgOperand(0);
+ Value *Mask = I.getArgOperand(1);
+
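+    // With -msan-check-access-address (on by default), report uses of an
+    // uninitialized pointer or mask operand.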
+ if (ClCheckAccessAddress) {
+ insertShadowCheck(Ptr, &I);
+ insertShadowCheck(Mask, &I);
+ }
+
+ // TODO: Check loaded shadow.
+
setShadow(&I, getCleanShadow(&I));
setOrigin(&I, getCleanOrigin());
}
+ void handleMaskedCompressStore(IntrinsicInst &I) {
+ IRBuilder<> IRB(&I);
+ Value *Ptr = I.getArgOperand(1);
+ Value *Mask = I.getArgOperand(2);
+
+ if (ClCheckAccessAddress) {
+ insertShadowCheck(Ptr, &I);
+ insertShadowCheck(Mask, &I);
+ }
+
+ // TODO: Store shadow.
+ }
+
void handleMaskedGather(IntrinsicInst &I) {
- // PassThru can be undef, so default visitInstruction is too strict.
- // TODO: Provide real implementation.
+ IRBuilder<> IRB(&I);
+ Value *Ptrs = I.getArgOperand(0);
+ Value *Mask = I.getArgOperand(2);
+
+ Type *PtrsShadowTy = getShadowTy(Ptrs);
+ if (ClCheckAccessAddress) {
+ insertShadowCheck(Mask, &I);
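+      // Only the pointers in enabled lanes are dereferenced; give disabled
+      // lanes a clean shadow so that (possibly undef) pointers there do not
+      // trigger a report.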
+ Value *MaskedPtrShadow = IRB.CreateSelect(
+        Mask, getShadow(Ptrs), Constant::getNullValue(PtrsShadowTy),
+ "_msmaskedptrs");
+ insertShadowCheck(MaskedPtrShadow, getOrigin(Ptrs), &I);
+ }
+
+ // TODO: Check loaded shadow.
+
setShadow(&I, getCleanShadow(&I));
setOrigin(&I, getCleanOrigin());
}
+ void handleMaskedScatter(IntrinsicInst &I) {
+ IRBuilder<> IRB(&I);
+ Value *Ptrs = I.getArgOperand(1);
+ Value *Mask = I.getArgOperand(3);
+
+ Type *PtrsShadowTy = getShadowTy(Ptrs);
+ if (ClCheckAccessAddress) {
+ insertShadowCheck(Mask, &I);
+ Value *MaskedPtrShadow = IRB.CreateSelect(
+        Mask, getShadow(Ptrs), Constant::getNullValue(PtrsShadowTy),
+ "_msmaskedptrs");
+ insertShadowCheck(MaskedPtrShadow, getOrigin(Ptrs), &I);
+ }
+
+ // TODO: Store shadow.
+ }
+
void handleMaskedStore(IntrinsicInst &I) {
IRBuilder<> IRB(&I);
Value *V = I.getArgOperand(0);
@@ -3533,12 +3585,18 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
case Intrinsic::bswap:
handleBswap(I);
break;
+ case Intrinsic::masked_compressstore:
+ handleMaskedCompressStore(I);
+ break;
case Intrinsic::masked_expandload:
handleMaskedExpandLoad(I);
break;
case Intrinsic::masked_gather:
handleMaskedGather(I);
break;
+ case Intrinsic::masked_scatter:
+ handleMaskedScatter(I);
+ break;
case Intrinsic::masked_store:
handleMaskedStore(I);
break;
diff --git a/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll b/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll
index 55bd2cd7795f..76ffcd9d065d 100644
--- a/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll
+++ b/llvm/test/Instrumentation/MemorySanitizer/masked-store-load.ll
@@ -238,8 +238,21 @@ define <16 x float> @Gather(<16 x float*> %ptrs, <16 x i1> %mask, <16 x float> %
; CHECK-NEXT: ret <16 x float> [[RET]]
;
; ADDR-LABEL: @Gather(
+; ADDR-NEXT: [[TMP1:%.*]] = load <16 x i1>, <16 x i1>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 128) to <16 x i1>*), align 8
+; ADDR-NEXT: [[TMP2:%.*]] = load <16 x i64>, <16 x i64>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i64>*), align 8
; ADDR-NEXT: call void @llvm.donothing()
-; ADDR-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0f32(<16 x float*> [[PTRS:%.*]], i32 4, <16 x i1> [[MASK:%.*]], <16 x float> [[PASSTHRU:%.*]])
+; ADDR-NEXT: [[_MSMASKEDPTRS:%.*]] = select <16 x i1> [[MASK:%.*]], <16 x i64> [[TMP2]], <16 x i64> zeroinitializer
+; ADDR-NEXT: [[TMP3:%.*]] = bitcast <16 x i1> [[TMP1]] to i16
+; ADDR-NEXT: [[_MSCMP:%.*]] = icmp ne i16 [[TMP3]], 0
+; ADDR-NEXT: [[TMP4:%.*]] = bitcast <16 x i64> [[_MSMASKEDPTRS]] to i1024
+; ADDR-NEXT: [[_MSCMP1:%.*]] = icmp ne i1024 [[TMP4]], 0
+; ADDR-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
+; ADDR-NEXT: br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF0]]
+; ADDR: 5:
+; ADDR-NEXT: call void @__msan_warning_noreturn() #[[ATTR7]]
+; ADDR-NEXT: unreachable
+; ADDR: 6:
+; ADDR-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0f32(<16 x float*> [[PTRS:%.*]], i32 4, <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]])
; ADDR-NEXT: store <16 x i32> zeroinitializer, <16 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i32>*), align 8
; ADDR-NEXT: ret <16 x float> [[RET]]
;
@@ -263,7 +276,8 @@ define <16 x float> @GatherNoSanitize(<16 x float*> %ptrs, <16 x i1> %mask, <16
;
; ADDR-LABEL: @GatherNoSanitize(
; ADDR-NEXT: call void @llvm.donothing()
-; ADDR-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0f32(<16 x float*> [[PTRS:%.*]], i32 4, <16 x i1> [[MASK:%.*]], <16 x float> [[PASSTHRU:%.*]])
+; ADDR-NEXT: [[_MSMASKEDPTRS:%.*]] = select <16 x i1> [[MASK:%.*]], <16 x i64> zeroinitializer, <16 x i64> zeroinitializer
+; ADDR-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.gather.v16f32.v16p0f32(<16 x float*> [[PTRS:%.*]], i32 4, <16 x i1> [[MASK]], <16 x float> [[PASSTHRU:%.*]])
; ADDR-NEXT: store <16 x i32> zeroinitializer, <16 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i32>*), align 8
; ADDR-NEXT: ret <16 x float> [[RET]]
;
@@ -281,76 +295,30 @@ define <16 x float> @GatherNoSanitize(<16 x float*> %ptrs, <16 x i1> %mask, <16
; FIXME: Provide real implementation.
define void @Scatter(<8 x i32> %value, <8 x i32*> %ptrs, <8 x i1> %mask) sanitize_memory {
; CHECK-LABEL: @Scatter(
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i64>, <8 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i64>*), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <8 x i1>, <8 x i1>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 96) to <8 x i1>*), align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
-; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
-; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[TMP2]] to i512
-; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i512 [[TMP5]], 0
-; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT: [[TMP6:%.*]] = bitcast <8 x i1> [[TMP3]] to i8
-; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i8 [[TMP6]], 0
-; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT: br i1 [[_MSOR3]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0:![0-9]+]]
-; CHECK: 7:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR7:[0-9]+]]
-; CHECK-NEXT: unreachable
-; CHECK: 8:
; CHECK-NEXT: call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> [[VALUE:%.*]], <8 x i32*> [[PTRS:%.*]], i32 8, <8 x i1> [[MASK:%.*]])
; CHECK-NEXT: ret void
;
; ADDR-LABEL: @Scatter(
-; ADDR-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
+; ADDR-NEXT: [[TMP1:%.*]] = load <8 x i1>, <8 x i1>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 96) to <8 x i1>*), align 8
; ADDR-NEXT: [[TMP2:%.*]] = load <8 x i64>, <8 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i64>*), align 8
-; ADDR-NEXT: [[TMP3:%.*]] = load <8 x i1>, <8 x i1>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 96) to <8 x i1>*), align 8
; ADDR-NEXT: call void @llvm.donothing()
-; ADDR-NEXT: [[TMP4:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
-; ADDR-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP4]], 0
-; ADDR-NEXT: [[TMP5:%.*]] = bitcast <8 x i64> [[TMP2]] to i512
-; ADDR-NEXT: [[_MSCMP1:%.*]] = icmp ne i512 [[TMP5]], 0
+; ADDR-NEXT: [[_MSMASKEDPTRS:%.*]] = select <8 x i1> [[MASK:%.*]], <8 x i64> [[TMP2]], <8 x i64> zeroinitializer
+; ADDR-NEXT: [[TMP3:%.*]] = bitcast <8 x i1> [[TMP1]] to i8
+; ADDR-NEXT: [[_MSCMP:%.*]] = icmp ne i8 [[TMP3]], 0
+; ADDR-NEXT: [[TMP4:%.*]] = bitcast <8 x i64> [[_MSMASKEDPTRS]] to i512
+; ADDR-NEXT: [[_MSCMP1:%.*]] = icmp ne i512 [[TMP4]], 0
; ADDR-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; ADDR-NEXT: [[TMP6:%.*]] = bitcast <8 x i1> [[TMP3]] to i8
-; ADDR-NEXT: [[_MSCMP2:%.*]] = icmp ne i8 [[TMP6]], 0
-; ADDR-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; ADDR-NEXT: br i1 [[_MSOR3]], label [[TMP7:%.*]], label [[TMP8:%.*]], !prof [[PROF0]]
-; ADDR: 7:
+; ADDR-NEXT: br i1 [[_MSOR]], label [[TMP5:%.*]], label [[TMP6:%.*]], !prof [[PROF0]]
+; ADDR: 5:
; ADDR-NEXT: call void @__msan_warning_noreturn() #[[ATTR7]]
; ADDR-NEXT: unreachable
-; ADDR: 8:
-; ADDR-NEXT: call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> [[VALUE:%.*]], <8 x i32*> [[PTRS:%.*]], i32 8, <8 x i1> [[MASK:%.*]])
+; ADDR: 6:
+; ADDR-NEXT: call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> [[VALUE:%.*]], <8 x i32*> [[PTRS:%.*]], i32 8, <8 x i1> [[MASK]])
; ADDR-NEXT: ret void
;
; ORIGINS-LABEL: @Scatter(
-; ORIGINS-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <8 x i32>*), align 8
-; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
-; ORIGINS-NEXT: [[TMP3:%.*]] = load <8 x i64>, <8 x i64>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 32) to <8 x i64>*), align 8
-; ORIGINS-NEXT: [[TMP4:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 32) to i32*), align 4
-; ORIGINS-NEXT: [[TMP5:%.*]] = load <8 x i1>, <8 x i1>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 96) to <8 x i1>*), align 8
-; ORIGINS-NEXT: [[TMP6:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 96) to i32*), align 4
; ORIGINS-NEXT: call void @llvm.donothing()
-; ORIGINS-NEXT: [[TMP7:%.*]] = bitcast <8 x i32> [[TMP1]] to i256
-; ORIGINS-NEXT: [[_MSCMP:%.*]] = icmp ne i256 [[TMP7]], 0
-; ORIGINS-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0:![0-9]+]]
-; ORIGINS: 8:
-; ORIGINS-NEXT: call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR7:[0-9]+]]
-; ORIGINS-NEXT: unreachable
-; ORIGINS: 9:
-; ORIGINS-NEXT: [[TMP10:%.*]] = bitcast <8 x i64> [[TMP3]] to i512
-; ORIGINS-NEXT: [[_MSCMP1:%.*]] = icmp ne i512 [[TMP10]], 0
-; ORIGINS-NEXT: br i1 [[_MSCMP1]], label [[TMP11:%.*]], label [[TMP12:%.*]], !prof [[PROF0]]
-; ORIGINS: 11:
-; ORIGINS-NEXT: call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR7]]
-; ORIGINS-NEXT: unreachable
-; ORIGINS: 12:
-; ORIGINS-NEXT: [[TMP13:%.*]] = bitcast <8 x i1> [[TMP5]] to i8
-; ORIGINS-NEXT: [[_MSCMP2:%.*]] = icmp ne i8 [[TMP13]], 0
-; ORIGINS-NEXT: br i1 [[_MSCMP2]], label [[TMP14:%.*]], label [[TMP15:%.*]], !prof [[PROF0]]
-; ORIGINS: 14:
-; ORIGINS-NEXT: call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR7]]
-; ORIGINS-NEXT: unreachable
-; ORIGINS: 15:
; ORIGINS-NEXT: call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> [[VALUE:%.*]], <8 x i32*> [[PTRS:%.*]], i32 8, <8 x i1> [[MASK:%.*]])
; ORIGINS-NEXT: ret void
;
@@ -366,7 +334,8 @@ define void @ScatterNoSanitize(<8 x i32> %value, <8 x i32*> %ptrs, <8 x i1> %mas
;
; ADDR-LABEL: @ScatterNoSanitize(
; ADDR-NEXT: call void @llvm.donothing()
-; ADDR-NEXT: call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> [[VALUE:%.*]], <8 x i32*> [[PTRS:%.*]], i32 8, <8 x i1> [[MASK:%.*]])
+; ADDR-NEXT: [[_MSMASKEDPTRS:%.*]] = select <8 x i1> [[MASK:%.*]], <8 x i64> zeroinitializer, <8 x i64> zeroinitializer
+; ADDR-NEXT: call void @llvm.masked.scatter.v8i32.v8p0i32(<8 x i32> [[VALUE:%.*]], <8 x i32*> [[PTRS:%.*]], i32 8, <8 x i1> [[MASK]])
; ADDR-NEXT: ret void
;
; ORIGINS-LABEL: @ScatterNoSanitize(
@@ -387,7 +356,18 @@ define <16 x float> @ExpandLoad(float* %ptr, <16 x i1> %mask, <16 x float> %pass
; CHECK-NEXT: ret <16 x float> [[RET]]
;
; ADDR-LABEL: @ExpandLoad(
+; ADDR-NEXT: [[TMP1:%.*]] = load i64, i64* getelementptr inbounds ([100 x i64], [100 x i64]* @__msan_param_tls, i32 0, i32 0), align 8
+; ADDR-NEXT: [[TMP2:%.*]] = load <16 x i1>, <16 x i1>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 8) to <16 x i1>*), align 8
; ADDR-NEXT: call void @llvm.donothing()
+; ADDR-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
+; ADDR-NEXT: [[TMP3:%.*]] = bitcast <16 x i1> [[TMP2]] to i16
+; ADDR-NEXT: [[_MSCMP1:%.*]] = icmp ne i16 [[TMP3]], 0
+; ADDR-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
+; ADDR-NEXT: br i1 [[_MSOR]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
+; ADDR: 4:
+; ADDR-NEXT: call void @__msan_warning_noreturn() #[[ATTR7]]
+; ADDR-NEXT: unreachable
+; ADDR: 5:
; ADDR-NEXT: [[RET:%.*]] = call <16 x float> @llvm.masked.expandload.v16f32(float* [[PTR:%.*]], <16 x i1> [[MASK:%.*]], <16 x float> [[PASSTHRU:%.*]])
; ADDR-NEXT: store <16 x i32> zeroinitializer, <16 x i32>* bitcast ([100 x i64]* @__msan_retval_tls to <16 x i32>*), align 8
; ADDR-NEXT: ret <16 x float> [[RET]]
@@ -430,73 +410,28 @@ define <16 x float> @ExpandLoadNoSanitize(float* %ptr, <16 x i1> %mask, <16 x fl
; FIXME: Provide real implementation.
define void @CompressStore(<16 x float> %value, float* %ptr, <16 x i1> %mask) sanitize_memory {
; CHECK-LABEL: @CompressStore(
-; CHECK-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i32>*), align 8
-; CHECK-NEXT: [[TMP2:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 64) to i64*), align 8
-; CHECK-NEXT: [[TMP3:%.*]] = load <16 x i1>, <16 x i1>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 72) to <16 x i1>*), align 8
; CHECK-NEXT: call void @llvm.donothing()
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
-; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
-; CHECK-NEXT: [[_MSCMP1:%.*]] = icmp ne i64 [[TMP2]], 0
-; CHECK-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; CHECK-NEXT: [[TMP5:%.*]] = bitcast <16 x i1> [[TMP3]] to i16
-; CHECK-NEXT: [[_MSCMP2:%.*]] = icmp ne i16 [[TMP5]], 0
-; CHECK-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; CHECK-NEXT: br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF0]]
-; CHECK: 6:
-; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR7]]
-; CHECK-NEXT: unreachable
-; CHECK: 7:
; CHECK-NEXT: call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], float* [[PTR:%.*]], <16 x i1> [[MASK:%.*]])
; CHECK-NEXT: ret void
;
; ADDR-LABEL: @CompressStore(
-; ADDR-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i32>*), align 8
-; ADDR-NEXT: [[TMP2:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 64) to i64*), align 8
-; ADDR-NEXT: [[TMP3:%.*]] = load <16 x i1>, <16 x i1>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 72) to <16 x i1>*), align 8
+; ADDR-NEXT: [[TMP1:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 64) to i64*), align 8
+; ADDR-NEXT: [[TMP2:%.*]] = load <16 x i1>, <16 x i1>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 72) to <16 x i1>*), align 8
; ADDR-NEXT: call void @llvm.donothing()
-; ADDR-NEXT: [[TMP4:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
-; ADDR-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP4]], 0
-; ADDR-NEXT: [[_MSCMP1:%.*]] = icmp ne i64 [[TMP2]], 0
+; ADDR-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
+; ADDR-NEXT: [[TMP3:%.*]] = bitcast <16 x i1> [[TMP2]] to i16
+; ADDR-NEXT: [[_MSCMP1:%.*]] = icmp ne i16 [[TMP3]], 0
; ADDR-NEXT: [[_MSOR:%.*]] = or i1 [[_MSCMP]], [[_MSCMP1]]
-; ADDR-NEXT: [[TMP5:%.*]] = bitcast <16 x i1> [[TMP3]] to i16
-; ADDR-NEXT: [[_MSCMP2:%.*]] = icmp ne i16 [[TMP5]], 0
-; ADDR-NEXT: [[_MSOR3:%.*]] = or i1 [[_MSOR]], [[_MSCMP2]]
-; ADDR-NEXT: br i1 [[_MSOR3]], label [[TMP6:%.*]], label [[TMP7:%.*]], !prof [[PROF0]]
-; ADDR: 6:
+; ADDR-NEXT: br i1 [[_MSOR]], label [[TMP4:%.*]], label [[TMP5:%.*]], !prof [[PROF0]]
+; ADDR: 4:
; ADDR-NEXT: call void @__msan_warning_noreturn() #[[ATTR7]]
; ADDR-NEXT: unreachable
-; ADDR: 7:
+; ADDR: 5:
; ADDR-NEXT: call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], float* [[PTR:%.*]], <16 x i1> [[MASK:%.*]])
; ADDR-NEXT: ret void
;
; ORIGINS-LABEL: @CompressStore(
-; ORIGINS-NEXT: [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* bitcast ([100 x i64]* @__msan_param_tls to <16 x i32>*), align 8
-; ORIGINS-NEXT: [[TMP2:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__msan_param_origin_tls, i32 0, i32 0), align 4
-; ORIGINS-NEXT: [[TMP3:%.*]] = load i64, i64* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 64) to i64*), align 8
-; ORIGINS-NEXT: [[TMP4:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 64) to i32*), align 4
-; ORIGINS-NEXT: [[TMP5:%.*]] = load <16 x i1>, <16 x i1>* inttoptr (i64 add (i64 ptrtoint ([100 x i64]* @__msan_param_tls to i64), i64 72) to <16 x i1>*), align 8
-; ORIGINS-NEXT: [[TMP6:%.*]] = load i32, i32* inttoptr (i64 add (i64 ptrtoint ([200 x i32]* @__msan_param_origin_tls to i64), i64 72) to i32*), align 4
; ORIGINS-NEXT: call void @llvm.donothing()
-; ORIGINS-NEXT: [[TMP7:%.*]] = bitcast <16 x i32> [[TMP1]] to i512
-; ORIGINS-NEXT: [[_MSCMP:%.*]] = icmp ne i512 [[TMP7]], 0
-; ORIGINS-NEXT: br i1 [[_MSCMP]], label [[TMP8:%.*]], label [[TMP9:%.*]], !prof [[PROF0]]
-; ORIGINS: 8:
-; ORIGINS-NEXT: call void @__msan_warning_with_origin_noreturn(i32 [[TMP2]]) #[[ATTR7]]
-; ORIGINS-NEXT: unreachable
-; ORIGINS: 9:
-; ORIGINS-NEXT: [[_MSCMP1:%.*]] = icmp ne i64 [[TMP3]], 0
-; ORIGINS-NEXT: br i1 [[_MSCMP1]], label [[TMP10:%.*]], label [[TMP11:%.*]], !prof [[PROF0]]
-; ORIGINS: 10:
-; ORIGINS-NEXT: call void @__msan_warning_with_origin_noreturn(i32 [[TMP4]]) #[[ATTR7]]
-; ORIGINS-NEXT: unreachable
-; ORIGINS: 11:
-; ORIGINS-NEXT: [[TMP12:%.*]] = bitcast <16 x i1> [[TMP5]] to i16
-; ORIGINS-NEXT: [[_MSCMP2:%.*]] = icmp ne i16 [[TMP12]], 0
-; ORIGINS-NEXT: br i1 [[_MSCMP2]], label [[TMP13:%.*]], label [[TMP14:%.*]], !prof [[PROF0]]
-; ORIGINS: 13:
-; ORIGINS-NEXT: call void @__msan_warning_with_origin_noreturn(i32 [[TMP6]]) #[[ATTR7]]
-; ORIGINS-NEXT: unreachable
-; ORIGINS: 14:
; ORIGINS-NEXT: call void @llvm.masked.compressstore.v16f32(<16 x float> [[VALUE:%.*]], float* [[PTR:%.*]], <16 x i1> [[MASK:%.*]])
; ORIGINS-NEXT: ret void
;