[llvm] ecb85b5 - [dfsan] Remove injectMetadataGlobals

Fangrui Song via llvm-commits llvm-commits at lists.llvm.org
Tue Jan 3 09:24:01 PST 2023


Author: Fangrui Song
Date: 2023-01-03T09:23:55-08:00
New Revision: ecb85b5cd89f9797c538675ee3ab93e350c57bd5

URL: https://github.com/llvm/llvm-project/commit/ecb85b5cd89f9797c538675ee3ab93e350c57bd5
DIFF: https://github.com/llvm/llvm-project/commit/ecb85b5cd89f9797c538675ee3ab93e350c57bd5.diff

LOG: [dfsan] Remove injectMetadataGlobals

D97409 added injectMetadataGlobals to differentiate the shadow mode.
This feature has been unused and is unneeded after D103745 removed fast16 mode.

Reviewed By: browneee

Differential Revision: https://reviews.llvm.org/D140797

Added: 
    

Modified: 
    llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
    llvm/test/Instrumentation/DataFlowSanitizer/abilist.ll
    llvm/test/Instrumentation/DataFlowSanitizer/abilist_aggregate.ll
    llvm/test/Instrumentation/DataFlowSanitizer/args-unreachable-bb.ll
    llvm/test/Instrumentation/DataFlowSanitizer/arith.ll
    llvm/test/Instrumentation/DataFlowSanitizer/array.ll
    llvm/test/Instrumentation/DataFlowSanitizer/atomics.ll
    llvm/test/Instrumentation/DataFlowSanitizer/basic.ll
    llvm/test/Instrumentation/DataFlowSanitizer/callback.ll
    llvm/test/Instrumentation/DataFlowSanitizer/custom_fun_varargs_attributes.ll
    llvm/test/Instrumentation/DataFlowSanitizer/debug-nonzero-labels.ll
    llvm/test/Instrumentation/DataFlowSanitizer/dont_combine_offset_labels_on_gep.ll
    llvm/test/Instrumentation/DataFlowSanitizer/extern_weak.ll
    llvm/test/Instrumentation/DataFlowSanitizer/load.ll
    llvm/test/Instrumentation/DataFlowSanitizer/lookup_table.ll
    llvm/test/Instrumentation/DataFlowSanitizer/memset.ll
    llvm/test/Instrumentation/DataFlowSanitizer/origin_abilist.ll
    llvm/test/Instrumentation/DataFlowSanitizer/origin_cached_shadows.ll
    llvm/test/Instrumentation/DataFlowSanitizer/origin_call.ll
    llvm/test/Instrumentation/DataFlowSanitizer/origin_load.ll
    llvm/test/Instrumentation/DataFlowSanitizer/origin_mem_intrinsic.ll
    llvm/test/Instrumentation/DataFlowSanitizer/origin_other_ops.ll
    llvm/test/Instrumentation/DataFlowSanitizer/origin_phi.ll
    llvm/test/Instrumentation/DataFlowSanitizer/origin_select.ll
    llvm/test/Instrumentation/DataFlowSanitizer/origin_store.ll
    llvm/test/Instrumentation/DataFlowSanitizer/origin_store_threshold.ll
    llvm/test/Instrumentation/DataFlowSanitizer/origin_track_load.ll
    llvm/test/Instrumentation/DataFlowSanitizer/phi.ll
    llvm/test/Instrumentation/DataFlowSanitizer/select.ll
    llvm/test/Instrumentation/DataFlowSanitizer/shadow-args-zext.ll
    llvm/test/Instrumentation/DataFlowSanitizer/store.ll
    llvm/test/Instrumentation/DataFlowSanitizer/struct.ll
    llvm/test/Instrumentation/DataFlowSanitizer/uninstrumented_local_functions.ll
    llvm/test/Instrumentation/DataFlowSanitizer/union.ll
    llvm/test/Instrumentation/DataFlowSanitizer/vector.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index f4964df1f3266..95c05a5f0c498 100644
--- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -515,7 +515,6 @@ class DataFlowSanitizer {
                                  FunctionType *NewFT);
   void initializeCallbackFunctions(Module &M);
   void initializeRuntimeFunctions(Module &M);
-  void injectMetadataGlobals(Module &M);
   bool initializeModule(Module &M);
 
   /// Advances \p OriginAddr to point to the next 32-bit origin and then loads
@@ -1465,26 +1464,6 @@ void DataFlowSanitizer::initializeCallbackFunctions(Module &M) {
   }
 }
 
-void DataFlowSanitizer::injectMetadataGlobals(Module &M) {
-  // These variables can be used:
-  // - by the runtime (to discover what the shadow width was, during
-  //   compilation)
-  // - in testing (to avoid hardcoding the shadow width and type but instead
-  //   extract them by pattern matching)
-  Type *IntTy = Type::getInt32Ty(*Ctx);
-  (void)Mod->getOrInsertGlobal("__dfsan_shadow_width_bits", IntTy, [&] {
-    return new GlobalVariable(
-        M, IntTy, /*isConstant=*/true, GlobalValue::WeakODRLinkage,
-        ConstantInt::get(IntTy, ShadowWidthBits), "__dfsan_shadow_width_bits");
-  });
-  (void)Mod->getOrInsertGlobal("__dfsan_shadow_width_bytes", IntTy, [&] {
-    return new GlobalVariable(M, IntTy, /*isConstant=*/true,
-                              GlobalValue::WeakODRLinkage,
-                              ConstantInt::get(IntTy, ShadowWidthBytes),
-                              "__dfsan_shadow_width_bytes");
-  });
-}
-
 bool DataFlowSanitizer::runImpl(
     Module &M, llvm::function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
   initializeModule(M);
@@ -1527,8 +1506,6 @@ bool DataFlowSanitizer::runImpl(
         "__dfsan_track_origins");
   });
 
-  injectMetadataGlobals(M);
-
   initializeCallbackFunctions(M);
   initializeRuntimeFunctions(M);
 

diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/abilist.ll b/llvm/test/Instrumentation/DataFlowSanitizer/abilist.ll
index 020fa4de7c85e..98dceb4e19b4e 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/abilist.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/abilist.ll
@@ -2,9 +2,6 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 ; CHECK: i32 @discard(i32 %a, i32 %b)
 define i32 @discard(i32 %a, i32 %b) {
   ret i32 0
@@ -32,31 +29,31 @@ declare i32 @cb(i32)
 
 ; CHECK: @f.dfsan
 define void @f(i32 %x) {
-  ; CHECK: %[[LABELVA2:.*]] = alloca [2 x i[[#SBITS]]]
-  ; CHECK: %[[LABELVA1:.*]] = alloca [2 x i[[#SBITS]]]
-  ; CHECK: %[[LABELRETURN:.*]] = alloca i[[#SBITS]]
+  ; CHECK: %[[LABELVA2:.*]] = alloca [2 x i8]
+  ; CHECK: %[[LABELVA1:.*]] = alloca [2 x i8]
+  ; CHECK: %[[LABELRETURN:.*]] = alloca i8
 
-  ; CHECK: call void @__dfsw_custom1(i32 1, i32 2, i[[#SBITS]] zeroext 0, i[[#SBITS]] zeroext 0)
+  ; CHECK: call void @__dfsw_custom1(i32 1, i32 2, i8 zeroext 0, i8 zeroext 0)
   call void @custom1(i32 1, i32 2)
 
-  ; CHECK: call i32 @__dfsw_custom2(i32 1, i32 2, i[[#SBITS]] zeroext 0, i[[#SBITS]] zeroext 0, ptr %[[LABELRETURN]])
+  ; CHECK: call i32 @__dfsw_custom2(i32 1, i32 2, i8 zeroext 0, i8 zeroext 0, ptr %[[LABELRETURN]])
   call i32 @custom2(i32 1, i32 2)
 
-  ; CHECK: call void @__dfsw_customcb({{.*}} @cb.dfsan, i[[#SBITS]] zeroext 0)
+  ; CHECK: call void @__dfsw_customcb({{.*}} @cb.dfsan, i8 zeroext 0)
   call void @customcb(ptr @cb)
 
-  ; CHECK: %[[LABELVA1_0:.*]] = getelementptr inbounds [2 x i[[#SBITS]]], ptr %[[LABELVA1]], i32 0, i32 0
-  ; CHECK: store i[[#SBITS]] 0, ptr %[[LABELVA1_0]]
-  ; CHECK: %[[LABELVA1_1:.*]] = getelementptr inbounds [2 x i[[#SBITS]]], ptr %[[LABELVA1]], i32 0, i32 1
-  ; CHECK: store i[[#SBITS]] %{{.*}}, ptr %[[LABELVA1_1]]
-  ; CHECK: %[[LABELVA1_0A:.*]] = getelementptr inbounds [2 x i[[#SBITS]]], ptr %[[LABELVA1]], i32 0, i32 0
-  ; CHECK: call void (i32, i[[#SBITS]], ptr, ...) @__dfsw_custom3(i32 1, i[[#SBITS]] zeroext 0, ptr %[[LABELVA1_0A]], i32 2, i32 %{{.*}})
+  ; CHECK: %[[LABELVA1_0:.*]] = getelementptr inbounds [2 x i8], ptr %[[LABELVA1]], i32 0, i32 0
+  ; CHECK: store i8 0, ptr %[[LABELVA1_0]]
+  ; CHECK: %[[LABELVA1_1:.*]] = getelementptr inbounds [2 x i8], ptr %[[LABELVA1]], i32 0, i32 1
+  ; CHECK: store i8 %{{.*}}, ptr %[[LABELVA1_1]]
+  ; CHECK: %[[LABELVA1_0A:.*]] = getelementptr inbounds [2 x i8], ptr %[[LABELVA1]], i32 0, i32 0
+  ; CHECK: call void (i32, i8, ptr, ...) @__dfsw_custom3(i32 1, i8 zeroext 0, ptr %[[LABELVA1_0A]], i32 2, i32 %{{.*}})
 
   call void (i32, ...) @custom3(i32 1, i32 2, i32 %x)
 
-  ; CHECK: %[[LABELVA2_0:.*]] = getelementptr inbounds [2 x i[[#SBITS]]], ptr %[[LABELVA2]], i32 0, i32 0
-  ; CHECK: %[[LABELVA2_0A:.*]] = getelementptr inbounds [2 x i[[#SBITS]]], ptr %[[LABELVA2]], i32 0, i32 0
-  ; CHECK: call i32 (i32, i[[#SBITS]], ptr, ptr, ...) @__dfsw_custom4(i32 1, i[[#SBITS]] zeroext 0, ptr %[[LABELVA2_0A]], ptr %[[LABELRETURN]], i32 2, i32 3)
+  ; CHECK: %[[LABELVA2_0:.*]] = getelementptr inbounds [2 x i8], ptr %[[LABELVA2]], i32 0, i32 0
+  ; CHECK: %[[LABELVA2_0A:.*]] = getelementptr inbounds [2 x i8], ptr %[[LABELVA2]], i32 0, i32 0
+  ; CHECK: call i32 (i32, i8, ptr, ptr, ...) @__dfsw_custom4(i32 1, i8 zeroext 0, ptr %[[LABELVA2_0A]], ptr %[[LABELRETURN]], i32 2, i32 3)
   call i32 (i32, ...) @custom4(i32 1, i32 2, i32 3)
 
   ret void
@@ -79,9 +76,9 @@ define ptr @g(i32) {
 @adiscard = alias i32 (i32, i32), ptr @discard
 
 ; CHECK: define linkonce_odr i32 @"dfsw$custom2"(i32 %0, i32 %1)
-; CHECK: %[[LABELRETURN2:.*]] = alloca i[[#SBITS]]
-; CHECK: %[[RV:.*]] = call i32 @__dfsw_custom2(i32 {{.*}}, i32 {{.*}}, i[[#SBITS]] {{.*}}, i[[#SBITS]] {{.*}}, ptr %[[LABELRETURN2]])
-; CHECK: %[[RVSHADOW:.*]] = load i[[#SBITS]], ptr %[[LABELRETURN2]]
+; CHECK: %[[LABELRETURN2:.*]] = alloca i8
+; CHECK: %[[RV:.*]] = call i32 @__dfsw_custom2(i32 {{.*}}, i32 {{.*}}, i8 {{.*}}, i8 {{.*}}, ptr %[[LABELRETURN2]])
+; CHECK: %[[RVSHADOW:.*]] = load i8, ptr %[[LABELRETURN2]]
 ; CHECK: store {{.*}} @__dfsan_retval_tls
 ; CHECK: ret i32
 
@@ -91,8 +88,8 @@ define ptr @g(i32) {
 
 ; CHECK: define linkonce_odr i32 @"dfsw$custom4"(i32 %0, ...)
 
-; CHECK: declare void @__dfsw_custom1(i32, i32, i[[#SBITS]], i[[#SBITS]])
-; CHECK: declare i32 @__dfsw_custom2(i32, i32, i[[#SBITS]], i[[#SBITS]], ptr)
+; CHECK: declare void @__dfsw_custom1(i32, i32, i8, i8)
+; CHECK: declare i32 @__dfsw_custom2(i32, i32, i8, i8, ptr)
 
-; CHECK: declare void @__dfsw_custom3(i32, i[[#SBITS]], ptr, ...)
-; CHECK: declare i32 @__dfsw_custom4(i32, i[[#SBITS]], ptr, ptr, ...)
+; CHECK: declare void @__dfsw_custom3(i32, i8, ptr, ...)
+; CHECK: declare i32 @__dfsw_custom4(i32, i8, ptr, ptr, ...)

diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/abilist_aggregate.ll b/llvm/test/Instrumentation/DataFlowSanitizer/abilist_aggregate.ll
index c1aa9ffae6870..8cd70c5e13c40 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/abilist_aggregate.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/abilist_aggregate.ll
@@ -2,9 +2,6 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 ; CHECK: define { i1, i7 } @functional({ i32, i1 } %a, [2 x i7] %b)
 define {i1, i7} @functional({i32, i1} %a, [2 x i7] %b) {
   %a1 = extractvalue {i32, i1} %a, 1
@@ -16,18 +13,18 @@ define {i1, i7} @functional({i32, i1} %a, [2 x i7] %b) {
 
 define {i1, i7} @call_functional({i32, i1} %a, [2 x i7] %b) {
   ; CHECK-LABEL: @call_functional.dfsan
-  ; CHECK-NEXT: %[[#REG:]] = load [2 x i[[#SBITS]]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to ptr), align [[ALIGN:2]]
-  ; CHECK-NEXT: %[[#REG+1]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr @__dfsan_arg_tls, align [[ALIGN]]
-  ; CHECK-NEXT: %[[#REG+2]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } %[[#REG+1]], 0
-  ; CHECK-NEXT: %[[#REG+3]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } %[[#REG+1]], 1
-  ; CHECK-NEXT: %[[#REG+4]] = or i[[#SBITS]] %[[#REG+2]], %[[#REG+3]]
-  ; CHECK-NEXT: %[[#REG+5]] = extractvalue [2 x i[[#SBITS]]] %[[#REG]], 0
-  ; CHECK-NEXT: %[[#REG+6]] = extractvalue [2 x i[[#SBITS]]] %[[#REG]], 1
-  ; CHECK-NEXT: %[[#REG+7]] = or i[[#SBITS]] %[[#REG+5]], %[[#REG+6]]
-  ; CHECK-NEXT: %[[#REG+8]] = or i[[#SBITS]] %[[#REG+4]], %[[#REG+7]]
-  ; CHECK-NEXT: %[[#REG+9]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } undef, i[[#SBITS]] %[[#REG+8]], 0
-  ; CHECK-NEXT: %[[#REG+10]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } %[[#REG+9]], i[[#SBITS]] %[[#REG+8]], 1
-  ; CHECK: store { i[[#SBITS]], i[[#SBITS]] } %[[#REG+10]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK-NEXT: %[[#REG:]] = load [2 x i8], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN:2]]
+  ; CHECK-NEXT: %[[#REG+1]] = load { i8, i8 }, ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; CHECK-NEXT: %[[#REG+2]] = extractvalue { i8, i8 } %[[#REG+1]], 0
+  ; CHECK-NEXT: %[[#REG+3]] = extractvalue { i8, i8 } %[[#REG+1]], 1
+  ; CHECK-NEXT: %[[#REG+4]] = or i8 %[[#REG+2]], %[[#REG+3]]
+  ; CHECK-NEXT: %[[#REG+5]] = extractvalue [2 x i8] %[[#REG]], 0
+  ; CHECK-NEXT: %[[#REG+6]] = extractvalue [2 x i8] %[[#REG]], 1
+  ; CHECK-NEXT: %[[#REG+7]] = or i8 %[[#REG+5]], %[[#REG+6]]
+  ; CHECK-NEXT: %[[#REG+8]] = or i8 %[[#REG+4]], %[[#REG+7]]
+  ; CHECK-NEXT: %[[#REG+9]] = insertvalue { i8, i8 } undef, i8 %[[#REG+8]], 0
+  ; CHECK-NEXT: %[[#REG+10]] = insertvalue { i8, i8 } %[[#REG+9]], i8 %[[#REG+8]], 1
+  ; CHECK: store { i8, i8 } %[[#REG+10]], ptr @__dfsan_retval_tls, align [[ALIGN]]
 
   %r = call {i1, i7} @functional({i32, i1} %a, [2 x i7] %b)
   ret {i1, i7} %r
@@ -44,7 +41,7 @@ define {i1, i7} @discard({i32, i1} %a, [2 x i7] %b) {
 
 define {i1, i7} @call_discard({i32, i1} %a, [2 x i7] %b) {
   ; CHECK: @call_discard.dfsan
-  ; CHECK: store { i[[#SBITS]], i[[#SBITS]] } zeroinitializer, ptr @__dfsan_retval_tls, align 2
+  ; CHECK: store { i8, i8 } zeroinitializer, ptr @__dfsan_retval_tls, align 2
 
   %r = call {i1, i7} @discard({i32, i1} %a, [2 x i7] %b)
   ret {i1, i7} %r
@@ -62,7 +59,7 @@ define {i1, i7} @uninstrumented({i32, i1} %a, [2 x i7] %b) {
 define {i1, i7} @call_uninstrumented({i32, i1} %a, [2 x i7] %b) {
   ; CHECK: @call_uninstrumented.dfsan
   ; CHECK: call void @__dfsan_unimplemented
-  ; CHECK: store { i[[#SBITS]], i[[#SBITS]] } zeroinitializer, ptr @__dfsan_retval_tls, align 2
+  ; CHECK: store { i8, i8 } zeroinitializer, ptr @__dfsan_retval_tls, align 2
 
   %r = call {i1, i7} @uninstrumented({i32, i1} %a, [2 x i7] %b)
   ret {i1, i7} %r
@@ -70,20 +67,20 @@ define {i1, i7} @call_uninstrumented({i32, i1} %a, [2 x i7] %b) {
 
 define {i1, i7} @call_custom_with_ret({i32, i1} %a, [2 x i7] %b) {
   ; CHECK: @call_custom_with_ret.dfsan
-  ; CHECK: %labelreturn = alloca i[[#SBITS]], align [[#SBYTES]]
-  ; CHECK: [[B:%.*]] = load [2 x i[[#SBITS]]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to ptr), align [[ALIGN:2]]
-  ; CHECK: [[A:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr @__dfsan_arg_tls, align [[ALIGN]]
-  ; CHECK: [[A0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 0
-  ; CHECK: [[A1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 1
-  ; CHECK: [[A01:%.*]] = or i[[#SBITS]] [[A0]], [[A1]]
-  ; CHECK: [[B0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 0
-  ; CHECK: [[B1:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 1
-  ; CHECK: [[B01:%.*]] = or i[[#SBITS]] [[B0]], [[B1]]
-  ; CHECK: [[R:%.*]] = call { i1, i7 } @__dfsw_custom_with_ret({ i32, i1 } %a, [2 x i7] %b, i[[#SBITS]] zeroext [[A01]], i[[#SBITS]] zeroext [[B01]], ptr %labelreturn)
-  ; CHECK: [[RE:%.*]] = load i[[#SBITS]], ptr %labelreturn, align [[#SBYTES]]
-  ; CHECK: [[RS0:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } undef, i[[#SBITS]] [[RE]], 0
-  ; CHECK: [[RS1:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } [[RS0]], i[[#SBITS]] [[RE]], 1
-  ; CHECK: store { i[[#SBITS]], i[[#SBITS]] } [[RS1]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK: %labelreturn = alloca i8, align 1
+  ; CHECK: [[B:%.*]] = load [2 x i8], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN:2]]
+  ; CHECK: [[A:%.*]] = load { i8, i8 }, ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; CHECK: [[A0:%.*]] = extractvalue { i8, i8 } [[A]], 0
+  ; CHECK: [[A1:%.*]] = extractvalue { i8, i8 } [[A]], 1
+  ; CHECK: [[A01:%.*]] = or i8 [[A0]], [[A1]]
+  ; CHECK: [[B0:%.*]] = extractvalue [2 x i8] [[B]], 0
+  ; CHECK: [[B1:%.*]] = extractvalue [2 x i8] [[B]], 1
+  ; CHECK: [[B01:%.*]] = or i8 [[B0]], [[B1]]
+  ; CHECK: [[R:%.*]] = call { i1, i7 } @__dfsw_custom_with_ret({ i32, i1 } %a, [2 x i7] %b, i8 zeroext [[A01]], i8 zeroext [[B01]], ptr %labelreturn)
+  ; CHECK: [[RE:%.*]] = load i8, ptr %labelreturn, align 1
+  ; CHECK: [[RS0:%.*]] = insertvalue { i8, i8 } undef, i8 [[RE]], 0
+  ; CHECK: [[RS1:%.*]] = insertvalue { i8, i8 } [[RS0]], i8 [[RE]], 1
+  ; CHECK: store { i8, i8 } [[RS1]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK: ret { i1, i7 } [[R]]
 
   %r = call {i1, i7} @custom_with_ret({i32, i1} %a, [2 x i7] %b)
@@ -92,15 +89,15 @@ define {i1, i7} @call_custom_with_ret({i32, i1} %a, [2 x i7] %b) {
 
 define void @call_custom_without_ret({i32, i1} %a, [2 x i7] %b) {
   ; CHECK: @call_custom_without_ret.dfsan
-  ; CHECK: [[B:%.*]] = load [2 x i[[#SBITS]]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to ptr), align [[ALIGN:2]]
-  ; CHECK: [[A:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr @__dfsan_arg_tls, align [[ALIGN]]
-  ; CHECK: [[A0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 0
-  ; CHECK: [[A1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 1
-  ; CHECK: [[A01:%.*]] = or i[[#SBITS]] [[A0]], [[A1]]
-  ; CHECK: [[B0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 0
-  ; CHECK: [[B1:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 1
-  ; CHECK: [[B01:%.*]] = or i[[#SBITS]] [[B0]], [[B1]]
-  ; CHECK: call void @__dfsw_custom_without_ret({ i32, i1 } %a, [2 x i7] %b, i[[#SBITS]] zeroext [[A01]], i[[#SBITS]] zeroext [[B01]])
+  ; CHECK: [[B:%.*]] = load [2 x i8], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN:2]]
+  ; CHECK: [[A:%.*]] = load { i8, i8 }, ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; CHECK: [[A0:%.*]] = extractvalue { i8, i8 } [[A]], 0
+  ; CHECK: [[A1:%.*]] = extractvalue { i8, i8 } [[A]], 1
+  ; CHECK: [[A01:%.*]] = or i8 [[A0]], [[A1]]
+  ; CHECK: [[B0:%.*]] = extractvalue [2 x i8] [[B]], 0
+  ; CHECK: [[B1:%.*]] = extractvalue [2 x i8] [[B]], 1
+  ; CHECK: [[B01:%.*]] = or i8 [[B0]], [[B1]]
+  ; CHECK: call void @__dfsw_custom_without_ret({ i32, i1 } %a, [2 x i7] %b, i8 zeroext [[A01]], i8 zeroext [[B01]])
 
   call void @custom_without_ret({i32, i1} %a, [2 x i7] %b)
   ret void
@@ -108,19 +105,19 @@ define void @call_custom_without_ret({i32, i1} %a, [2 x i7] %b) {
 
 define void @call_custom_varg({i32, i1} %a, [2 x i7] %b) {
   ; CHECK: @call_custom_varg.dfsan
-  ; CHECK: [[B:%.*]] = load [2 x i[[#SBITS]]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to ptr), align [[ALIGN:2]]
-  ; CHECK: %labelva = alloca [1 x i[[#SBITS]]], align [[#SBYTES]]
-  ; CHECK: [[A:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr @__dfsan_arg_tls, align [[ALIGN]]
-  ; CHECK: [[A0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 0
-  ; CHECK: [[A1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 1
-  ; CHECK: [[A01:%.*]] = or i[[#SBITS]] [[A0]], [[A1]]
-  ; CHECK: [[V0:%.*]] = getelementptr inbounds [1 x i[[#SBITS]]], ptr %labelva, i32 0, i32 0
-  ; CHECK: [[B0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 0
-  ; CHECK: [[B1:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 1
-  ; CHECK: [[B01:%.*]] = or i[[#SBITS]] [[B0]], [[B1]]
-  ; CHECK: store i[[#SBITS]] [[B01]], ptr [[V0]], align [[#SBYTES]]
-  ; CHECK: [[V:%.*]] = getelementptr inbounds [1 x i[[#SBITS]]], ptr %labelva, i32 0, i32 0
-  ; CHECK: call void ({ i32, i1 }, i[[#SBITS]], ptr, ...) @__dfsw_custom_varg({ i32, i1 } %a, i[[#SBITS]] zeroext [[A01]], ptr [[V]], [2 x i7] %b)
+  ; CHECK: [[B:%.*]] = load [2 x i8], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN:2]]
+  ; CHECK: %labelva = alloca [1 x i8], align 1
+  ; CHECK: [[A:%.*]] = load { i8, i8 }, ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; CHECK: [[A0:%.*]] = extractvalue { i8, i8 } [[A]], 0
+  ; CHECK: [[A1:%.*]] = extractvalue { i8, i8 } [[A]], 1
+  ; CHECK: [[A01:%.*]] = or i8 [[A0]], [[A1]]
+  ; CHECK: [[V0:%.*]] = getelementptr inbounds [1 x i8], ptr %labelva, i32 0, i32 0
+  ; CHECK: [[B0:%.*]] = extractvalue [2 x i8] [[B]], 0
+  ; CHECK: [[B1:%.*]] = extractvalue [2 x i8] [[B]], 1
+  ; CHECK: [[B01:%.*]] = or i8 [[B0]], [[B1]]
+  ; CHECK: store i8 [[B01]], ptr [[V0]], align 1
+  ; CHECK: [[V:%.*]] = getelementptr inbounds [1 x i8], ptr %labelva, i32 0, i32 0
+  ; CHECK: call void ({ i32, i1 }, i8, ptr, ...) @__dfsw_custom_varg({ i32, i1 } %a, i8 zeroext [[A01]], ptr [[V]], [2 x i7] %b)
 
   call void ({i32, i1}, ...) @custom_varg({i32, i1} %a, [2 x i7] %b)
   ret void
@@ -128,20 +125,20 @@ define void @call_custom_varg({i32, i1} %a, [2 x i7] %b) {
 
 define {i1, i7} @call_custom_cb({i32, i1} %a, [2 x i7] %b) {
   ; CHECK: define { i1, i7 } @call_custom_cb.dfsan({ i32, i1 } %a, [2 x i7] %b) {
-  ; CHECK: %labelreturn = alloca i[[#SBITS]], align [[#SBYTES]]
-  ; CHECK: [[B:%.*]] = load [2 x i[[#SBITS]]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to ptr), align [[ALIGN:2]]
-  ; CHECK: [[A:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr @__dfsan_arg_tls, align [[ALIGN]]
-  ; CHECK: [[A0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 0
-  ; CHECK: [[A1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 1
-  ; CHECK: [[A01:%.*]] = or i[[#SBITS]] [[A0]], [[A1]]
-  ; CHECK: [[B0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 0
-  ; CHECK: [[B1:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 1
-  ; CHECK: [[B01:%.*]] = or i[[#SBITS]] [[B0]], [[B1]]
-  ; CHECK: [[R:%.*]]  = call { i1, i7 } @__dfsw_custom_cb(ptr @cb.dfsan, { i32, i1 } %a, [2 x i7] %b, i[[#SBITS]] zeroext 0, i[[#SBITS]] zeroext [[A01]], i[[#SBITS]] zeroext [[B01]], ptr %labelreturn)
-  ; CHECK: [[RE:%.*]] = load i[[#SBITS]], ptr %labelreturn, align [[#SBYTES]]
-  ; CHECK: [[RS0:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } undef, i[[#SBITS]] [[RE]], 0
-  ; CHECK: [[RS1:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } [[RS0]], i[[#SBITS]] [[RE]], 1
-  ; CHECK: store { i[[#SBITS]], i[[#SBITS]] } [[RS1]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK: %labelreturn = alloca i8, align 1
+  ; CHECK: [[B:%.*]] = load [2 x i8], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN:2]]
+  ; CHECK: [[A:%.*]] = load { i8, i8 }, ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; CHECK: [[A0:%.*]] = extractvalue { i8, i8 } [[A]], 0
+  ; CHECK: [[A1:%.*]] = extractvalue { i8, i8 } [[A]], 1
+  ; CHECK: [[A01:%.*]] = or i8 [[A0]], [[A1]]
+  ; CHECK: [[B0:%.*]] = extractvalue [2 x i8] [[B]], 0
+  ; CHECK: [[B1:%.*]] = extractvalue [2 x i8] [[B]], 1
+  ; CHECK: [[B01:%.*]] = or i8 [[B0]], [[B1]]
+  ; CHECK: [[R:%.*]]  = call { i1, i7 } @__dfsw_custom_cb(ptr @cb.dfsan, { i32, i1 } %a, [2 x i7] %b, i8 zeroext 0, i8 zeroext [[A01]], i8 zeroext [[B01]], ptr %labelreturn)
+  ; CHECK: [[RE:%.*]] = load i8, ptr %labelreturn, align 1
+  ; CHECK: [[RS0:%.*]] = insertvalue { i8, i8 } undef, i8 [[RE]], 0
+  ; CHECK: [[RS1:%.*]] = insertvalue { i8, i8 } [[RS0]], i8 [[RE]], 1
+  ; CHECK: store { i8, i8 } [[RS1]], ptr @__dfsan_retval_tls, align [[ALIGN]]
 
   %r = call {i1, i7} @custom_cb(ptr @cb, {i32, i1} %a, [2 x i7] %b)
   ret {i1, i7} %r
@@ -156,13 +153,13 @@ define {i1, i7} @custom_cb(ptr %cb, {i32, i1} %a, [2 x i7] %b) {
 
 define {i1, i7} @cb({i32, i1} %a, [2 x i7] %b) {
   ; CHECK: define { i1, i7 } @cb.dfsan({ i32, i1 } %a, [2 x i7] %b)
-  ; CHECK: [[BL:%.*]] = load [2 x i[[#SBITS]]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to ptr), align [[ALIGN:2]]
-  ; CHECK: [[AL:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr @__dfsan_arg_tls, align [[ALIGN]]
-  ; CHECK: [[AL1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[AL]], 1
-  ; CHECK: [[BL0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[BL]], 0
-  ; CHECK: [[RL0:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } zeroinitializer, i[[#SBITS]] [[AL1]], 0
-  ; CHECK: [[RL:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } [[RL0]], i[[#SBITS]] [[BL0]], 1
-  ; CHECK: store { i[[#SBITS]], i[[#SBITS]] } [[RL]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK: [[BL:%.*]] = load [2 x i8], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN:2]]
+  ; CHECK: [[AL:%.*]] = load { i8, i8 }, ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; CHECK: [[AL1:%.*]] = extractvalue { i8, i8 } [[AL]], 1
+  ; CHECK: [[BL0:%.*]] = extractvalue [2 x i8] [[BL]], 0
+  ; CHECK: [[RL0:%.*]] = insertvalue { i8, i8 } zeroinitializer, i8 [[AL1]], 0
+  ; CHECK: [[RL:%.*]] = insertvalue { i8, i8 } [[RL0]], i8 [[BL0]], 1
+  ; CHECK: store { i8, i8 } [[RL]], ptr @__dfsan_retval_tls, align [[ALIGN]]
 
   %a1 = extractvalue {i32, i1} %a, 1
   %b0 = extractvalue [2 x i7] %b, 0
@@ -173,47 +170,47 @@ define {i1, i7} @cb({i32, i1} %a, [2 x i7] %b) {
 
 define ptr @ret_custom() {
   ; CHECK: @ret_custom.dfsan
-  ; CHECK: store i[[#SBITS]] 0, ptr @__dfsan_retval_tls, align 2
+  ; CHECK: store i8 0, ptr @__dfsan_retval_tls, align 2
   ; CHECK: ret {{.*}} @"dfsw$custom_with_ret"
   ret ptr @custom_with_ret
 }
 
 ; CHECK: define linkonce_odr { i1, i7 } @"dfsw$custom_cb"(ptr %0, { i32, i1 } %1, [2 x i7] %2) {
-; CHECK: %labelreturn = alloca i[[#SBITS]], align [[#SBYTES]]
+; CHECK: %labelreturn = alloca i8, align 1
 ; COMM: TODO simplify the expression [[#mul(2,SBYTES) + max(SBYTES,2)]] to
 ; COMM: [[#mul(3,SBYTES)]], if shadow-tls-alignment is updated to match shadow
 ; COMM: width bytes.
-; CHECK: [[B:%.*]] = load [2 x i[[#SBITS]]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES) + max(SBYTES,2)]]) to ptr), align [[ALIGN:2]]
-; CHECK: [[A:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
-; CHECK: [[CB:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
-; CHECK: [[A0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 0
-; CHECK: [[A1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 1
-; CHECK: [[A01:%.*]] = or i[[#SBITS]] [[A0]], [[A1]]
-; CHECK: [[B0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 0
-; CHECK: [[B1:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 1
-; CHECK: [[B01:%.*]] = or i[[#SBITS]] [[B0]], [[B1]]
-; CHECK: [[R:%.*]]  = call { i1, i7 } @__dfsw_custom_cb(ptr %0, { i32, i1 } %1, [2 x i7] %2, i[[#SBITS]] zeroext [[CB]], i[[#SBITS]] zeroext [[A01]], i[[#SBITS]] zeroext [[B01]], ptr %labelreturn)
-; CHECK: [[RE:%.*]] = load i[[#SBITS]], ptr %labelreturn, align [[#SBYTES]]
-; CHECK: [[RS0:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } undef, i[[#SBITS]] [[RE]], 0
-; CHECK: [[RS1:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } [[RS0]], i[[#SBITS]] [[RE]], 1
-; CHECK: store { i[[#SBITS]], i[[#SBITS]] } [[RS1]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+; CHECK: [[B:%.*]] = load [2 x i8], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN:2]]
+; CHECK: [[A:%.*]] = load { i8, i8 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+; CHECK: [[CB:%.*]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
+; CHECK: [[A0:%.*]] = extractvalue { i8, i8 } [[A]], 0
+; CHECK: [[A1:%.*]] = extractvalue { i8, i8 } [[A]], 1
+; CHECK: [[A01:%.*]] = or i8 [[A0]], [[A1]]
+; CHECK: [[B0:%.*]] = extractvalue [2 x i8] [[B]], 0
+; CHECK: [[B1:%.*]] = extractvalue [2 x i8] [[B]], 1
+; CHECK: [[B01:%.*]] = or i8 [[B0]], [[B1]]
+; CHECK: [[R:%.*]]  = call { i1, i7 } @__dfsw_custom_cb(ptr %0, { i32, i1 } %1, [2 x i7] %2, i8 zeroext [[CB]], i8 zeroext [[A01]], i8 zeroext [[B01]], ptr %labelreturn)
+; CHECK: [[RE:%.*]] = load i8, ptr %labelreturn, align 1
+; CHECK: [[RS0:%.*]] = insertvalue { i8, i8 } undef, i8 [[RE]], 0
+; CHECK: [[RS1:%.*]] = insertvalue { i8, i8 } [[RS0]], i8 [[RE]], 1
+; CHECK: store { i8, i8 } [[RS1]], ptr @__dfsan_retval_tls, align [[ALIGN]]
 
 define {i1, i7} @custom_with_ret({i32, i1} %a, [2 x i7] %b) {
   ; CHECK: define linkonce_odr { i1, i7 } @"dfsw$custom_with_ret"({ i32, i1 } %0, [2 x i7] %1)
-  ; CHECK: %labelreturn = alloca i[[#SBITS]], align [[#SBYTES]]
-  ; CHECK: [[B:%.*]] = load [2 x i[[#SBITS]]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to ptr), align [[ALIGN:2]]
-  ; CHECK: [[A:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr @__dfsan_arg_tls, align [[ALIGN]]
-  ; CHECK: [[A0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 0
-  ; CHECK: [[A1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 1
-  ; CHECK: [[A01:%.*]] = or i[[#SBITS]] [[A0]], [[A1]]
-  ; CHECK: [[B0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 0
-  ; CHECK: [[B1:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 1
-  ; CHECK: [[B01:%.*]] = or i[[#SBITS]] [[B0]], [[B1]]
-  ; CHECK: [[R:%.*]] = call { i1, i7 } @__dfsw_custom_with_ret({ i32, i1 } %0, [2 x i7] %1, i[[#SBITS]] zeroext [[A01]], i[[#SBITS]] zeroext [[B01]], ptr %labelreturn)
-  ; CHECK: [[RE:%.*]] = load i[[#SBITS]], ptr %labelreturn, align [[#SBYTES]]
-  ; CHECK: [[RS0:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } undef, i[[#SBITS]] [[RE]], 0
-  ; CHECK: [[RS1:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } [[RS0]], i[[#SBITS]] [[RE]], 1
-  ; CHECK: store { i[[#SBITS]], i[[#SBITS]] } [[RS1]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK: %labelreturn = alloca i8, align 1
+  ; CHECK: [[B:%.*]] = load [2 x i8], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN:2]]
+  ; CHECK: [[A:%.*]] = load { i8, i8 }, ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; CHECK: [[A0:%.*]] = extractvalue { i8, i8 } [[A]], 0
+  ; CHECK: [[A1:%.*]] = extractvalue { i8, i8 } [[A]], 1
+  ; CHECK: [[A01:%.*]] = or i8 [[A0]], [[A1]]
+  ; CHECK: [[B0:%.*]] = extractvalue [2 x i8] [[B]], 0
+  ; CHECK: [[B1:%.*]] = extractvalue [2 x i8] [[B]], 1
+  ; CHECK: [[B01:%.*]] = or i8 [[B0]], [[B1]]
+  ; CHECK: [[R:%.*]] = call { i1, i7 } @__dfsw_custom_with_ret({ i32, i1 } %0, [2 x i7] %1, i8 zeroext [[A01]], i8 zeroext [[B01]], ptr %labelreturn)
+  ; CHECK: [[RE:%.*]] = load i8, ptr %labelreturn, align 1
+  ; CHECK: [[RS0:%.*]] = insertvalue { i8, i8 } undef, i8 [[RE]], 0
+  ; CHECK: [[RS1:%.*]] = insertvalue { i8, i8 } [[RS0]], i8 [[RE]], 1
+  ; CHECK: store { i8, i8 } [[RS1]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK: ret { i1, i7 } [[R]]
   %a1 = extractvalue {i32, i1} %a, 1
   %b0 = extractvalue [2 x i7] %b, 0
@@ -224,15 +221,15 @@ define {i1, i7} @custom_with_ret({i32, i1} %a, [2 x i7] %b) {
 
 define void @custom_without_ret({i32, i1} %a, [2 x i7] %b) {
   ; CHECK: define linkonce_odr void @"dfsw$custom_without_ret"({ i32, i1 } %0, [2 x i7] %1)
-  ; CHECK: [[B:%.*]] = load [2 x i[[#SBITS]]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES)]]) to ptr), align [[ALIGN:2]]
-  ; CHECK: [[A:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr @__dfsan_arg_tls, align [[ALIGN]]
-  ; CHECK: [[A0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 0
-  ; CHECK: [[A1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[A]], 1
-  ; CHECK: [[A01:%.*]] = or i[[#SBITS]] [[A0]], [[A1]]
-  ; CHECK: [[B0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 0
-  ; CHECK: [[B1:%.*]] = extractvalue [2 x i[[#SBITS]]] [[B]], 1
-  ; CHECK: [[B01:%.*]] = or i[[#SBITS]] [[B0]], [[B1]]
-  ; CHECK: call void @__dfsw_custom_without_ret({ i32, i1 } %0, [2 x i7] %1, i[[#SBITS]] zeroext [[A01]], i[[#SBITS]] zeroext [[B01]])
+  ; CHECK: [[B:%.*]] = load [2 x i8], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN:2]]
+  ; CHECK: [[A:%.*]] = load { i8, i8 }, ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; CHECK: [[A0:%.*]] = extractvalue { i8, i8 } [[A]], 0
+  ; CHECK: [[A1:%.*]] = extractvalue { i8, i8 } [[A]], 1
+  ; CHECK: [[A01:%.*]] = or i8 [[A0]], [[A1]]
+  ; CHECK: [[B0:%.*]] = extractvalue [2 x i8] [[B]], 0
+  ; CHECK: [[B1:%.*]] = extractvalue [2 x i8] [[B]], 1
+  ; CHECK: [[B01:%.*]] = or i8 [[B0]], [[B1]]
+  ; CHECK: call void @__dfsw_custom_without_ret({ i32, i1 } %0, [2 x i7] %1, i8 zeroext [[A01]], i8 zeroext [[B01]])
   ; CHECK: ret
   ret void
 }
@@ -244,8 +241,8 @@ define void @custom_varg({i32, i1} %a, ...) {
   ret void
 }
 
-; CHECK: declare { i1, i7 } @__dfsw_custom_with_ret({ i32, i1 }, [2 x i7], i[[#SBITS]], i[[#SBITS]], ptr)
-; CHECK: declare void @__dfsw_custom_without_ret({ i32, i1 }, [2 x i7], i[[#SBITS]], i[[#SBITS]])
-; CHECK: declare void @__dfsw_custom_varg({ i32, i1 }, i[[#SBITS]], ptr, ...)
+; CHECK: declare { i1, i7 } @__dfsw_custom_with_ret({ i32, i1 }, [2 x i7], i8, i8, ptr)
+; CHECK: declare void @__dfsw_custom_without_ret({ i32, i1 }, [2 x i7], i8, i8)
+; CHECK: declare void @__dfsw_custom_varg({ i32, i1 }, i8, ptr, ...)
 
-; CHECK: declare { i1, i7 } @__dfsw_custom_cb(ptr, { i32, i1 }, [2 x i7], i[[#SBITS]], i[[#SBITS]], i[[#SBITS]], ptr)
+; CHECK: declare { i1, i7 } @__dfsw_custom_cb(ptr, { i32, i1 }, [2 x i7], i8, i8, i8, ptr)

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/args-unreachable-bb.ll b/llvm/test/Instrumentation/DataFlowSanitizer/args-unreachable-bb.ll
index e8686a2a4b69c..0f8c078be5274 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/args-unreachable-bb.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/args-unreachable-bb.ll
@@ -2,9 +2,6 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 ; CHECK-LABEL: @unreachable_bb1.dfsan
 define i8 @unreachable_bb1() {
   ; CHECK: ret i8 1

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/arith.ll b/llvm/test/Instrumentation/DataFlowSanitizer/arith.ll
index 0269044c1d394..8c9eb5fa42fc3 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/arith.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/arith.ll
@@ -2,16 +2,13 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 define i8 @add(i8 %a, i8 %b) {
   ; CHECK: @add.dfsan
-  ; CHECK-DAG: %[[#ALABEL:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
-  ; CHECK-DAG: %[[#BLABEL:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
-  ; CHECK: %[[#UNION:]] = or i[[#SBITS]] %[[#ALABEL]], %[[#BLABEL]]
+  ; CHECK-DAG: %[[#ALABEL:]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; CHECK-DAG: %[[#BLABEL:]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+  ; CHECK: %[[#UNION:]] = or i8 %[[#ALABEL]], %[[#BLABEL]]
   ; CHECK: %c = add i8 %a, %b
-  ; CHECK: store i[[#SBITS]] %[[#UNION]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK: store i8 %[[#UNION]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK: ret i8 %c
   %c = add i8 %a, %b
   ret i8 %c
@@ -21,7 +18,7 @@ define i8 @sub(i8 %a, i8 %b) {
   ; CHECK: @sub.dfsan
   ; CHECK: load{{.*}}__dfsan_arg_tls
   ; CHECK: load{{.*}}__dfsan_arg_tls
-  ; CHECK: or i[[#SBITS]]
+  ; CHECK: or i8
   ; CHECK: %c = sub i8 %a, %b
   ; CHECK: store{{.*}}__dfsan_retval_tls
   ; CHECK: ret i8 %c
@@ -33,7 +30,7 @@ define i8 @mul(i8 %a, i8 %b) {
   ; CHECK: @mul.dfsan
   ; CHECK: load{{.*}}__dfsan_arg_tls
   ; CHECK: load{{.*}}__dfsan_arg_tls
-  ; CHECK: or i[[#SBITS]]
+  ; CHECK: or i8
   ; CHECK: %c = mul i8 %a, %b
   ; CHECK: store{{.*}}__dfsan_retval_tls
   ; CHECK: ret i8 %c
@@ -45,7 +42,7 @@ define i8 @sdiv(i8 %a, i8 %b) {
   ; CHECK: @sdiv.dfsan
   ; CHECK: load{{.*}}__dfsan_arg_tls
   ; CHECK: load{{.*}}__dfsan_arg_tls
-  ; CHECK: or i[[#SBITS]]
+  ; CHECK: or i8
   ; CHECK: %c = sdiv i8 %a, %b
   ; CHECK: store{{.*}}__dfsan_retval_tls
   ; CHECK: ret i8 %c
@@ -57,7 +54,7 @@ define i8 @udiv(i8 %a, i8 %b) {
   ; CHECK: @udiv.dfsan
   ; CHECK: load{{.*}}__dfsan_arg_tls
   ; CHECK: load{{.*}}__dfsan_arg_tls
-  ; CHECK: or i[[#SBITS]]
+  ; CHECK: or i8
   ; CHECK: %c = udiv i8 %a, %b
   ; CHECK: store{{.*}}__dfsan_retval_tls
   ; CHECK: ret i8 %c

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/array.ll b/llvm/test/Instrumentation/DataFlowSanitizer/array.ll
index c8c93efb9b980..5642edc413df1 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/array.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/array.ll
@@ -8,24 +8,21 @@ target triple = "x86_64-unknown-linux-gnu"
 
 ; CHECK: @__dfsan_arg_tls = external thread_local(initialexec) global [[TLS_ARR:\[100 x i64\]]]
 ; CHECK: @__dfsan_retval_tls = external thread_local(initialexec) global [[TLS_ARR]]
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 define [4 x i8] @pass_array([4 x i8] %a) {
   ; NO_COMBINE_LOAD_PTR: @pass_array.dfsan
-  ; NO_COMBINE_LOAD_PTR: %1 = load [4 x i[[#SBITS]]], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
-  ; NO_COMBINE_LOAD_PTR: store [4 x i[[#SBITS]]] %1, ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; NO_COMBINE_LOAD_PTR: %1 = load [4 x i8], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; NO_COMBINE_LOAD_PTR: store [4 x i8] %1, ptr @__dfsan_retval_tls, align [[ALIGN]]
 
   ; DEBUG_NONZERO_LABELS: @pass_array.dfsan
-  ; DEBUG_NONZERO_LABELS: [[L:%.*]] = load [4 x i[[#SBITS]]], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
-  ; DEBUG_NONZERO_LABELS: [[L0:%.*]] = extractvalue [4 x i[[#SBITS]]] [[L]], 0
-  ; DEBUG_NONZERO_LABELS: [[L1:%.*]] = extractvalue [4 x i[[#SBITS]]] [[L]], 1
-  ; DEBUG_NONZERO_LABELS: [[L01:%.*]] = or i[[#SBITS]] [[L0]], [[L1]]
-  ; DEBUG_NONZERO_LABELS: [[L2:%.*]] = extractvalue [4 x i[[#SBITS]]] [[L]], 2
-  ; DEBUG_NONZERO_LABELS: [[L012:%.*]] = or i[[#SBITS]] [[L01]], [[L2]]
-  ; DEBUG_NONZERO_LABELS: [[L3:%.*]] = extractvalue [4 x i[[#SBITS]]] [[L]], 3
-  ; DEBUG_NONZERO_LABELS: [[L0123:%.*]] = or i[[#SBITS]] [[L012]], [[L3]]
-  ; DEBUG_NONZERO_LABELS: {{.*}} = icmp ne i[[#SBITS]] [[L0123]], 0
+  ; DEBUG_NONZERO_LABELS: [[L:%.*]] = load [4 x i8], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; DEBUG_NONZERO_LABELS: [[L0:%.*]] = extractvalue [4 x i8] [[L]], 0
+  ; DEBUG_NONZERO_LABELS: [[L1:%.*]] = extractvalue [4 x i8] [[L]], 1
+  ; DEBUG_NONZERO_LABELS: [[L01:%.*]] = or i8 [[L0]], [[L1]]
+  ; DEBUG_NONZERO_LABELS: [[L2:%.*]] = extractvalue [4 x i8] [[L]], 2
+  ; DEBUG_NONZERO_LABELS: [[L012:%.*]] = or i8 [[L01]], [[L2]]
+  ; DEBUG_NONZERO_LABELS: [[L3:%.*]] = extractvalue [4 x i8] [[L]], 3
+  ; DEBUG_NONZERO_LABELS: [[L0123:%.*]] = or i8 [[L012]], [[L3]]
+  ; DEBUG_NONZERO_LABELS: {{.*}} = icmp ne i8 [[L0123]], 0
   ; DEBUG_NONZERO_LABELS: call void @__dfsan_nonzero_label()
 
   ret [4 x i8] %a
@@ -35,30 +32,30 @@ define [4 x i8] @pass_array([4 x i8] %a) {
 
 define %ArrayOfStruct @pass_array_of_struct(%ArrayOfStruct %as) {
   ; NO_COMBINE_LOAD_PTR: @pass_array_of_struct.dfsan
-  ; NO_COMBINE_LOAD_PTR: %1 = load [4 x { i[[#SBITS]], i[[#SBITS]] }], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
-  ; NO_COMBINE_LOAD_PTR: store [4 x { i[[#SBITS]], i[[#SBITS]] }] %1, ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; NO_COMBINE_LOAD_PTR: %1 = load [4 x { i8, i8 }], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; NO_COMBINE_LOAD_PTR: store [4 x { i8, i8 }] %1, ptr @__dfsan_retval_tls, align [[ALIGN]]
 
   ret %ArrayOfStruct %as
 }
 
 define ptr @alloca_ret_array() {
   ; NO_COMBINE_LOAD_PTR: @alloca_ret_array.dfsan
-  ; NO_COMBINE_LOAD_PTR: store i[[#SBITS]] 0, ptr @__dfsan_retval_tls, align 2
+  ; NO_COMBINE_LOAD_PTR: store i8 0, ptr @__dfsan_retval_tls, align 2
   %p = alloca [4 x i1]
   ret ptr %p
 }
 
 define [4 x i1] @load_alloca_array() {
   ; NO_COMBINE_LOAD_PTR-LABEL: @load_alloca_array.dfsan
-  ; NO_COMBINE_LOAD_PTR-NEXT: %[[#R:]] = alloca i[[#SBITS]], align [[#SBYTES]]
+  ; NO_COMBINE_LOAD_PTR-NEXT: %[[#R:]] = alloca i8, align 1
   ; NO_COMBINE_LOAD_PTR-NEXT: %p = alloca [4 x i1]
-  ; NO_COMBINE_LOAD_PTR-NEXT: %[[#R+1]] = load i[[#SBITS]], ptr %[[#R]], align [[#SBYTES]]
-  ; NO_COMBINE_LOAD_PTR-NEXT: %[[#R+2]] = insertvalue [4 x i[[#SBITS]]] undef, i[[#SBITS]] %[[#R+1]], 0
-  ; NO_COMBINE_LOAD_PTR-NEXT: %[[#R+3]] = insertvalue [4 x i[[#SBITS]]] %[[#R+2]], i[[#SBITS]] %[[#R+1]], 1
-  ; NO_COMBINE_LOAD_PTR-NEXT: %[[#R+4]] = insertvalue [4 x i[[#SBITS]]] %[[#R+3]], i[[#SBITS]] %[[#R+1]], 2
-  ; NO_COMBINE_LOAD_PTR-NEXT: %[[#R+5]] = insertvalue [4 x i[[#SBITS]]] %[[#R+4]], i[[#SBITS]] %[[#R+1]], 3
+  ; NO_COMBINE_LOAD_PTR-NEXT: %[[#R+1]] = load i8, ptr %[[#R]], align 1
+  ; NO_COMBINE_LOAD_PTR-NEXT: %[[#R+2]] = insertvalue [4 x i8] undef, i8 %[[#R+1]], 0
+  ; NO_COMBINE_LOAD_PTR-NEXT: %[[#R+3]] = insertvalue [4 x i8] %[[#R+2]], i8 %[[#R+1]], 1
+  ; NO_COMBINE_LOAD_PTR-NEXT: %[[#R+4]] = insertvalue [4 x i8] %[[#R+3]], i8 %[[#R+1]], 2
+  ; NO_COMBINE_LOAD_PTR-NEXT: %[[#R+5]] = insertvalue [4 x i8] %[[#R+4]], i8 %[[#R+1]], 3
   ; NO_COMBINE_LOAD_PTR-NEXT: %a = load [4 x i1], ptr %p
-  ; NO_COMBINE_LOAD_PTR-NEXT: store [4 x i[[#SBITS]]] %[[#R+5]], ptr @__dfsan_retval_tls, align 2
+  ; NO_COMBINE_LOAD_PTR-NEXT: store [4 x i8] %[[#R+5]], ptr @__dfsan_retval_tls, align 2
   ; NO_COMBINE_LOAD_PTR-NEXT: ret [4 x i1] %a
 
   %p = alloca [4 x i1]
@@ -68,27 +65,27 @@ define [4 x i1] @load_alloca_array() {
 
 define [0 x i1] @load_array0(ptr %p) {
   ; NO_COMBINE_LOAD_PTR: @load_array0.dfsan
-  ; NO_COMBINE_LOAD_PTR: store [0 x i[[#SBITS]]] zeroinitializer, ptr @__dfsan_retval_tls, align 2
+  ; NO_COMBINE_LOAD_PTR: store [0 x i8] zeroinitializer, ptr @__dfsan_retval_tls, align 2
   %a = load [0 x i1], ptr %p
   ret [0 x i1] %a
 }
 
 define [1 x i1] @load_array1(ptr %p) {
   ; NO_COMBINE_LOAD_PTR: @load_array1.dfsan
-  ; NO_COMBINE_LOAD_PTR: [[L:%.*]] = load i[[#SBITS]],
-  ; NO_COMBINE_LOAD_PTR: [[S:%.*]] = insertvalue [1 x i[[#SBITS]]] undef, i[[#SBITS]] [[L]], 0
-  ; NO_COMBINE_LOAD_PTR: store [1 x i[[#SBITS]]] [[S]], ptr @__dfsan_retval_tls, align 2
+  ; NO_COMBINE_LOAD_PTR: [[L:%.*]] = load i8,
+  ; NO_COMBINE_LOAD_PTR: [[S:%.*]] = insertvalue [1 x i8] undef, i8 [[L]], 0
+  ; NO_COMBINE_LOAD_PTR: store [1 x i8] [[S]], ptr @__dfsan_retval_tls, align 2
 
   ; EVENT_CALLBACKS: @load_array1.dfsan
-  ; EVENT_CALLBACKS: [[L:%.*]] = or i[[#SBITS]]
-  ; EVENT_CALLBACKS: call void @__dfsan_load_callback(i[[#SBITS]] zeroext [[L]], ptr {{.*}})
+  ; EVENT_CALLBACKS: [[L:%.*]] = or i8
+  ; EVENT_CALLBACKS: call void @__dfsan_load_callback(i8 zeroext [[L]], ptr {{.*}})
 
   ; FAST: @load_array1.dfsan
-  ; FAST: [[P:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
-  ; FAST: [[L:%.*]] = load i[[#SBITS]], ptr {{.*}}, align [[#SBYTES]]
-  ; FAST: [[U:%.*]] = or i[[#SBITS]] [[L]], [[P]]
-  ; FAST: [[S1:%.*]] = insertvalue [1 x i[[#SBITS]]] undef, i[[#SBITS]] [[U]], 0
-  ; FAST: store [1 x i[[#SBITS]]] [[S1]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; FAST: [[P:%.*]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; FAST: [[L:%.*]] = load i8, ptr {{.*}}, align 1
+  ; FAST: [[U:%.*]] = or i8 [[L]], [[P]]
+  ; FAST: [[S1:%.*]] = insertvalue [1 x i8] undef, i8 [[U]], 0
+  ; FAST: store [1 x i8] [[S1]], ptr @__dfsan_retval_tls, align [[ALIGN]]
 
   %a = load [1 x i1], ptr %p
   ret [1 x i1] %a
@@ -96,54 +93,54 @@ define [1 x i1] @load_array1(ptr %p) {
 
 define [2 x i1] @load_array2(ptr %p) {
   ; NO_COMBINE_LOAD_PTR: @load_array2.dfsan
-  ; NO_COMBINE_LOAD_PTR: [[P1:%.*]] = getelementptr i[[#SBITS]], ptr [[P0:%.*]], i64 1
-  ; NO_COMBINE_LOAD_PTR-DAG: [[E1:%.*]] = load i[[#SBITS]], ptr [[P1]], align [[#SBYTES]]
-  ; NO_COMBINE_LOAD_PTR-DAG: [[E0:%.*]] = load i[[#SBITS]], ptr [[P0]], align [[#SBYTES]]
-  ; NO_COMBINE_LOAD_PTR: [[U:%.*]] = or i[[#SBITS]] [[E0]], [[E1]]
-  ; NO_COMBINE_LOAD_PTR: [[S1:%.*]] = insertvalue [2 x i[[#SBITS]]] undef, i[[#SBITS]] [[U]], 0
-  ; NO_COMBINE_LOAD_PTR: [[S2:%.*]] = insertvalue [2 x i[[#SBITS]]] [[S1]], i[[#SBITS]] [[U]], 1
-  ; NO_COMBINE_LOAD_PTR: store [2 x i[[#SBITS]]] [[S2]], ptr @__dfsan_retval_tls, align [[ALIGN:2]]
+  ; NO_COMBINE_LOAD_PTR: [[P1:%.*]] = getelementptr i8, ptr [[P0:%.*]], i64 1
+  ; NO_COMBINE_LOAD_PTR-DAG: [[E1:%.*]] = load i8, ptr [[P1]], align 1
+  ; NO_COMBINE_LOAD_PTR-DAG: [[E0:%.*]] = load i8, ptr [[P0]], align 1
+  ; NO_COMBINE_LOAD_PTR: [[U:%.*]] = or i8 [[E0]], [[E1]]
+  ; NO_COMBINE_LOAD_PTR: [[S1:%.*]] = insertvalue [2 x i8] undef, i8 [[U]], 0
+  ; NO_COMBINE_LOAD_PTR: [[S2:%.*]] = insertvalue [2 x i8] [[S1]], i8 [[U]], 1
+  ; NO_COMBINE_LOAD_PTR: store [2 x i8] [[S2]], ptr @__dfsan_retval_tls, align [[ALIGN:2]]
 
   ; EVENT_CALLBACKS: @load_array2.dfsan
-  ; EVENT_CALLBACKS: [[O1:%.*]] = or i[[#SBITS]]
-  ; EVENT_CALLBACKS: [[O2:%.*]] = or i[[#SBITS]] [[O1]]
-  ; EVENT_CALLBACKS: call void @__dfsan_load_callback(i[[#SBITS]] zeroext [[O2]], ptr {{.*}})
+  ; EVENT_CALLBACKS: [[O1:%.*]] = or i8
+  ; EVENT_CALLBACKS: [[O2:%.*]] = or i8 [[O1]]
+  ; EVENT_CALLBACKS: call void @__dfsan_load_callback(i8 zeroext [[O2]], ptr {{.*}})
 
   ; FAST: @load_array2.dfsan
-  ; FAST: [[P:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
-  ; FAST: [[O:%.*]] = or i[[#SBITS]]
-  ; FAST: [[U:%.*]] = or i[[#SBITS]] [[O]], [[P]]
-  ; FAST: [[S:%.*]] = insertvalue [2 x i[[#SBITS]]] undef, i[[#SBITS]] [[U]], 0
-  ; FAST: [[S1:%.*]] = insertvalue [2 x i[[#SBITS]]] [[S]], i[[#SBITS]] [[U]], 1
-  ; FAST: store [2 x i[[#SBITS]]] [[S1]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; FAST: [[P:%.*]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; FAST: [[O:%.*]] = or i8
+  ; FAST: [[U:%.*]] = or i8 [[O]], [[P]]
+  ; FAST: [[S:%.*]] = insertvalue [2 x i8] undef, i8 [[U]], 0
+  ; FAST: [[S1:%.*]] = insertvalue [2 x i8] [[S]], i8 [[U]], 1
+  ; FAST: store [2 x i8] [[S1]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   %a = load [2 x i1], ptr %p
   ret [2 x i1] %a
 }
 
 define [4 x i1] @load_array4(ptr %p) {
   ; NO_COMBINE_LOAD_PTR: @load_array4.dfsan
-  ; NO_COMBINE_LOAD_PTR: [[T:%.*]] = trunc i[[#mul(4, SBITS)]] {{.*}} to i[[#SBITS]]
-  ; NO_COMBINE_LOAD_PTR: [[S1:%.*]] = insertvalue [4 x i[[#SBITS]]] undef, i[[#SBITS]] [[T]], 0
-  ; NO_COMBINE_LOAD_PTR: [[S2:%.*]] = insertvalue [4 x i[[#SBITS]]] [[S1]], i[[#SBITS]] [[T]], 1
-  ; NO_COMBINE_LOAD_PTR: [[S3:%.*]] = insertvalue [4 x i[[#SBITS]]] [[S2]], i[[#SBITS]] [[T]], 2
-  ; NO_COMBINE_LOAD_PTR: [[S4:%.*]] = insertvalue [4 x i[[#SBITS]]] [[S3]], i[[#SBITS]] [[T]], 3
-  ; NO_COMBINE_LOAD_PTR: store [4 x i[[#SBITS]]] [[S4]], ptr @__dfsan_retval_tls, align 2
+  ; NO_COMBINE_LOAD_PTR: [[T:%.*]] = trunc i32 {{.*}} to i8
+  ; NO_COMBINE_LOAD_PTR: [[S1:%.*]] = insertvalue [4 x i8] undef, i8 [[T]], 0
+  ; NO_COMBINE_LOAD_PTR: [[S2:%.*]] = insertvalue [4 x i8] [[S1]], i8 [[T]], 1
+  ; NO_COMBINE_LOAD_PTR: [[S3:%.*]] = insertvalue [4 x i8] [[S2]], i8 [[T]], 2
+  ; NO_COMBINE_LOAD_PTR: [[S4:%.*]] = insertvalue [4 x i8] [[S3]], i8 [[T]], 3
+  ; NO_COMBINE_LOAD_PTR: store [4 x i8] [[S4]], ptr @__dfsan_retval_tls, align 2
 
   ; EVENT_CALLBACKS: @load_array4.dfsan
-  ; EVENT_CALLBACKS: [[O0:%.*]] = or i[[#mul(4, SBITS)]]
-  ; EVENT_CALLBACKS: [[O1:%.*]] = or i[[#mul(4, SBITS)]] [[O0]]
-  ; EVENT_CALLBACKS: [[O2:%.*]] = trunc i[[#mul(4, SBITS)]] [[O1]] to i[[#SBITS]]
-  ; EVENT_CALLBACKS: [[O3:%.*]] = or i[[#SBITS]] [[O2]]
-  ; EVENT_CALLBACKS: call void @__dfsan_load_callback(i[[#SBITS]] zeroext [[O3]], ptr {{.*}})
+  ; EVENT_CALLBACKS: [[O0:%.*]] = or i32
+  ; EVENT_CALLBACKS: [[O1:%.*]] = or i32 [[O0]]
+  ; EVENT_CALLBACKS: [[O2:%.*]] = trunc i32 [[O1]] to i8
+  ; EVENT_CALLBACKS: [[O3:%.*]] = or i8 [[O2]]
+  ; EVENT_CALLBACKS: call void @__dfsan_load_callback(i8 zeroext [[O3]], ptr {{.*}})
 
   ; FAST: @load_array4.dfsan
-  ; FAST: [[T:%.*]] = trunc i[[#mul(4, SBITS)]] {{.*}} to i[[#SBITS]]
-  ; FAST: [[O:%.*]] = or i[[#SBITS]] [[T]]
-  ; FAST: [[S1:%.*]] = insertvalue [4 x i[[#SBITS]]] undef, i[[#SBITS]] [[O]], 0
-  ; FAST: [[S2:%.*]] = insertvalue [4 x i[[#SBITS]]] [[S1]], i[[#SBITS]] [[O]], 1
-  ; FAST: [[S3:%.*]] = insertvalue [4 x i[[#SBITS]]] [[S2]], i[[#SBITS]] [[O]], 2
-  ; FAST: [[S4:%.*]] = insertvalue [4 x i[[#SBITS]]] [[S3]], i[[#SBITS]] [[O]], 3
-  ; FAST: store [4 x i[[#SBITS]]] [[S4]], ptr @__dfsan_retval_tls, align 2
+  ; FAST: [[T:%.*]] = trunc i32 {{.*}} to i8
+  ; FAST: [[O:%.*]] = or i8 [[T]]
+  ; FAST: [[S1:%.*]] = insertvalue [4 x i8] undef, i8 [[O]], 0
+  ; FAST: [[S2:%.*]] = insertvalue [4 x i8] [[S1]], i8 [[O]], 1
+  ; FAST: [[S3:%.*]] = insertvalue [4 x i8] [[S2]], i8 [[O]], 2
+  ; FAST: [[S4:%.*]] = insertvalue [4 x i8] [[S3]], i8 [[O]], 3
+  ; FAST: store [4 x i8] [[S4]], ptr @__dfsan_retval_tls, align 2
 
   %a = load [4 x i1], ptr %p
   ret [4 x i1] %a
@@ -151,36 +148,36 @@ define [4 x i1] @load_array4(ptr %p) {
 
 define i1 @extract_array([4 x i1] %a) {
   ; NO_COMBINE_LOAD_PTR: @extract_array.dfsan
-  ; NO_COMBINE_LOAD_PTR: [[AM:%.*]] = load [4 x i[[#SBITS]]], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
-  ; NO_COMBINE_LOAD_PTR: [[EM:%.*]] = extractvalue [4 x i[[#SBITS]]] [[AM]], 2
-  ; NO_COMBINE_LOAD_PTR: store i[[#SBITS]] [[EM]], ptr @__dfsan_retval_tls, align 2
+  ; NO_COMBINE_LOAD_PTR: [[AM:%.*]] = load [4 x i8], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; NO_COMBINE_LOAD_PTR: [[EM:%.*]] = extractvalue [4 x i8] [[AM]], 2
+  ; NO_COMBINE_LOAD_PTR: store i8 [[EM]], ptr @__dfsan_retval_tls, align 2
   %e2 = extractvalue [4 x i1] %a, 2
   ret i1 %e2
 }
 
 define [4 x i1] @insert_array([4 x i1] %a, i1 %e2) {
   ; NO_COMBINE_LOAD_PTR: @insert_array.dfsan
-  ; NO_COMBINE_LOAD_PTR: [[EM:%.*]] = load i[[#SBITS]], ptr
-  ; NO_COMBINE_LOAD_PTR-SAME: inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 [[#mul(4, SBYTES)]]) to ptr), align [[ALIGN:2]]
-  ; NO_COMBINE_LOAD_PTR: [[AM:%.*]] = load [4 x i[[#SBITS]]], ptr @__dfsan_arg_tls, align [[ALIGN]]
-  ; NO_COMBINE_LOAD_PTR: [[AM1:%.*]] = insertvalue [4 x i[[#SBITS]]] [[AM]], i[[#SBITS]] [[EM]], 0
-  ; NO_COMBINE_LOAD_PTR: store [4 x i[[#SBITS]]] [[AM1]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; NO_COMBINE_LOAD_PTR: [[EM:%.*]] = load i8, ptr
+  ; NO_COMBINE_LOAD_PTR-SAME: inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN:2]]
+  ; NO_COMBINE_LOAD_PTR: [[AM:%.*]] = load [4 x i8], ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; NO_COMBINE_LOAD_PTR: [[AM1:%.*]] = insertvalue [4 x i8] [[AM]], i8 [[EM]], 0
+  ; NO_COMBINE_LOAD_PTR: store [4 x i8] [[AM1]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   %a1 = insertvalue [4 x i1] %a, i1 %e2, 0
   ret [4 x i1] %a1
 }
 
 define void @store_alloca_array([4 x i1] %a) {
   ; FAST: @store_alloca_array.dfsan
-  ; FAST: [[S:%.*]] = load [4 x i[[#SBITS]]], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
-  ; FAST: [[SP:%.*]] = alloca i[[#SBITS]], align [[#SBYTES]]
-  ; FAST: [[E0:%.*]] = extractvalue [4 x i[[#SBITS]]] [[S]], 0
-  ; FAST: [[E1:%.*]] = extractvalue [4 x i[[#SBITS]]] [[S]], 1
-  ; FAST: [[E01:%.*]] = or i[[#SBITS]] [[E0]], [[E1]]
-  ; FAST: [[E2:%.*]] = extractvalue [4 x i[[#SBITS]]] [[S]], 2
-  ; FAST: [[E012:%.*]] = or i[[#SBITS]] [[E01]], [[E2]]
-  ; FAST: [[E3:%.*]] = extractvalue [4 x i[[#SBITS]]] [[S]], 3
-  ; FAST: [[E0123:%.*]] = or i[[#SBITS]] [[E012]], [[E3]]
-  ; FAST: store i[[#SBITS]] [[E0123]], ptr [[SP]], align [[#SBYTES]]
+  ; FAST: [[S:%.*]] = load [4 x i8], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; FAST: [[SP:%.*]] = alloca i8, align 1
+  ; FAST: [[E0:%.*]] = extractvalue [4 x i8] [[S]], 0
+  ; FAST: [[E1:%.*]] = extractvalue [4 x i8] [[S]], 1
+  ; FAST: [[E01:%.*]] = or i8 [[E0]], [[E1]]
+  ; FAST: [[E2:%.*]] = extractvalue [4 x i8] [[S]], 2
+  ; FAST: [[E012:%.*]] = or i8 [[E01]], [[E2]]
+  ; FAST: [[E3:%.*]] = extractvalue [4 x i8] [[S]], 3
+  ; FAST: [[E0123:%.*]] = or i8 [[E012]], [[E3]]
+  ; FAST: store i8 [[E0123]], ptr [[SP]], align 1
   %p = alloca [4 x i1]
   store [4 x i1] %a, ptr %p
   ret void
@@ -188,33 +185,33 @@ define void @store_alloca_array([4 x i1] %a) {
 
 define void @store_zero_array(ptr %p) {
   ; FAST: @store_zero_array.dfsan
-  ; FAST: store i[[#mul(4, SBITS)]] 0, ptr {{.*}}
+  ; FAST: store i32 0, ptr {{.*}}
   store [4 x i1] zeroinitializer, ptr %p
   ret void
 }
 
 define void @store_array2([2 x i1] %a, ptr %p) {
   ; EVENT_CALLBACKS: @store_array2.dfsan
-  ; EVENT_CALLBACKS: [[E12:%.*]] = or i[[#SBITS]]
-  ; EVENT_CALLBACKS: call void @__dfsan_store_callback(i[[#SBITS]] zeroext [[E12]], ptr %p)
+  ; EVENT_CALLBACKS: [[E12:%.*]] = or i8
+  ; EVENT_CALLBACKS: call void @__dfsan_store_callback(i8 zeroext [[E12]], ptr %p)
 
   ; FAST: @store_array2.dfsan
-  ; FAST: [[S:%.*]] = load [2 x i[[#SBITS]]], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
-  ; FAST: [[E1:%.*]] = extractvalue [2 x i[[#SBITS]]] [[S]], 0
-  ; FAST: [[E2:%.*]] = extractvalue [2 x i[[#SBITS]]] [[S]], 1
-  ; FAST: [[E12:%.*]] = or i[[#SBITS]] [[E1]], [[E2]]
-  ; FAST: [[SP0:%.*]] = getelementptr i[[#SBITS]], ptr [[SP:%.*]], i32 0
-  ; FAST: store i[[#SBITS]] [[E12]], ptr [[SP0]], align [[#SBYTES]]
-  ; FAST: [[SP1:%.*]] = getelementptr i[[#SBITS]], ptr [[SP]], i32 1
-  ; FAST: store i[[#SBITS]] [[E12]], ptr [[SP1]], align [[#SBYTES]]
+  ; FAST: [[S:%.*]] = load [2 x i8], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; FAST: [[E1:%.*]] = extractvalue [2 x i8] [[S]], 0
+  ; FAST: [[E2:%.*]] = extractvalue [2 x i8] [[S]], 1
+  ; FAST: [[E12:%.*]] = or i8 [[E1]], [[E2]]
+  ; FAST: [[SP0:%.*]] = getelementptr i8, ptr [[SP:%.*]], i32 0
+  ; FAST: store i8 [[E12]], ptr [[SP0]], align 1
+  ; FAST: [[SP1:%.*]] = getelementptr i8, ptr [[SP]], i32 1
+  ; FAST: store i8 [[E12]], ptr [[SP1]], align 1
 
   ; COMBINE_STORE_PTR: @store_array2.dfsan
-  ; COMBINE_STORE_PTR: [[O:%.*]] = or i[[#SBITS]]
-  ; COMBINE_STORE_PTR: [[U:%.*]] = or i[[#SBITS]] [[O]]
-  ; COMBINE_STORE_PTR: [[P1:%.*]] = getelementptr i[[#SBITS]], ptr [[P:%.*]], i32 0
-  ; COMBINE_STORE_PTR: store i[[#SBITS]] [[U]], ptr [[P1]], align [[#SBYTES]]
-  ; COMBINE_STORE_PTR: [[P2:%.*]] = getelementptr i[[#SBITS]], ptr [[P]], i32 1
-  ; COMBINE_STORE_PTR: store i[[#SBITS]] [[U]], ptr [[P2]], align [[#SBYTES]]
+  ; COMBINE_STORE_PTR: [[O:%.*]] = or i8
+  ; COMBINE_STORE_PTR: [[U:%.*]] = or i8 [[O]]
+  ; COMBINE_STORE_PTR: [[P1:%.*]] = getelementptr i8, ptr [[P:%.*]], i32 0
+  ; COMBINE_STORE_PTR: store i8 [[U]], ptr [[P1]], align 1
+  ; COMBINE_STORE_PTR: [[P2:%.*]] = getelementptr i8, ptr [[P]], i32 1
+  ; COMBINE_STORE_PTR: store i8 [[U]], ptr [[P2]], align 1
 
   store [2 x i1] %a, ptr %p
   ret void
@@ -222,70 +219,70 @@ define void @store_array2([2 x i1] %a, ptr %p) {
 
 define void @store_array17([17 x i1] %a, ptr %p) {
   ; FAST: @store_array17.dfsan
-  ; FAST: %[[#R:]]   = load [17 x i[[#SBITS]]], ptr @__dfsan_arg_tls, align 2
-  ; FAST: %[[#R+1]]  = extractvalue [17 x i[[#SBITS]]] %[[#R]], 0
-  ; FAST: %[[#R+2]]  = extractvalue [17 x i[[#SBITS]]] %[[#R]], 1
-  ; FAST: %[[#R+3]]  = or i[[#SBITS]] %[[#R+1]], %[[#R+2]]
-  ; FAST: %[[#R+4]]  = extractvalue [17 x i[[#SBITS]]] %[[#R]], 2
-  ; FAST: %[[#R+5]]  = or i[[#SBITS]] %[[#R+3]], %[[#R+4]]
-  ; FAST: %[[#R+6]]  = extractvalue [17 x i[[#SBITS]]] %[[#R]], 3
-  ; FAST: %[[#R+7]]  = or i[[#SBITS]] %[[#R+5]], %[[#R+6]]
-  ; FAST: %[[#R+8]]  = extractvalue [17 x i[[#SBITS]]] %[[#R]], 4
-  ; FAST: %[[#R+9]]  = or i[[#SBITS]] %[[#R+7]], %[[#R+8]]
-  ; FAST: %[[#R+10]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 5
-  ; FAST: %[[#R+11]] = or i[[#SBITS]] %[[#R+9]], %[[#R+10]]
-  ; FAST: %[[#R+12]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 6
-  ; FAST: %[[#R+13]] = or i[[#SBITS]] %[[#R+11]], %[[#R+12]]
-  ; FAST: %[[#R+14]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 7
-  ; FAST: %[[#R+15]] = or i[[#SBITS]] %[[#R+13]], %[[#R+14]]
-  ; FAST: %[[#R+16]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 8
-  ; FAST: %[[#R+17]] = or i[[#SBITS]] %[[#R+15]], %[[#R+16]]
-  ; FAST: %[[#R+18]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 9
-  ; FAST: %[[#R+19]] = or i[[#SBITS]] %[[#R+17]], %[[#R+18]]
-  ; FAST: %[[#R+20]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 10
-  ; FAST: %[[#R+21]] = or i[[#SBITS]] %[[#R+19]], %[[#R+20]]
-  ; FAST: %[[#R+22]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 11
-  ; FAST: %[[#R+23]] = or i[[#SBITS]] %[[#R+21]], %[[#R+22]]
-  ; FAST: %[[#R+24]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 12
-  ; FAST: %[[#R+25]] = or i[[#SBITS]] %[[#R+23]], %[[#R+24]]
-  ; FAST: %[[#R+26]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 13
-  ; FAST: %[[#R+27]] = or i[[#SBITS]] %[[#R+25]], %[[#R+26]]
-  ; FAST: %[[#R+28]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 14
-  ; FAST: %[[#R+29]] = or i[[#SBITS]] %[[#R+27]], %[[#R+28]]
-  ; FAST: %[[#R+30]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 15
-  ; FAST: %[[#R+31]] = or i[[#SBITS]] %[[#R+29]], %[[#R+30]]
-  ; FAST: %[[#R+32]] = extractvalue [17 x i[[#SBITS]]] %[[#R]], 16
-  ; FAST: %[[#R+33]] = or i[[#SBITS]] %[[#R+31]], %[[#R+32]]
-  ; FAST: %[[#VREG:]]  = insertelement <8 x i[[#SBITS]]> poison, i[[#SBITS]] %[[#R+33]], i32 0
-  ; FAST: %[[#VREG+1]] = insertelement <8 x i[[#SBITS]]> %[[#VREG]], i[[#SBITS]] %[[#R+33]], i32 1
-  ; FAST: %[[#VREG+2]] = insertelement <8 x i[[#SBITS]]> %[[#VREG+1]], i[[#SBITS]] %[[#R+33]], i32 2
-  ; FAST: %[[#VREG+3]] = insertelement <8 x i[[#SBITS]]> %[[#VREG+2]], i[[#SBITS]] %[[#R+33]], i32 3
-  ; FAST: %[[#VREG+4]] = insertelement <8 x i[[#SBITS]]> %[[#VREG+3]], i[[#SBITS]] %[[#R+33]], i32 4
-  ; FAST: %[[#VREG+5]] = insertelement <8 x i[[#SBITS]]> %[[#VREG+4]], i[[#SBITS]] %[[#R+33]], i32 5
-  ; FAST: %[[#VREG+6]] = insertelement <8 x i[[#SBITS]]> %[[#VREG+5]], i[[#SBITS]] %[[#R+33]], i32 6
-  ; FAST: %[[#VREG+7]] = insertelement <8 x i[[#SBITS]]> %[[#VREG+6]], i[[#SBITS]] %[[#R+33]], i32 7
-  ; FAST: %[[#VREG+8]] = getelementptr <8 x i[[#SBITS]]>, ptr %[[P:.*]], i32 0
-  ; FAST: store <8 x i[[#SBITS]]> %[[#VREG+7]], ptr %[[#VREG+8]], align [[#SBYTES]]
-  ; FAST: %[[#VREG+9]] = getelementptr <8 x i[[#SBITS]]>, ptr %[[P]], i32 1
-  ; FAST: store <8 x i[[#SBITS]]> %[[#VREG+7]], ptr %[[#VREG+9]], align [[#SBYTES]]
-  ; FAST: %[[#VREG+10]] = getelementptr i[[#SBITS]], ptr %[[P]], i32 16
-  ; FAST: store i[[#SBITS]] %[[#R+33]], ptr %[[#VREG+10]], align [[#SBYTES]]
+  ; FAST: %[[#R:]]   = load [17 x i8], ptr @__dfsan_arg_tls, align 2
+  ; FAST: %[[#R+1]]  = extractvalue [17 x i8] %[[#R]], 0
+  ; FAST: %[[#R+2]]  = extractvalue [17 x i8] %[[#R]], 1
+  ; FAST: %[[#R+3]]  = or i8 %[[#R+1]], %[[#R+2]]
+  ; FAST: %[[#R+4]]  = extractvalue [17 x i8] %[[#R]], 2
+  ; FAST: %[[#R+5]]  = or i8 %[[#R+3]], %[[#R+4]]
+  ; FAST: %[[#R+6]]  = extractvalue [17 x i8] %[[#R]], 3
+  ; FAST: %[[#R+7]]  = or i8 %[[#R+5]], %[[#R+6]]
+  ; FAST: %[[#R+8]]  = extractvalue [17 x i8] %[[#R]], 4
+  ; FAST: %[[#R+9]]  = or i8 %[[#R+7]], %[[#R+8]]
+  ; FAST: %[[#R+10]] = extractvalue [17 x i8] %[[#R]], 5
+  ; FAST: %[[#R+11]] = or i8 %[[#R+9]], %[[#R+10]]
+  ; FAST: %[[#R+12]] = extractvalue [17 x i8] %[[#R]], 6
+  ; FAST: %[[#R+13]] = or i8 %[[#R+11]], %[[#R+12]]
+  ; FAST: %[[#R+14]] = extractvalue [17 x i8] %[[#R]], 7
+  ; FAST: %[[#R+15]] = or i8 %[[#R+13]], %[[#R+14]]
+  ; FAST: %[[#R+16]] = extractvalue [17 x i8] %[[#R]], 8
+  ; FAST: %[[#R+17]] = or i8 %[[#R+15]], %[[#R+16]]
+  ; FAST: %[[#R+18]] = extractvalue [17 x i8] %[[#R]], 9
+  ; FAST: %[[#R+19]] = or i8 %[[#R+17]], %[[#R+18]]
+  ; FAST: %[[#R+20]] = extractvalue [17 x i8] %[[#R]], 10
+  ; FAST: %[[#R+21]] = or i8 %[[#R+19]], %[[#R+20]]
+  ; FAST: %[[#R+22]] = extractvalue [17 x i8] %[[#R]], 11
+  ; FAST: %[[#R+23]] = or i8 %[[#R+21]], %[[#R+22]]
+  ; FAST: %[[#R+24]] = extractvalue [17 x i8] %[[#R]], 12
+  ; FAST: %[[#R+25]] = or i8 %[[#R+23]], %[[#R+24]]
+  ; FAST: %[[#R+26]] = extractvalue [17 x i8] %[[#R]], 13
+  ; FAST: %[[#R+27]] = or i8 %[[#R+25]], %[[#R+26]]
+  ; FAST: %[[#R+28]] = extractvalue [17 x i8] %[[#R]], 14
+  ; FAST: %[[#R+29]] = or i8 %[[#R+27]], %[[#R+28]]
+  ; FAST: %[[#R+30]] = extractvalue [17 x i8] %[[#R]], 15
+  ; FAST: %[[#R+31]] = or i8 %[[#R+29]], %[[#R+30]]
+  ; FAST: %[[#R+32]] = extractvalue [17 x i8] %[[#R]], 16
+  ; FAST: %[[#R+33]] = or i8 %[[#R+31]], %[[#R+32]]
+  ; FAST: %[[#VREG:]]  = insertelement <8 x i8> poison, i8 %[[#R+33]], i32 0
+  ; FAST: %[[#VREG+1]] = insertelement <8 x i8> %[[#VREG]], i8 %[[#R+33]], i32 1
+  ; FAST: %[[#VREG+2]] = insertelement <8 x i8> %[[#VREG+1]], i8 %[[#R+33]], i32 2
+  ; FAST: %[[#VREG+3]] = insertelement <8 x i8> %[[#VREG+2]], i8 %[[#R+33]], i32 3
+  ; FAST: %[[#VREG+4]] = insertelement <8 x i8> %[[#VREG+3]], i8 %[[#R+33]], i32 4
+  ; FAST: %[[#VREG+5]] = insertelement <8 x i8> %[[#VREG+4]], i8 %[[#R+33]], i32 5
+  ; FAST: %[[#VREG+6]] = insertelement <8 x i8> %[[#VREG+5]], i8 %[[#R+33]], i32 6
+  ; FAST: %[[#VREG+7]] = insertelement <8 x i8> %[[#VREG+6]], i8 %[[#R+33]], i32 7
+  ; FAST: %[[#VREG+8]] = getelementptr <8 x i8>, ptr %[[P:.*]], i32 0
+  ; FAST: store <8 x i8> %[[#VREG+7]], ptr %[[#VREG+8]], align 1
+  ; FAST: %[[#VREG+9]] = getelementptr <8 x i8>, ptr %[[P]], i32 1
+  ; FAST: store <8 x i8> %[[#VREG+7]], ptr %[[#VREG+9]], align 1
+  ; FAST: %[[#VREG+10]] = getelementptr i8, ptr %[[P]], i32 16
+  ; FAST: store i8 %[[#R+33]], ptr %[[#VREG+10]], align 1
   store [17 x i1] %a, ptr %p
   ret void
 }
 
 define [2 x i32] @const_array() {
   ; FAST: @const_array.dfsan
-  ; FAST: store [2 x i[[#SBITS]]] zeroinitializer, ptr @__dfsan_retval_tls, align 2
+  ; FAST: store [2 x i8] zeroinitializer, ptr @__dfsan_retval_tls, align 2
   ret [2 x i32] [ i32 42, i32 11 ]
 }
 
 define [4 x i8] @call_array([4 x i8] %a) {
   ; FAST-LABEL: @call_array.dfsan
-  ; FAST: %[[#R:]] = load [4 x i[[#SBITS]]], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
-  ; FAST: store [4 x i[[#SBITS]]] %[[#R]], ptr @__dfsan_arg_tls, align [[ALIGN]]
-  ; FAST: %_dfsret = load [4 x i[[#SBITS]]], ptr @__dfsan_retval_tls, align [[ALIGN]]
-  ; FAST: store [4 x i[[#SBITS]]] %_dfsret, ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; FAST: %[[#R:]] = load [4 x i8], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; FAST: store [4 x i8] %[[#R]], ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; FAST: %_dfsret = load [4 x i8], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; FAST: store [4 x i8] %_dfsret, ptr @__dfsan_retval_tls, align [[ALIGN]]
 
   %r = call [4 x i8] @pass_array([4 x i8] %a)
   ret [4 x i8] %r
@@ -295,7 +292,7 @@ define [4 x i8] @call_array([4 x i8] %a) {
 
 define i8 @fun_with_large_args(i1 %i, %LargeArr %a) {
   ; FAST: @fun_with_large_args.dfsan
-  ; FAST: store i[[#SBITS]] 0, ptr @__dfsan_retval_tls, align 2
+  ; FAST: store i8 0, ptr @__dfsan_retval_tls, align 2
   %r = extractvalue %LargeArr %a, 0
   ret i8 %r
 }
@@ -308,7 +305,7 @@ define %LargeArr @fun_with_large_ret() {
 
 define i8 @call_fun_with_large_ret() {
   ; FAST: @call_fun_with_large_ret.dfsan
-  ; FAST: store i[[#SBITS]] 0, ptr @__dfsan_retval_tls, align 2
+  ; FAST: store i8 0, ptr @__dfsan_retval_tls, align 2
   %r = call %LargeArr @fun_with_large_ret()
   %e = extractvalue %LargeArr %r, 0
   ret i8 %e
@@ -316,8 +313,8 @@ define i8 @call_fun_with_large_ret() {
 
 define i8 @call_fun_with_large_args(i1 %i, %LargeArr %a) {
   ; FAST: @call_fun_with_large_args.dfsan
-  ; FAST: [[I:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
-  ; FAST: store i[[#SBITS]] [[I]], ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; FAST: [[I:%.*]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; FAST: store i8 [[I]], ptr @__dfsan_arg_tls, align [[ALIGN]]
   ; FAST: %r = call i8 @fun_with_large_args.dfsan(i1 %i, [1000 x i8] %a)
 
   %r = call i8 @fun_with_large_args(i1 %i, %LargeArr %a)

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/atomics.ll b/llvm/test/Instrumentation/DataFlowSanitizer/atomics.ll
index cc4a566aed6fa..2bcfc7b11b6e3 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/atomics.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/atomics.ll
@@ -6,9 +6,6 @@ target triple = "x86_64-unknown-linux-gnu"
 
 ; CHECK: @__dfsan_arg_tls = external thread_local(initialexec) global [[TLS_ARR:\[100 x i64\]]]
 ; CHECK: @__dfsan_retval_tls = external thread_local(initialexec) global [[TLS_ARR]]
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 define i32 @AtomicRmwXchg(ptr %p, i32 %x) {
 entry:
   ; COMM: atomicrmw xchg: store clean shadow/origin, return clean shadow/origin
@@ -19,9 +16,9 @@ entry:
   ; CHECK:             %[[#INTP:]] = ptrtoint ptr %p to i64
   ; CHECK-NEXT:        %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#%.10d,MASK:]]
   ; CHECK-NEXT:        %[[#SHADOW_PTR:]] = inttoptr i64 %[[#SHADOW_OFFSET]] to ptr
-  ; CHECK-NEXT:        store i[[#NUM_BITS:mul(SBITS,4)]] 0, ptr %[[#SHADOW_PTR]], align [[#SBYTES]]
+  ; CHECK-NEXT:        store i[[#NUM_BITS:32]] 0, ptr %[[#SHADOW_PTR]], align 1
   ; CHECK-NEXT:        atomicrmw xchg ptr %p, i32 %x seq_cst
-  ; CHECK-NEXT:        store i[[#SBITS]] 0, ptr @__dfsan_retval_tls, align 2
+  ; CHECK-NEXT:        store i8 0, ptr @__dfsan_retval_tls, align 2
   ; CHECK_ORIGIN-NEXT: store i32 0, ptr @__dfsan_retval_origin_tls, align 4
   ; CHECK-NEXT:        ret i32
 
@@ -38,9 +35,9 @@ define i32 @AtomicRmwMax(ptr %p, i32 %x) {
   ; CHECK:             %[[#INTP:]] = ptrtoint ptr %p to i64
   ; CHECK-NEXT:        %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#%.10d,MASK:]]
   ; CHECK-NEXT:        %[[#SHADOW_PTR:]] = inttoptr i64 %[[#SHADOW_OFFSET]] to ptr
-  ; CHECK-NEXT:        store i[[#NUM_BITS]] 0, ptr %[[#SHADOW_PTR]], align [[#SBYTES]]
+  ; CHECK-NEXT:        store i[[#NUM_BITS]] 0, ptr %[[#SHADOW_PTR]], align 1
   ; CHECK-NEXT:        atomicrmw max ptr %p, i32 %x seq_cst
-  ; CHECK-NEXT:        store i[[#SBITS]] 0, ptr @__dfsan_retval_tls, align 2
+  ; CHECK-NEXT:        store i8 0, ptr @__dfsan_retval_tls, align 2
   ; CHECK_ORIGIN-NEXT: store i32 0, ptr @__dfsan_retval_origin_tls, align 4
   ; CHECK-NEXT:        ret i32
 
@@ -59,9 +56,9 @@ define i32 @Cmpxchg(ptr %p, i32 %a, i32 %b) {
   ; CHECK:             %[[#INTP:]] = ptrtoint ptr %p to i64
   ; CHECK-NEXT:        %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#%.10d,MASK:]]
   ; CHECK-NEXT:        %[[#SHADOW_PTR:]] = inttoptr i64 %[[#SHADOW_OFFSET]] to ptr
-  ; CHECK-NEXT:        store i[[#NUM_BITS]] 0, ptr %[[#SHADOW_PTR]], align [[#SBYTES]]
+  ; CHECK-NEXT:        store i[[#NUM_BITS]] 0, ptr %[[#SHADOW_PTR]], align 1
   ; CHECK-NEXT:        %pair = cmpxchg ptr %p, i32 %a, i32 %b seq_cst seq_cst
-  ; CHECK:             store i[[#SBITS]] 0, ptr @__dfsan_retval_tls, align 2
+  ; CHECK:             store i8 0, ptr @__dfsan_retval_tls, align 2
   ; CHECK_ORIGIN-NEXT: store i32 0, ptr @__dfsan_retval_origin_tls, align 4
   ; CHECK-NEXT:        ret i32
 
@@ -81,9 +78,9 @@ define i32 @CmpxchgMonotonic(ptr %p, i32 %a, i32 %b) {
   ; CHECK:             %[[#INTP:]] = ptrtoint ptr %p to i64
   ; CHECK-NEXT:        %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#%.10d,MASK:]]
   ; CHECK-NEXT:        %[[#SHADOW_PTR:]] = inttoptr i64 %[[#SHADOW_OFFSET]] to ptr
-  ; CHECK-NEXT:        store i[[#NUM_BITS]] 0, ptr %[[#SHADOW_PTR]], align [[#SBYTES]]
+  ; CHECK-NEXT:        store i[[#NUM_BITS]] 0, ptr %[[#SHADOW_PTR]], align 1
   ; CHECK-NEXT:        %pair = cmpxchg ptr %p, i32 %a, i32 %b release monotonic
-  ; CHECK:             store i[[#SBITS]] 0, ptr @__dfsan_retval_tls, align 2
+  ; CHECK:             store i8 0, ptr @__dfsan_retval_tls, align 2
   ; CHECK_ORIGIN-NEXT: store i32 0, ptr @__dfsan_retval_origin_tls, align 4
   ; CHECK-NEXT:        ret i32
 
@@ -100,16 +97,16 @@ define i32 @AtomicLoad(ptr %p) {
 
   ; CHECK-LABEL:  @AtomicLoad.dfsan
   ; CHECK_ORIGIN: %[[#PO:]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; CHECK:        %[[#PS:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
+  ; CHECK:        %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align 2
   ; CHECK:        %a = load atomic i32, ptr %p seq_cst, align 16
   ; CHECK:        %[[#SHADOW_PTR:]] = inttoptr i64 {{.*}} to ptr
   ; CHECK_ORIGIN: %[[#ORIGIN_PTR:]] = inttoptr i64 {{.*}} to ptr
   ; CHECK_ORIGIN: %[[#AO:]] = load i32, ptr %[[#ORIGIN_PTR]], align 16
-  ; CHECK:        load i[[#NUM_BITS]], ptr %[[#SHADOW_PTR]], align [[#SBYTES]]
-  ; CHECK:        %[[#AP_S:]] = or i[[#SBITS]] {{.*}}, %[[#PS]]
-  ; CHECK_ORIGIN: %[[#PS_NZ:]] = icmp ne i[[#SBITS]] %[[#PS]], 0
+  ; CHECK:        load i[[#NUM_BITS]], ptr %[[#SHADOW_PTR]], align 1
+  ; CHECK:        %[[#AP_S:]] = or i8 {{.*}}, %[[#PS]]
+  ; CHECK_ORIGIN: %[[#PS_NZ:]] = icmp ne i8 %[[#PS]], 0
   ; CHECK_ORIGIN: %[[#AP_O:]] = select i1 %[[#PS_NZ]], i32 %[[#PO]], i32 %[[#AO]]
-  ; CHECK:        store i[[#SBITS]] %[[#AP_S]], ptr @__dfsan_retval_tls, align 2
+  ; CHECK:        store i8 %[[#AP_S]], ptr @__dfsan_retval_tls, align 2
   ; CHECK_ORIGIN: store i32 %[[#AP_O]], ptr @__dfsan_retval_origin_tls, align 4
   ; CHECK:        ret i32 %a
 
@@ -124,16 +121,16 @@ define i32 @AtomicLoadAcquire(ptr %p) {
 
   ; CHECK-LABEL:  @AtomicLoadAcquire.dfsan
   ; CHECK_ORIGIN: %[[#PO:]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; CHECK:        %[[#PS:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
+  ; CHECK:        %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align 2
   ; CHECK:        %a = load atomic i32, ptr %p acquire, align 16
   ; CHECK:        %[[#SHADOW_PTR:]] = inttoptr i64 {{.*}} to ptr
   ; CHECK_ORIGIN: %[[#ORIGIN_PTR:]] = inttoptr i64 {{.*}} to ptr
   ; CHECK_ORIGIN: %[[#AO:]] = load i32, ptr %[[#ORIGIN_PTR]], align 16
-  ; CHECK:        load i[[#NUM_BITS]], ptr %[[#SHADOW_PTR]], align [[#SBYTES]]
-  ; CHECK:        %[[#AP_S:]] = or i[[#SBITS]] {{.*}}, %[[#PS]]
-  ; CHECK_ORIGIN: %[[#PS_NZ:]] = icmp ne i[[#SBITS]] %[[#PS]], 0
+  ; CHECK:        load i[[#NUM_BITS]], ptr %[[#SHADOW_PTR]], align 1
+  ; CHECK:        %[[#AP_S:]] = or i8 {{.*}}, %[[#PS]]
+  ; CHECK_ORIGIN: %[[#PS_NZ:]] = icmp ne i8 %[[#PS]], 0
   ; CHECK_ORIGIN: %[[#AP_O:]] = select i1 %[[#PS_NZ]], i32 %[[#PO]], i32 %[[#AO]]
-  ; CHECK:        store i[[#SBITS]] %[[#AP_S]], ptr @__dfsan_retval_tls, align 2
+  ; CHECK:        store i8 %[[#AP_S]], ptr @__dfsan_retval_tls, align 2
   ; CHECK_ORIGIN: store i32 %[[#AP_O]], ptr @__dfsan_retval_origin_tls, align 4
   ; CHECK:        ret i32 %a
 
@@ -148,16 +145,16 @@ define i32 @AtomicLoadMonotonic(ptr %p) {
 
   ; CHECK-LABEL:  @AtomicLoadMonotonic.dfsan
   ; CHECK_ORIGIN: %[[#PO:]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; CHECK:        %[[#PS:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
+  ; CHECK:        %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align 2
   ; CHECK:        %a = load atomic i32, ptr %p acquire, align 16
   ; CHECK:        %[[#SHADOW_PTR:]] = inttoptr i64 {{.*}} to ptr
   ; CHECK_ORIGIN: %[[#ORIGIN_PTR:]] = inttoptr i64 {{.*}} to ptr
   ; CHECK_ORIGIN: %[[#AO:]] = load i32, ptr %[[#ORIGIN_PTR]], align 16
-  ; CHECK:        load i[[#NUM_BITS]], ptr %[[#SHADOW_PTR]], align [[#SBYTES]]
-  ; CHECK:        %[[#AP_S:]] = or i[[#SBITS]] {{.*}}, %[[#PS]]
-  ; CHECK_ORIGIN: %[[#PS_NZ:]] = icmp ne i[[#SBITS]] %[[#PS]], 0
+  ; CHECK:        load i[[#NUM_BITS]], ptr %[[#SHADOW_PTR]], align 1
+  ; CHECK:        %[[#AP_S:]] = or i8 {{.*}}, %[[#PS]]
+  ; CHECK_ORIGIN: %[[#PS_NZ:]] = icmp ne i8 %[[#PS]], 0
   ; CHECK_ORIGIN: %[[#AP_O:]] = select i1 %[[#PS_NZ]], i32 %[[#PO]], i32 %[[#AO]]
-  ; CHECK:        store i[[#SBITS]] %[[#AP_S]], ptr @__dfsan_retval_tls, align 2
+  ; CHECK:        store i8 %[[#AP_S]], ptr @__dfsan_retval_tls, align 2
   ; CHECK_ORIGIN: store i32 %[[#AP_O]], ptr @__dfsan_retval_origin_tls, align 4
   ; CHECK:        ret i32 %a
 
@@ -171,16 +168,16 @@ define i32 @AtomicLoadUnordered(ptr %p) {
 
   ; CHECK-LABEL:  @AtomicLoadUnordered.dfsan
   ; CHECK_ORIGIN: %[[#PO:]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; CHECK:        %[[#PS:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
+  ; CHECK:        %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align 2
   ; CHECK:        %a = load atomic i32, ptr %p acquire, align 16
   ; CHECK:        %[[#SHADOW_PTR:]] = inttoptr i64 {{.*}} to ptr
   ; CHECK_ORIGIN: %[[#ORIGIN_PTR:]] = inttoptr i64 {{.*}} to ptr
   ; CHECK_ORIGIN: %[[#AO:]] = load i32, ptr %[[#ORIGIN_PTR]], align 16
-  ; CHECK:        load i[[#NUM_BITS]], ptr %[[#SHADOW_PTR]], align [[#SBYTES]]
-  ; CHECK:        %[[#AP_S:]] = or i[[#SBITS]] {{.*}}, %[[#PS]]
-  ; CHECK_ORIGIN: %[[#PS_NZ:]] = icmp ne i[[#SBITS]] %[[#PS]], 0
+  ; CHECK:        load i[[#NUM_BITS]], ptr %[[#SHADOW_PTR]], align 1
+  ; CHECK:        %[[#AP_S:]] = or i8 {{.*}}, %[[#PS]]
+  ; CHECK_ORIGIN: %[[#PS_NZ:]] = icmp ne i8 %[[#PS]], 0
   ; CHECK_ORIGIN: %[[#AP_O:]] = select i1 %[[#PS_NZ]], i32 %[[#PO]], i32 %[[#AO]]
-  ; CHECK:        store i[[#SBITS]] %[[#AP_S]], ptr @__dfsan_retval_tls, align 2
+  ; CHECK:        store i8 %[[#AP_S]], ptr @__dfsan_retval_tls, align 2
   ; CHECK_ORIGIN: store i32 %[[#AP_O]], ptr @__dfsan_retval_origin_tls, align 4
   ; CHECK:        ret i32 %a
 
@@ -199,7 +196,7 @@ define void @AtomicStore(ptr %p, i32 %x) {
   ; CHECK:             %[[#INTP:]] = ptrtoint ptr %p to i64
   ; CHECK-NEXT:        %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#%.10d,MASK:]]
   ; CHECK-NEXT:        %[[#SHADOW_PTR:]] = inttoptr i64 %[[#SHADOW_OFFSET]] to ptr
-  ; CHECK-NEXT:        store i[[#NUM_BITS]] 0, ptr %[[#SHADOW_PTR]], align [[#SBYTES]]
+  ; CHECK-NEXT:        store i[[#NUM_BITS]] 0, ptr %[[#SHADOW_PTR]], align 1
   ; CHECK:             store atomic i32 %x, ptr %p seq_cst, align 16
   ; CHECK:             ret void
 
@@ -218,7 +215,7 @@ define void @AtomicStoreRelease(ptr %p, i32 %x) {
   ; CHECK:             %[[#INTP:]] = ptrtoint ptr %p to i64
   ; CHECK-NEXT:        %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#%.10d,MASK:]]
   ; CHECK-NEXT:        %[[#SHADOW_PTR:]] = inttoptr i64 %[[#SHADOW_OFFSET]] to ptr
-  ; CHECK-NEXT:        store i[[#NUM_BITS]] 0, ptr %[[#SHADOW_PTR]], align [[#SBYTES]]
+  ; CHECK-NEXT:        store i[[#NUM_BITS]] 0, ptr %[[#SHADOW_PTR]], align 1
   ; CHECK:             store atomic i32 %x, ptr %p release, align 16
   ; CHECK:             ret void
 
@@ -237,7 +234,7 @@ define void @AtomicStoreMonotonic(ptr %p, i32 %x) {
   ; CHECK:             %[[#INTP:]] = ptrtoint ptr %p to i64
   ; CHECK-NEXT:        %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#%.10d,MASK:]]
   ; CHECK-NEXT:        %[[#SHADOW_PTR:]] = inttoptr i64 %[[#SHADOW_OFFSET]] to ptr
-  ; CHECK-NEXT:        store i[[#NUM_BITS]] 0, ptr %[[#SHADOW_PTR]], align [[#SBYTES]]
+  ; CHECK-NEXT:        store i[[#NUM_BITS]] 0, ptr %[[#SHADOW_PTR]], align 1
   ; CHECK:             store atomic i32 %x, ptr %p release, align 16
   ; CHECK:             ret void
 
@@ -256,7 +253,7 @@ define void @AtomicStoreUnordered(ptr %p, i32 %x) {
   ; CHECK:             %[[#INTP:]] = ptrtoint ptr %p to i64
   ; CHECK-NEXT:        %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#%.10d,MASK:]]
   ; CHECK-NEXT:        %[[#SHADOW_PTR:]] = inttoptr i64 %[[#SHADOW_OFFSET]] to ptr
-  ; CHECK-NEXT:        store i[[#NUM_BITS]] 0, ptr %[[#SHADOW_PTR]], align [[#SBYTES]]
+  ; CHECK-NEXT:        store i[[#NUM_BITS]] 0, ptr %[[#SHADOW_PTR]], align 1
   ; CHECK:             store atomic i32 %x, ptr %p release, align 16
   ; CHECK:             ret void
 

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/basic.ll b/llvm/test/Instrumentation/DataFlowSanitizer/basic.ll
index 567a512093bce..fa8c9267bba59 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/basic.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/basic.ll
@@ -11,9 +11,6 @@ target triple = "x86_64-unknown-linux-gnu"
 ; CHECK_NO_ORIGIN: @__dfsan_track_origins = weak_odr constant i32 0
 ; CHECK_ORIGIN1: @__dfsan_track_origins = weak_odr constant i32 1
 ; CHECK_ORIGIN2: @__dfsan_track_origins = weak_odr constant i32 2
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 define i8 @load(ptr %p) {
   ; CHECK-LABEL: define i8 @load.dfsan
   ; CHECK: xor i64 {{.*}}, [[SHADOW_XOR_MASK]]
@@ -30,22 +27,22 @@ define void @store(ptr %p) {
   ret void
 }
 
-; CHECK: declare void @__dfsan_load_callback(i[[#SBITS]] zeroext, ptr)
-; CHECK: declare void @__dfsan_store_callback(i[[#SBITS]] zeroext, ptr)
+; CHECK: declare void @__dfsan_load_callback(i8 zeroext, ptr)
+; CHECK: declare void @__dfsan_store_callback(i8 zeroext, ptr)
 ; CHECK: declare void @__dfsan_mem_transfer_callback(ptr, i64)
-; CHECK: declare void @__dfsan_cmp_callback(i[[#SBITS]] zeroext)
+; CHECK: declare void @__dfsan_cmp_callback(i8 zeroext)
 
 ; CHECK: ; Function Attrs: nounwind memory(read)
-; CHECK-NEXT: declare zeroext i[[#SBITS]] @__dfsan_union_load(ptr, i64)
+; CHECK-NEXT: declare zeroext i8 @__dfsan_union_load(ptr, i64)
 
 ; CHECK: ; Function Attrs: nounwind memory(read)
 ; CHECK-NEXT: declare zeroext i64 @__dfsan_load_label_and_origin(ptr, i64)
 
 ; CHECK: declare void @__dfsan_unimplemented(ptr)
-; CHECK: declare void @__dfsan_set_label(i[[#SBITS]] zeroext, i32 zeroext, ptr, i64)
+; CHECK: declare void @__dfsan_set_label(i8 zeroext, i32 zeroext, ptr, i64)
 ; CHECK: declare void @__dfsan_nonzero_label()
 ; CHECK: declare void @__dfsan_vararg_wrapper(ptr)
 ; CHECK: declare zeroext i32 @__dfsan_chain_origin(i32 zeroext)
-; CHECK: declare zeroext i32 @__dfsan_chain_origin_if_tainted(i[[#SBITS]] zeroext, i32 zeroext)
+; CHECK: declare zeroext i32 @__dfsan_chain_origin_if_tainted(i8 zeroext, i32 zeroext)
 ; CHECK: declare void @__dfsan_mem_origin_transfer(ptr, ptr, i64)
-; CHECK: declare void @__dfsan_maybe_store_origin(i[[#SBITS]] zeroext, ptr, i64, i32 zeroext)
+; CHECK: declare void @__dfsan_maybe_store_origin(i8 zeroext, ptr, i64, i32 zeroext)

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/callback.ll b/llvm/test/Instrumentation/DataFlowSanitizer/callback.ll
index 4bd8e7b1ccb6c..a499ec934d9fe 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/callback.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/callback.ll
@@ -2,21 +2,18 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 define i8 @load8(ptr %p) {
-  ; CHECK: call void @__dfsan_load_callback(i[[#SBITS]] zeroext %[[LABEL:.*]], ptr %p)
+  ; CHECK: call void @__dfsan_load_callback(i8 zeroext %[[LABEL:.*]], ptr %p)
   ; CHECK: %a = load i8, ptr %p
-  ; CHECK: store i[[#SBITS]] %[[LABEL]], ptr @__dfsan_retval_tls
+  ; CHECK: store i8 %[[LABEL]], ptr @__dfsan_retval_tls
 
   %a = load i8, ptr %p
   ret i8 %a
 }
 
 define void @store8(ptr %p, i8 %a) {
-  ; CHECK: store i[[#SBITS]] %[[LABEL:.*]], ptr %{{.*}}
-  ; CHECK: call void @__dfsan_store_callback(i[[#SBITS]] zeroext %[[LABEL]], ptr %p)
+  ; CHECK: store i8 %[[LABEL:.*]], ptr %{{.*}}
+  ; CHECK: call void @__dfsan_store_callback(i8 zeroext %[[LABEL]], ptr %p)
   ; CHECK: store i8 %a, ptr %p
 
   store i8 %a, ptr %p
@@ -24,19 +21,19 @@ define void @store8(ptr %p, i8 %a) {
 }
 
 define i1 @cmp(i8 %a, i8 %b) {
-  ; CHECK: call void @__dfsan_cmp_callback(i[[#SBITS]] zeroext %[[CMPLABEL:.*]])
+  ; CHECK: call void @__dfsan_cmp_callback(i8 zeroext %[[CMPLABEL:.*]])
   ; CHECK: %c = icmp ne i8 %a, %b
-  ; CHECK: store i[[#SBITS]] %[[CMPLABEL]], ptr @__dfsan_retval_tls
+  ; CHECK: store i8 %[[CMPLABEL]], ptr @__dfsan_retval_tls
 
   %c = icmp ne i8 %a, %b
   ret i1 %c
 }
 
-; CHECK: declare void @__dfsan_load_callback(i[[#SBITS]] zeroext, ptr)
-; CHECK: declare void @__dfsan_store_callback(i[[#SBITS]] zeroext, ptr)
+; CHECK: declare void @__dfsan_load_callback(i8 zeroext, ptr)
+; CHECK: declare void @__dfsan_store_callback(i8 zeroext, ptr)
 ; CHECK: declare void @__dfsan_mem_transfer_callback(ptr, i64)
-; CHECK: declare void @__dfsan_cmp_callback(i[[#SBITS]] zeroext)
-; CHECK: declare void @__dfsan_conditional_callback(i[[#SBITS]] zeroext)
-; CHECK: declare void @__dfsan_conditional_callback_origin(i[[#SBITS]] zeroext, i32)
-; CHECK: declare void @__dfsan_reaches_function_callback(i[[#SBITS]] zeroext, ptr, i32, ptr)
-; CHECK: declare void @__dfsan_reaches_function_callback_origin(i[[#SBITS]] zeroext, i32, ptr, i32, ptr)
+; CHECK: declare void @__dfsan_cmp_callback(i8 zeroext)
+; CHECK: declare void @__dfsan_conditional_callback(i8 zeroext)
+; CHECK: declare void @__dfsan_conditional_callback_origin(i8 zeroext, i32)
+; CHECK: declare void @__dfsan_reaches_function_callback(i8 zeroext, ptr, i32, ptr)
+; CHECK: declare void @__dfsan_reaches_function_callback_origin(i8 zeroext, i32, ptr, i32, ptr)

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/custom_fun_varargs_attributes.ll b/llvm/test/Instrumentation/DataFlowSanitizer/custom_fun_varargs_attributes.ll
index 44121067931a2..45388d6ac8b19 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/custom_fun_varargs_attributes.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/custom_fun_varargs_attributes.ll
@@ -1,9 +1,6 @@
 ; RUN: opt < %s -passes=dfsan -dfsan-abilist=%S/Inputs/abilist.txt -S | FileCheck %s
 target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 ; Declare a custom varargs function.
 declare i16 @custom_varargs(i64, ...)
 
@@ -12,7 +9,7 @@ define void @call_custom_varargs(ptr %buf) {
   ;; All arguments have an annotation.  Check that the transformed function
   ;; preserves each annotation.
 
-  ; CHECK: call zeroext i16 (i64, i[[#SBITS]], ptr, ptr, ...)
+  ; CHECK: call zeroext i16 (i64, i8, ptr, ptr, ...)
   ; CHECK-SAME: @__dfsw_custom_varargs
   ; CHECK-SAME: i64 signext 200
   ; CHECK-SAME: ptr nonnull

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/debug-nonzero-labels.ll b/llvm/test/Instrumentation/DataFlowSanitizer/debug-nonzero-labels.ll
index bf7f8e19db387..b3db8c2d32982 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/debug-nonzero-labels.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/debug-nonzero-labels.ll
@@ -2,30 +2,27 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 declare i32 @g()
 
 ; CHECK: define i32 @f.dfsan(i32 %0, i32 %1)
 define i32 @f(i32, i32) {
-  ; CHECK: [[ARGLABEL1:%.*]] = load i[[#SBITS]], {{.*}} @__dfsan_arg_tls
+  ; CHECK: [[ARGLABEL1:%.*]] = load i8, {{.*}} @__dfsan_arg_tls
   %i = alloca i32
-  ; CHECK: [[ARGCMP1:%.*]] = icmp ne i[[#SBITS]] [[ARGLABEL1]], 0
+  ; CHECK: [[ARGCMP1:%.*]] = icmp ne i8 [[ARGLABEL1]], 0
   ; CHECK: br i1 [[ARGCMP1]]
-  ; CHECK: [[ARGLABEL2:%.*]] = load i[[#SBITS]], {{.*}} @__dfsan_arg_tls
-  ; CHECK: [[LOCALLABELALLOCA:%.*]] = alloca i[[#SBITS]]
-  ; CHECK: [[ARGCMP2:%.*]] = icmp ne i[[#SBITS]] [[ARGLABEL2]], 0
+  ; CHECK: [[ARGLABEL2:%.*]] = load i8, {{.*}} @__dfsan_arg_tls
+  ; CHECK: [[LOCALLABELALLOCA:%.*]] = alloca i8
+  ; CHECK: [[ARGCMP2:%.*]] = icmp ne i8 [[ARGLABEL2]], 0
   ; CHECK: br i1 [[ARGCMP2]]
   %x = add i32 %0, %1
   store i32 %x, ptr %i
   ; CHECK: [[CALL:%.*]] = call i32 @g.dfsan()
-  ; CHECK: [[RETLABEL:%.*]] = load i[[#SBITS]], {{.*}} @__dfsan_retval_tls
-  ; CHECK: [[CALLCMP:%.*]] = icmp ne i[[#SBITS]] [[RETLABEL]], 0
+  ; CHECK: [[RETLABEL:%.*]] = load i8, {{.*}} @__dfsan_retval_tls
+  ; CHECK: [[CALLCMP:%.*]] = icmp ne i8 [[RETLABEL]], 0
   ; CHECK: br i1 [[CALLCMP]]
   %call = call i32 @g()
-  ; CHECK: [[LOCALLABEL:%.*]] = load i[[#SBITS]], ptr [[LOCALLABELALLOCA]]
-  ; CHECK: [[LOCALCMP:%.*]] = icmp ne i[[#SBITS]] [[LOCALLABEL]], 0
+  ; CHECK: [[LOCALLABEL:%.*]] = load i8, ptr [[LOCALLABELALLOCA]]
+  ; CHECK: [[LOCALCMP:%.*]] = icmp ne i8 [[LOCALLABEL]], 0
   ; CHECK: br i1 [[LOCALCMP]]
   %load = load i32, ptr %i
   ret i32 %load

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/dont_combine_offset_labels_on_gep.ll b/llvm/test/Instrumentation/DataFlowSanitizer/dont_combine_offset_labels_on_gep.ll
index 677ccd23a540b..997681bb8e692 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/dont_combine_offset_labels_on_gep.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/dont_combine_offset_labels_on_gep.ll
@@ -5,14 +5,12 @@ target triple = "x86_64-unknown-linux-gnu"
 
 ; CHECK: @__dfsan_arg_tls = external thread_local(initialexec) global [[TLS_ARR:\[100 x i64\]]]
 ; CHECK: @__dfsan_retval_tls = external thread_local(initialexec) global [[TLS_ARR]]
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-
 define ptr @gepop(ptr %p, i32 %a, i32 %b, i32 %c) {
   ; CHECK: @gepop.dfsan
   ; CHECK_ORIGIN: %[[#PO:]] = load i32, ptr @__dfsan_arg_origin_tls, align [[ALIGN_O:4]]
-  ; CHECK: %[[#PS:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN_S:2]]
+  ; CHECK: %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN_S:2]]
   ; CHECK: %e = getelementptr [10 x [20 x i32]], ptr %p, i32 %a, i32 %b, i32 %c
-  ; CHECK: store i[[#SBITS]] %[[#PS]], ptr @__dfsan_retval_tls, align [[ALIGN_S]]
+  ; CHECK: store i8 %[[#PS]], ptr @__dfsan_retval_tls, align [[ALIGN_S]]
   ; CHECK_ORIGIN: store i32 %[[#PO]], ptr @__dfsan_retval_origin_tls, align [[ALIGN_O]]
 
   %e = getelementptr [10 x [20 x i32]], ptr %p, i32 %a, i32 %b, i32 %c

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/extern_weak.ll b/llvm/test/Instrumentation/DataFlowSanitizer/extern_weak.ll
index 71b45b6fcffdd..e0ff827e1d076 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/extern_weak.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/extern_weak.ll
@@ -3,9 +3,6 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 ; CHECK: declare extern_weak i8 @ExternWeak(i8)
 declare extern_weak i8 @ExternWeak(i8)
 

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/load.ll b/llvm/test/Instrumentation/DataFlowSanitizer/load.ll
index 00db6592aabd3..bf8ba909e0be0 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/load.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/load.ll
@@ -5,9 +5,6 @@ target triple = "x86_64-unknown-linux-gnu"
 
 ; CHECK: @__dfsan_arg_tls = external thread_local(initialexec) global [[TLS_ARR:\[100 x i64\]]]
 ; CHECK: @__dfsan_retval_tls = external thread_local(initialexec) global [[TLS_ARR]]
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 
 define {} @load0(ptr %p) {
   ; CHECK-LABEL:           @load0.dfsan
@@ -21,14 +18,14 @@ define {} @load0(ptr %p) {
 
 define i8 @load8(ptr %p) {
   ; CHECK-LABEL:           @load8.dfsan
-  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
   ; CHECK-NEXT:            %[[#INTP:]] = ptrtoint ptr %p to i64
   ; CHECK-NEXT:            %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#%.10d,MASK:]]
   ; CHECK-NEXT:            %[[#SHADOW_PTR:]] = inttoptr i64 %[[#SHADOW_OFFSET]] to ptr
-  ; CHECK-NEXT:            %[[#SHADOW:]] = load i[[#SBITS]], ptr %[[#SHADOW_PTR]]
-  ; COMBINE_LOAD_PTR-NEXT: %[[#SHADOW:]] = or i[[#SBITS]] %[[#SHADOW]], %[[#PS]]
+  ; CHECK-NEXT:            %[[#SHADOW:]] = load i8, ptr %[[#SHADOW_PTR]]
+  ; COMBINE_LOAD_PTR-NEXT: %[[#SHADOW:]] = or i8 %[[#SHADOW]], %[[#PS]]
   ; CHECK-NEXT:            %a = load i8, ptr %p
-  ; CHECK-NEXT:            store i[[#SBITS]] %[[#SHADOW]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK-NEXT:            store i8 %[[#SHADOW]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK-NEXT:            ret i8 %a
 
   %a = load i8, ptr %p
@@ -37,18 +34,18 @@ define i8 @load8(ptr %p) {
 
 define i16 @load16(ptr %p) {
   ; CHECK-LABEL:           @load16.dfsan
-  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
   ; CHECK-NEXT:            %[[#INTP:]] = ptrtoint ptr %p to i64
   ; CHECK-NEXT:            %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#MASK]]
   ; CHECK-NEXT:            %[[#SHADOW_PTR:]] = inttoptr i64 %[[#SHADOW_OFFSET]] to ptr
-  ; CHECK-NEXT:            %[[#SHADOW_PTR+1]] = getelementptr i[[#SBITS]], ptr %[[#SHADOW_PTR]], i64 1
-  ; CHECK-NEXT:            %[[#SHADOW:]]  = load i[[#SBITS]], ptr %[[#SHADOW_PTR]]
-  ; CHECK-NEXT:            %[[#SHADOW+1]] = load i[[#SBITS]], ptr %[[#SHADOW_PTR+1]]
+  ; CHECK-NEXT:            %[[#SHADOW_PTR+1]] = getelementptr i8, ptr %[[#SHADOW_PTR]], i64 1
+  ; CHECK-NEXT:            %[[#SHADOW:]]  = load i8, ptr %[[#SHADOW_PTR]]
+  ; CHECK-NEXT:            %[[#SHADOW+1]] = load i8, ptr %[[#SHADOW_PTR+1]]
 
-  ; CHECK-NEXT:            %[[#SHADOW:]] = or i[[#SBITS]] %[[#SHADOW]], %[[#SHADOW+1]]
-  ; COMBINE_LOAD_PTR-NEXT: %[[#SHADOW:]] = or i[[#SBITS]] %[[#SHADOW]], %[[#PS]]
+  ; CHECK-NEXT:            %[[#SHADOW:]] = or i8 %[[#SHADOW]], %[[#SHADOW+1]]
+  ; COMBINE_LOAD_PTR-NEXT: %[[#SHADOW:]] = or i8 %[[#SHADOW]], %[[#PS]]
   ; CHECK-NEXT:            %a = load i16, ptr %p
-  ; CHECK-NEXT:            store i[[#SBITS]] %[[#SHADOW]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK-NEXT:            store i8 %[[#SHADOW]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK-NEXT:            ret i16 %a
 
   %a = load i16, ptr %p
@@ -57,19 +54,19 @@ define i16 @load16(ptr %p) {
 
 define i32 @load32(ptr %p) {
   ; CHECK-LABEL:           @load32.dfsan
-  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
   ; CHECK-NEXT:            %[[#INTP:]] = ptrtoint ptr %p to i64
   ; CHECK-NEXT:            %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#MASK]]
   ; CHECK-NEXT:            %[[#SHADOW_PTR:]] = inttoptr i64 %[[#SHADOW_OFFSET]] to ptr
-  ; CHECK-NEXT:            %[[#WIDE_SHADOW:]] = load i[[#WSBITS:mul(SBITS,4)]], ptr %[[#SHADOW_PTR]], align [[#SBYTES]]
-  ; CHECK-NEXT:            %[[#WIDE_SHADOW_SHIFTED:]] = lshr i[[#WSBITS]] %[[#WIDE_SHADOW]], [[#mul(SBITS,2)]]
+  ; CHECK-NEXT:            %[[#WIDE_SHADOW:]] = load i[[#WSBITS:32]], ptr %[[#SHADOW_PTR]], align 1
+  ; CHECK-NEXT:            %[[#WIDE_SHADOW_SHIFTED:]] = lshr i[[#WSBITS]] %[[#WIDE_SHADOW]], 16
   ; CHECK-NEXT:            %[[#WIDE_SHADOW:]] = or i[[#WSBITS]] %[[#WIDE_SHADOW]], %[[#WIDE_SHADOW_SHIFTED]]
-  ; CHECK-NEXT:            %[[#WIDE_SHADOW_SHIFTED:]] = lshr i[[#WSBITS]] %[[#WIDE_SHADOW]], [[#SBITS]]
+  ; CHECK-NEXT:            %[[#WIDE_SHADOW_SHIFTED:]] = lshr i[[#WSBITS]] %[[#WIDE_SHADOW]], 8
   ; CHECK-NEXT:            %[[#WIDE_SHADOW:]] = or i[[#WSBITS]] %[[#WIDE_SHADOW]], %[[#WIDE_SHADOW_SHIFTED]]
-  ; CHECK-NEXT:            %[[#SHADOW:]] = trunc i[[#WSBITS]] %[[#WIDE_SHADOW]] to i[[#SBITS]]
-  ; COMBINE_LOAD_PTR-NEXT: %[[#SHADOW:]] = or i[[#SBITS]] %[[#SHADOW]], %[[#PS]]
+  ; CHECK-NEXT:            %[[#SHADOW:]] = trunc i[[#WSBITS]] %[[#WIDE_SHADOW]] to i8
+  ; COMBINE_LOAD_PTR-NEXT: %[[#SHADOW:]] = or i8 %[[#SHADOW]], %[[#PS]]
   ; CHECK-NEXT:            %a = load i32, ptr %p, align 4
-  ; CHECK-NEXT:            store i[[#SBITS]] %[[#SHADOW]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK-NEXT:            store i8 %[[#SHADOW]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK-NEXT:            ret i32 %a
 
   %a = load i32, ptr %p
@@ -78,21 +75,21 @@ define i32 @load32(ptr %p) {
 
 define i64 @load64(ptr %p) {
   ; CHECK-LABEL:           @load64.dfsan
-  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
   ; CHECK-NEXT:            %[[#INTP:]] = ptrtoint ptr %p to i64
   ; CHECK-NEXT:            %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#MASK]]
   ; CHECK-NEXT:            %[[#SHADOW_PTR:]] = inttoptr i64 %[[#SHADOW_OFFSET]] to ptr
-  ; CHECK-NEXT:            %[[#WIDE_SHADOW:]] = load i64, ptr %[[#SHADOW_PTR]], align [[#SBYTES]]
+  ; CHECK-NEXT:            %[[#WIDE_SHADOW:]] = load i64, ptr %[[#SHADOW_PTR]], align 1
   ; CHECK-NEXT:            %[[#WIDE_SHADOW_SHIFTED:]] = lshr i64 %[[#WIDE_SHADOW]], 32
   ; CHECK-NEXT:            %[[#WIDE_SHADOW:]] = or i64 %[[#WIDE_SHADOW]], %[[#WIDE_SHADOW_SHIFTED]]
   ; CHECK-NEXT:            %[[#WIDE_SHADOW_SHIFTED:]] = lshr i64 %[[#WIDE_SHADOW]], 16
   ; CHECK-NEXT:            %[[#WIDE_SHADOW:]] = or i64 %[[#WIDE_SHADOW]], %[[#WIDE_SHADOW_SHIFTED]]
   ; CHECK-NEXT:            %[[#WIDE_SHADOW_SHIFTED:]] = lshr i64 %[[#WIDE_SHADOW]], 8
   ; CHECK-NEXT:            %[[#WIDE_SHADOW:]] = or i64 %[[#WIDE_SHADOW]], %[[#WIDE_SHADOW_SHIFTED]]
-  ; CHECK-NEXT:            %[[#SHADOW:]] = trunc i64 %[[#WIDE_SHADOW]] to i[[#SBITS]]
-  ; COMBINE_LOAD_PTR-NEXT: %[[#SHADOW:]] = or i[[#SBITS]] %[[#SHADOW]], %[[#PS]]
+  ; CHECK-NEXT:            %[[#SHADOW:]] = trunc i64 %[[#WIDE_SHADOW]] to i8
+  ; COMBINE_LOAD_PTR-NEXT: %[[#SHADOW:]] = or i8 %[[#SHADOW]], %[[#PS]]
   ; CHECK-NEXT:            %a = load i64, ptr %p, align 8
-  ; CHECK-NEXT:            store i[[#SBITS]] %[[#SHADOW]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK-NEXT:            store i8 %[[#SHADOW]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK-NEXT:            ret i64 %a
 
   %a = load i64, ptr %p
@@ -101,13 +98,13 @@ define i64 @load64(ptr %p) {
 
 define i128 @load128(ptr %p) {
   ; CHECK-LABEL:           @load128.dfsan
-  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
   ; CHECK-NEXT:            %[[#INTP:]] = ptrtoint ptr %p to i64
   ; CHECK-NEXT:            %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#MASK]]
   ; CHECK-NEXT:            %[[#SHADOW_PTR:]] = inttoptr i64 %[[#SHADOW_OFFSET]] to ptr
-  ; CHECK-NEXT:            %[[#WIDE_SHADOW:]] = load i64, ptr %[[#SHADOW_PTR]], align [[#SBYTES]]
+  ; CHECK-NEXT:            %[[#WIDE_SHADOW:]] = load i64, ptr %[[#SHADOW_PTR]], align 1
   ; CHECK-NEXT:            %[[#WIDE_SHADOW_PTR2:]] = getelementptr i64, ptr %[[#SHADOW_PTR]], i64 1
-  ; CHECK-NEXT:            %[[#WIDE_SHADOW2:]] = load i64, ptr %[[#WIDE_SHADOW_PTR2]], align [[#SBYTES]]
+  ; CHECK-NEXT:            %[[#WIDE_SHADOW2:]] = load i64, ptr %[[#WIDE_SHADOW_PTR2]], align 1
   ; CHECK-NEXT:            %[[#WIDE_SHADOW:]] = or i64 %[[#WIDE_SHADOW]], %[[#WIDE_SHADOW2]]
   ; CHECK-NEXT:            %[[#WIDE_SHADOW_SHIFTED:]] = lshr i64 %[[#WIDE_SHADOW]], 32
   ; CHECK-NEXT:            %[[#WIDE_SHADOW:]] = or i64 %[[#WIDE_SHADOW]], %[[#WIDE_SHADOW_SHIFTED]]
@@ -115,10 +112,10 @@ define i128 @load128(ptr %p) {
   ; CHECK-NEXT:            %[[#WIDE_SHADOW:]] = or i64 %[[#WIDE_SHADOW]], %[[#WIDE_SHADOW_SHIFTED]]
   ; CHECK-NEXT:            %[[#WIDE_SHADOW_SHIFTED:]] = lshr i64 %[[#WIDE_SHADOW]], 8
   ; CHECK-NEXT:            %[[#WIDE_SHADOW:]] = or i64 %[[#WIDE_SHADOW]], %[[#WIDE_SHADOW_SHIFTED]]
-  ; CHECK-NEXT:            %[[#SHADOW:]] = trunc i64 %[[#WIDE_SHADOW]] to i[[#SBITS]]
-  ; COMBINE_LOAD_PTR-NEXT: %[[#SHADOW:]] = or i[[#SBITS]] %[[#SHADOW]], %[[#PS]]
+  ; CHECK-NEXT:            %[[#SHADOW:]] = trunc i64 %[[#WIDE_SHADOW]] to i8
+  ; COMBINE_LOAD_PTR-NEXT: %[[#SHADOW:]] = or i8 %[[#SHADOW]], %[[#PS]]
   ; CHECK-NEXT:            %a = load i128, ptr %p, align 8
-  ; CHECK-NEXT:            store i[[#SBITS]] %[[#SHADOW]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK-NEXT:            store i8 %[[#SHADOW]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK-NEXT:            ret i128 %a
 
   %a = load i128, ptr %p
@@ -128,14 +125,14 @@ define i128 @load128(ptr %p) {
 
 define i17 @load17(ptr %p) {
   ; CHECK-LABEL:           @load17.dfsan
-  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
   ; CHECK-NEXT:            %[[#INTP:]] = ptrtoint ptr %p to i64
   ; CHECK-NEXT:            %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#MASK]]
   ; CHECK-NEXT:            %[[#SHADOW_PTR:]] = inttoptr i64 %[[#SHADOW_OFFSET]] to ptr
   ; CHECK-NEXT:            %[[#SHADOW:]] = call zeroext i8 @__dfsan_union_load(ptr %[[#SHADOW_PTR]], i64 3)
-  ; COMBINE_LOAD_PTR-NEXT: %[[#SHADOW:]] = or i[[#SBITS]] %[[#SHADOW]], %[[#PS]]
+  ; COMBINE_LOAD_PTR-NEXT: %[[#SHADOW:]] = or i8 %[[#SHADOW]], %[[#PS]]
   ; CHECK-NEXT:            %a = load i17, ptr %p
-  ; CHECK-NEXT:            store i[[#SBITS]] %[[#SHADOW]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK-NEXT:            store i8 %[[#SHADOW]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK-NEXT:            ret i17 %a
 
   %a = load i17, ptr %p
@@ -146,7 +143,7 @@ define i17 @load17(ptr %p) {
 define i1 @load_global() {
   ; CHECK-LABEL:           @load_global.dfsan
   ; CHECK-NEXT:            %a = load i1, ptr @X
-  ; CHECK-NEXT:            store i[[#SBITS]] 0, ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK-NEXT:            store i8 0, ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK-NEXT:            ret i1 %a
 
   %a = load i1, ptr @X

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/lookup_table.ll b/llvm/test/Instrumentation/DataFlowSanitizer/lookup_table.ll
index e773a08b4c384..52c17f461617f 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/lookup_table.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/lookup_table.ll
@@ -5,21 +5,18 @@ target triple = "x86_64-unknown-linux-gnu"
 
 ; CHECK: @__dfsan_arg_tls = external thread_local(initialexec) global [[TLS_ARR:\[100 x i64\]]]
 ; CHECK: @__dfsan_retval_tls = external thread_local(initialexec) global [[TLS_ARR]]
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 @lookup_table_a = external local_unnamed_addr constant [256 x i8], align 16
 @lookup_table_b = external local_unnamed_addr constant [256 x i8], align 16
 
 define i8 @load_lookup_table_a(i8 %p) {
   ; CHECK-LABEL:           @load_lookup_table_a.dfsan
-  ; CHECK-NEXT:            %[[#PS:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; CHECK-NEXT:            %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
   ; CHECK-NEXT:            %c = zext i8 %p to i64
   ; CHECK-NEXT:            %b = getelementptr inbounds [256 x i8], ptr @lookup_table_a, i64 0, i64 %c
   ; CHECK-NEXT:            %a = load i8, ptr %b, align 1
   ; Propagates p shadow when lookup_table_a flag is provided, otherwise propagates 0 shadow
-  ; LOOKUP_A-NEXT:         store i[[#SBITS]] %[[#PS]], ptr @__dfsan_retval_tls, align [[ALIGN]]
-  ; NO_LOOKUP_A-NEXT:      store i[[#SBITS]] 0, ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; LOOKUP_A-NEXT:         store i8 %[[#PS]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; NO_LOOKUP_A-NEXT:      store i8 0, ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK-NEXT:            ret i8 %a
 
   %c = zext i8 %p to i64
@@ -30,12 +27,12 @@ define i8 @load_lookup_table_a(i8 %p) {
 
 define i8 @load_lookup_table_b(i8 %p) {
   ; CHECK-LABEL:           @load_lookup_table_b.dfsan
-  ; CHECK-NEXT:            %[[#PS:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
+  ; CHECK-NEXT:            %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align 2
   ; CHECK-NEXT:            %c = zext i8 %p to i64
   ; CHECK-NEXT:            %b = getelementptr inbounds [256 x i8], ptr @lookup_table_b, i64 0, i64 %c
   ; CHECK-NEXT:            %a = load i8, ptr %b, align 1
   ; Propagates 0 shadow
-  ; CHECK-NEXT:            store i[[#SBITS]] 0, ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK-NEXT:            store i8 0, ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK-NEXT:            ret i8 %a
 
   %c = zext i8 %p to i64

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/memset.ll b/llvm/test/Instrumentation/DataFlowSanitizer/memset.ll
index 3a1deaa12cd5f..fe4532fa7ecbc 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/memset.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/memset.ll
@@ -2,16 +2,13 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
 
 define void @ms(ptr %p, i8 %v) {
   ; CHECK-LABEL: @ms.dfsan
   ; CHECK-SAME: (ptr %p, i8 %v)
-  ; CHECK: %[[ARGLABEL:.*]] = load i[[#SBITS]], {{.*}} @__dfsan_arg_tls
-  ; CHECK: call void @__dfsan_set_label(i[[#SBITS]] %[[ARGLABEL]], i32 0, ptr %p, i64 1)
+  ; CHECK: %[[ARGLABEL:.*]] = load i8, {{.*}} @__dfsan_arg_tls
+  ; CHECK: call void @__dfsan_set_label(i8 %[[ARGLABEL]], i32 0, ptr %p, i64 1)
   call void @llvm.memset.p0.i64(ptr %p, i8 %v, i64 1, i1 1)
   ret void
 }

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/origin_abilist.ll b/llvm/test/Instrumentation/DataFlowSanitizer/origin_abilist.ll
index 77ecf8013d2fc..c9d2469386998 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/origin_abilist.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/origin_abilist.ll
@@ -3,9 +3,6 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
 target triple = "x86_64-unknown-linux-gnu"
 
 ; CHECK: @__dfsan_arg_tls = external thread_local(initialexec) global [[TLS_ARR:\[100 x i64\]]]
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 define i32 @discard(i32 %a, i32 %b) {
   ret i32 0
 }
@@ -117,9 +114,9 @@ define void @call_custom_without_ret(i32 %a, i32 %b) {
   ; CHECK: @call_custom_without_ret.dfsan
   ; CHECK: [[BO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
   ; CHECK: [[AO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; CHECK: [[BS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
-  ; CHECK: [[AS:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
-  ; CHECK: call void @__dfso_custom_without_ret(i32 %a, i32 %b, i[[#SBITS]] zeroext [[AS]], i[[#SBITS]] zeroext [[BS]], i32 zeroext [[AO]], i32 zeroext [[BO]])
+  ; CHECK: [[BS:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
+  ; CHECK: [[AS:%.*]] = load i8, ptr @__dfsan_arg_tls, align 2
+  ; CHECK: call void @__dfso_custom_without_ret(i32 %a, i32 %b, i8 zeroext [[AS]], i8 zeroext [[BS]], i32 zeroext [[AO]], i32 zeroext [[BO]])
   ; CHECK-NEXT: ret void
 
   call void @custom_without_ret(i32 %a, i32 %b)
@@ -131,13 +128,13 @@ define i32 @call_custom_with_ret(i32 %a, i32 %b) {
   ; CHECK: %originreturn = alloca i32, align 4
   ; CHECK: [[BO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
   ; CHECK: [[AO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; CHECK: %labelreturn = alloca i[[#SBITS]], align [[#SBYTES]]
-  ; CHECK: [[BS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
-  ; CHECK: [[AS:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
-  ; CHECK: {{.*}} = call i32 @__dfso_custom_with_ret(i32 %a, i32 %b, i[[#SBITS]] zeroext [[AS]], i[[#SBITS]] zeroext [[BS]], ptr %labelreturn, i32 zeroext [[AO]], i32 zeroext [[BO]], ptr %originreturn)
-  ; CHECK: [[RS:%.*]] = load i[[#SBITS]], ptr %labelreturn, align [[#SBYTES]]
+  ; CHECK: %labelreturn = alloca i8, align 1
+  ; CHECK: [[BS:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
+  ; CHECK: [[AS:%.*]] = load i8, ptr @__dfsan_arg_tls, align 2
+  ; CHECK: {{.*}} = call i32 @__dfso_custom_with_ret(i32 %a, i32 %b, i8 zeroext [[AS]], i8 zeroext [[BS]], ptr %labelreturn, i32 zeroext [[AO]], i32 zeroext [[BO]], ptr %originreturn)
+  ; CHECK: [[RS:%.*]] = load i8, ptr %labelreturn, align 1
   ; CHECK: [[RO:%.*]] = load i32, ptr %originreturn, align 4
-  ; CHECK: store i[[#SBITS]] [[RS]], ptr @__dfsan_retval_tls, align 2
+  ; CHECK: store i8 [[RS]], ptr @__dfsan_retval_tls, align 2
   ; CHECK: store i32 [[RO]], ptr @__dfsan_retval_origin_tls, align 4
 
   %r = call i32 @custom_with_ret(i32 %a, i32 %b)
@@ -149,16 +146,16 @@ define void @call_custom_varg_without_ret(i32 %a, i32 %b) {
   ; CHECK: %originva = alloca [1 x i32], align 4
   ; CHECK: [[BO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
   ; CHECK: [[AO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; CHECK: %labelva = alloca [1 x i[[#SBITS]]], align [[#SBYTES]]
-  ; CHECK: [[BS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
-  ; CHECK: [[AS:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
-  ; CHECK: [[VS0:%.*]] = getelementptr inbounds [1 x i[[#SBITS]]], ptr %labelva, i32 0, i32 0
-  ; CHECK: store i[[#SBITS]] [[AS]], ptr [[VS0]], align [[#SBYTES]]
-  ; CHECK: [[VS0:%.*]] = getelementptr inbounds [1 x i[[#SBITS]]], ptr %labelva, i32 0, i32 0
+  ; CHECK: %labelva = alloca [1 x i8], align 1
+  ; CHECK: [[BS:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
+  ; CHECK: [[AS:%.*]] = load i8, ptr @__dfsan_arg_tls, align 2
+  ; CHECK: [[VS0:%.*]] = getelementptr inbounds [1 x i8], ptr %labelva, i32 0, i32 0
+  ; CHECK: store i8 [[AS]], ptr [[VS0]], align 1
+  ; CHECK: [[VS0:%.*]] = getelementptr inbounds [1 x i8], ptr %labelva, i32 0, i32 0
   ; CHECK: [[VO0:%.*]] = getelementptr inbounds [1 x i32], ptr %originva, i32 0, i32 0
   ; CHECK: store i32 [[AO]], ptr [[VO0]], align 4
   ; CHECK: [[VO0:%.*]] = getelementptr inbounds [1 x i32], ptr %originva, i32 0, i32 0
-  ; CHECK: call void (i32, i32, i[[#SBITS]], i[[#SBITS]], ptr, i32, i32, ptr, ...) @__dfso_custom_varg_without_ret(i32 %a, i32 %b, i[[#SBITS]] zeroext [[AS]], i[[#SBITS]] zeroext [[BS]], ptr [[VS0]], i32 zeroext [[AO]], i32 zeroext [[BO]], ptr [[VO0]], i32 %a)
+  ; CHECK: call void (i32, i32, i8, i8, ptr, i32, i32, ptr, ...) @__dfso_custom_varg_without_ret(i32 %a, i32 %b, i8 zeroext [[AS]], i8 zeroext [[BS]], ptr [[VS0]], i32 zeroext [[AO]], i32 zeroext [[BO]], ptr [[VO0]], i32 %a)
   ; CHECK-NEXT: ret void
 
   call void (i32, i32, ...) @custom_varg_without_ret(i32 %a, i32 %b, i32 %a)
@@ -171,20 +168,20 @@ define i32 @call_custom_varg_with_ret(i32 %a, i32 %b) {
   ; CHECK: %originva = alloca [1 x i32], align 4
   ; CHECK: [[BO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
   ; CHECK: [[AO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls
-  ; CHECK: %labelreturn = alloca i[[#SBITS]], align [[#SBYTES]]
-  ; CHECK: %labelva = alloca [1 x i[[#SBITS]]], align [[#SBYTES]]
-  ; CHECK: [[BS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
-  ; CHECK: [[AS:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
-  ; CHECK: [[VS0:%.*]] = getelementptr inbounds [1 x i[[#SBITS]]], ptr %labelva, i32 0, i32 0
-  ; CHECK: store i[[#SBITS]] [[BS]], ptr [[VS0]], align [[#SBYTES]]
-  ; CHECK: [[VS0:%.*]] = getelementptr inbounds [1 x i[[#SBITS]]], ptr %labelva, i32 0, i32 0
+  ; CHECK: %labelreturn = alloca i8, align 1
+  ; CHECK: %labelva = alloca [1 x i8], align 1
+  ; CHECK: [[BS:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
+  ; CHECK: [[AS:%.*]] = load i8, ptr @__dfsan_arg_tls, align 2
+  ; CHECK: [[VS0:%.*]] = getelementptr inbounds [1 x i8], ptr %labelva, i32 0, i32 0
+  ; CHECK: store i8 [[BS]], ptr [[VS0]], align 1
+  ; CHECK: [[VS0:%.*]] = getelementptr inbounds [1 x i8], ptr %labelva, i32 0, i32 0
   ; CHECK: [[VO0:%.*]] = getelementptr inbounds [1 x i32], ptr %originva, i32 0, i32 0
   ; CHECK: store i32 [[BO]], ptr [[VO0]], align 4
   ; CHECK: [[VO0:%.*]] = getelementptr inbounds [1 x i32], ptr %originva, i32 0, i32 0
-  ; CHECK: {{.*}} = call i32 (i32, i32, i[[#SBITS]], i[[#SBITS]], ptr, ptr, i32, i32, ptr, ptr, ...) @__dfso_custom_varg_with_ret(i32 %a, i32 %b, i[[#SBITS]] zeroext [[AS]], i[[#SBITS]] zeroext [[BS]], ptr [[VS0]], ptr %labelreturn, i32 zeroext [[AO]], i32 zeroext [[BO]], ptr [[VO0]], ptr %originreturn, i32 %b)
-  ; CHECK: [[RS:%.*]] = load i[[#SBITS]], ptr %labelreturn, align [[#SBYTES]]
+  ; CHECK: {{.*}} = call i32 (i32, i32, i8, i8, ptr, ptr, i32, i32, ptr, ptr, ...) @__dfso_custom_varg_with_ret(i32 %a, i32 %b, i8 zeroext [[AS]], i8 zeroext [[BS]], ptr [[VS0]], ptr %labelreturn, i32 zeroext [[AO]], i32 zeroext [[BO]], ptr [[VO0]], ptr %originreturn, i32 %b)
+  ; CHECK: [[RS:%.*]] = load i8, ptr %labelreturn, align 1
   ; CHECK: [[RO:%.*]] = load i32, ptr %originreturn, align 4
-  ; CHECK: store i[[#SBITS]] [[RS]], ptr @__dfsan_retval_tls, align 2
+  ; CHECK: store i8 [[RS]], ptr @__dfsan_retval_tls, align 2
   ; CHECK: store i32 [[RO]], ptr @__dfsan_retval_origin_tls, align 4
 
   %r = call i32 (i32, i32, ...) @custom_varg_with_ret(i32 %a, i32 %b, i32 %b)
@@ -196,13 +193,13 @@ define i32 @call_custom_cb_with_ret(i32 %a, i32 %b) {
   ; CHECK: %originreturn = alloca i32, align 4
   ; CHECK: [[BO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
   ; CHECK: [[AO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; CHECK: %labelreturn = alloca i[[#SBITS]], align [[#SBYTES]]
-  ; CHECK: [[BS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
-  ; CHECK: [[AS:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
-  ; CHECK: {{.*}} = call i32 @__dfso_custom_cb_with_ret(ptr @cb_with_ret.dfsan, i32 %a, i32 %b, i[[#SBITS]] zeroext 0, i[[#SBITS]] zeroext [[AS]], i[[#SBITS]] zeroext [[BS]], ptr %labelreturn, i32 zeroext 0, i32 zeroext [[AO]], i32 zeroext [[BO]], ptr %originreturn)
-  ; CHECK: [[RS:%.*]] = load i[[#SBITS]], ptr %labelreturn, align [[#SBYTES]]
+  ; CHECK: %labelreturn = alloca i8, align 1
+  ; CHECK: [[BS:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
+  ; CHECK: [[AS:%.*]] = load i8, ptr @__dfsan_arg_tls, align 2
+  ; CHECK: {{.*}} = call i32 @__dfso_custom_cb_with_ret(ptr @cb_with_ret.dfsan, i32 %a, i32 %b, i8 zeroext 0, i8 zeroext [[AS]], i8 zeroext [[BS]], ptr %labelreturn, i32 zeroext 0, i32 zeroext [[AO]], i32 zeroext [[BO]], ptr %originreturn)
+  ; CHECK: [[RS:%.*]] = load i8, ptr %labelreturn, align 1
   ; CHECK: [[RO:%.*]] = load i32, ptr %originreturn, align 4
-  ; CHECK: store i[[#SBITS]] [[RS]], ptr @__dfsan_retval_tls, align 2
+  ; CHECK: store i8 [[RS]], ptr @__dfsan_retval_tls, align 2
   ; CHECK: store i32 [[RO]], ptr @__dfsan_retval_origin_tls, align 4
 
   %r = call i32 @custom_cb_with_ret(ptr @cb_with_ret, i32 %a, i32 %b)
@@ -213,9 +210,9 @@ define void @call_custom_cb_without_ret(i32 %a, i32 %b) {
   ; CHECK-LABEL: @call_custom_cb_without_ret.dfsan
   ; CHECK: [[BO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
   ; CHECK: [[AO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; CHECK: [[BS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
-  ; CHECK: [[AS:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
-  ; CHECK: call void @__dfso_custom_cb_without_ret(ptr @cb_without_ret.dfsan, i32 %a, i32 %b, i[[#SBITS]] zeroext 0, i[[#SBITS]] zeroext [[AS]], i[[#SBITS]] zeroext [[BS]], i32 zeroext 0, i32 zeroext [[AO]], i32 zeroext [[BO]])
+  ; CHECK: [[BS:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
+  ; CHECK: [[AS:%.*]] = load i8, ptr @__dfsan_arg_tls, align 2
+  ; CHECK: call void @__dfso_custom_cb_without_ret(ptr @cb_without_ret.dfsan, i32 %a, i32 %b, i8 zeroext 0, i8 zeroext [[AS]], i8 zeroext [[BS]], i32 zeroext 0, i32 zeroext [[AO]], i32 zeroext [[BO]])
   ; CHECK-NEXT: ret void
 
   call void @custom_cb_without_ret(ptr @cb_without_ret, i32 %a, i32 %b)
@@ -224,29 +221,29 @@ define void @call_custom_cb_without_ret(i32 %a, i32 %b) {
 
 ; CHECK: define i32 @discardg(i32 %0, i32 %1)
 ; CHECK: [[R:%.*]] = call i32 @g.dfsan
-; CHECK-NEXT: %_dfsret = load i[[#SBITS]], ptr @__dfsan_retval_tls, align 2
+; CHECK-NEXT: %_dfsret = load i8, ptr @__dfsan_retval_tls, align 2
 ; CHECK-NEXT: %_dfsret_o = load i32, ptr @__dfsan_retval_origin_tls, align 4
 ; CHECK-NEXT: ret i32 [[R]]
 
 ; CHECK: define linkonce_odr void @"dfso$custom_without_ret"(i32 %0, i32 %1)
 ; CHECK:  [[BO:%.*]]  = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
 ; CHECK-NEXT:  [[AO:%.*]]  = load i32, ptr @__dfsan_arg_origin_tls, align 4
-; CHECK-NEXT:  [[BS:%.*]]  = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
-; CHECK-NEXT:  [[AS:%.*]]  = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
-; CHECK-NEXT:  call void @__dfso_custom_without_ret(i32 %0, i32 %1, i[[#SBITS]] zeroext [[AS]], i[[#SBITS]] zeroext [[BS]], i32 zeroext [[AO]], i32 zeroext [[BO]])
+; CHECK-NEXT:  [[BS:%.*]]  = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
+; CHECK-NEXT:  [[AS:%.*]]  = load i8, ptr @__dfsan_arg_tls, align 2
+; CHECK-NEXT:  call void @__dfso_custom_without_ret(i32 %0, i32 %1, i8 zeroext [[AS]], i8 zeroext [[BS]], i32 zeroext [[AO]], i32 zeroext [[BO]])
 ; CHECK-NEXT:  ret void
 
 ; CHECK: define linkonce_odr i32 @"dfso$custom_with_ret"(i32 %0, i32 %1)
 ; CHECK:  %originreturn = alloca i32, align 4
 ; CHECK-NEXT:  [[BO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
 ; CHECK-NEXT:  [[AO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-; CHECK-NEXT:  %labelreturn = alloca i[[#SBITS]], align [[#SBYTES]]
-; CHECK-NEXT:  [[BS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
-; CHECK-NEXT:  [[AS:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
-; CHECK-NEXT:  [[R:%.*]] = call i32 @__dfso_custom_with_ret(i32 %0, i32 %1, i[[#SBITS]] zeroext [[AS]], i[[#SBITS]] zeroext [[BS]], ptr %labelreturn, i32 zeroext [[AO]], i32 zeroext [[BO]], ptr %originreturn)
-; CHECK-NEXT:  [[RS:%.*]] = load i[[#SBITS]], ptr %labelreturn, align [[#SBYTES]]
+; CHECK-NEXT:  %labelreturn = alloca i8, align 1
+; CHECK-NEXT:  [[BS:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
+; CHECK-NEXT:  [[AS:%.*]] = load i8, ptr @__dfsan_arg_tls, align 2
+; CHECK-NEXT:  [[R:%.*]] = call i32 @__dfso_custom_with_ret(i32 %0, i32 %1, i8 zeroext [[AS]], i8 zeroext [[BS]], ptr %labelreturn, i32 zeroext [[AO]], i32 zeroext [[BO]], ptr %originreturn)
+; CHECK-NEXT:  [[RS:%.*]] = load i8, ptr %labelreturn, align 1
 ; CHECK-NEXT:  [[RO:%.*]] = load i32, ptr %originreturn, align 4
-; CHECK-NEXT:  store i[[#SBITS]] [[RS]], ptr @__dfsan_retval_tls, align 2
+; CHECK-NEXT:  store i8 [[RS]], ptr @__dfsan_retval_tls, align 2
 ; CHECK-NEXT:  store i32 [[RO]], ptr @__dfsan_retval_origin_tls, align 4
 ; CHECK-NEXT:  ret i32 [[R]]
 
@@ -263,14 +260,14 @@ define void @call_custom_cb_without_ret(i32 %a, i32 %b) {
 ; CHECK-NEXT:  [[BO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
 ; CHECK-NEXT:  [[AO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
 ; CHECK-NEXT:  [[CO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-; CHECK-NEXT:  %labelreturn = alloca i[[#SBITS]], align [[#SBYTES]]
-; CHECK-NEXT:  [[BS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align 2
-; CHECK-NEXT:  [[AS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
-; CHECK-NEXT:  [[CS:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
-; CHECK-NEXT:  [[R:%.*]] = call i32 @__dfso_custom_cb_with_ret(ptr %0, i32 %1, i32 %2, i[[#SBITS]] zeroext [[CS]], i[[#SBITS]] zeroext [[AS]], i[[#SBITS]] zeroext [[BS]], ptr %labelreturn, i32 zeroext [[CO]], i32 zeroext [[AO]], i32 zeroext [[BO]], ptr %originreturn)
-; CHECK-NEXT:  [[RS:%.*]] = load i[[#SBITS]], ptr %labelreturn, align [[#SBYTES]]
+; CHECK-NEXT:  %labelreturn = alloca i8, align 1
+; CHECK-NEXT:  [[BS:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align 2
+; CHECK-NEXT:  [[AS:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
+; CHECK-NEXT:  [[CS:%.*]] = load i8, ptr @__dfsan_arg_tls, align 2
+; CHECK-NEXT:  [[R:%.*]] = call i32 @__dfso_custom_cb_with_ret(ptr %0, i32 %1, i32 %2, i8 zeroext [[CS]], i8 zeroext [[AS]], i8 zeroext [[BS]], ptr %labelreturn, i32 zeroext [[CO]], i32 zeroext [[AO]], i32 zeroext [[BO]], ptr %originreturn)
+; CHECK-NEXT:  [[RS:%.*]] = load i8, ptr %labelreturn, align 1
 ; CHECK-NEXT:  [[RO:%.*]] = load i32, ptr %originreturn, align 4
-; CHECK-NEXT:  store i[[#SBITS]] [[RS]], ptr @__dfsan_retval_tls, align 2
+; CHECK-NEXT:  store i8 [[RS]], ptr @__dfsan_retval_tls, align 2
 ; CHECK-NEXT:  store i32 [[RO]], ptr @__dfsan_retval_origin_tls, align 4
 ; CHECK-NEXT:  ret i32 [[R]]
 
@@ -278,20 +275,20 @@ define void @call_custom_cb_without_ret(i32 %a, i32 %b) {
 ; CHECK:   [[BO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
 ; CHECK-NEXT:  [[AO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
 ; CHECK-NEXT:  [[CO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-; CHECK-NEXT:  [[BS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align 2
-; CHECK-NEXT:  [[AS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
-; CHECK-NEXT:  [[CS:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
-; CHECK-NEXT:  call void @__dfso_custom_cb_without_ret(ptr %0, i32 %1, i32 %2, i[[#SBITS]] zeroext [[CS]], i[[#SBITS]] zeroext [[AS]], i[[#SBITS]] zeroext [[BS]], i32 zeroext [[CO]], i32 zeroext [[AO]], i32 zeroext [[BO]])
+; CHECK-NEXT:  [[BS:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align 2
+; CHECK-NEXT:  [[AS:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
+; CHECK-NEXT:  [[CS:%.*]] = load i8, ptr @__dfsan_arg_tls, align 2
+; CHECK-NEXT:  call void @__dfso_custom_cb_without_ret(ptr %0, i32 %1, i32 %2, i8 zeroext [[CS]], i8 zeroext [[AS]], i8 zeroext [[BS]], i32 zeroext [[CO]], i32 zeroext [[AO]], i32 zeroext [[BO]])
 ; CHECK-NEXT:  ret void
 
-; CHECK: declare void @__dfso_custom_without_ret(i32, i32, i[[#SBITS]], i[[#SBITS]], i32, i32)
+; CHECK: declare void @__dfso_custom_without_ret(i32, i32, i8, i8, i32, i32)
 
-; CHECK: declare i32 @__dfso_custom_with_ret(i32, i32, i[[#SBITS]], i[[#SBITS]], ptr, i32, i32, ptr)
+; CHECK: declare i32 @__dfso_custom_with_ret(i32, i32, i8, i8, ptr, i32, i32, ptr)
 
-; CHECK: declare i32 @__dfso_custom_cb_with_ret(ptr, i32, i32, i[[#SBITS]], i[[#SBITS]], i[[#SBITS]], ptr, i32, i32, i32, ptr)
+; CHECK: declare i32 @__dfso_custom_cb_with_ret(ptr, i32, i32, i8, i8, i8, ptr, i32, i32, i32, ptr)
 
-; CHECK: declare void @__dfso_custom_cb_without_ret(ptr, i32, i32, i[[#SBITS]], i[[#SBITS]], i[[#SBITS]], i32, i32, i32)
+; CHECK: declare void @__dfso_custom_cb_without_ret(ptr, i32, i32, i8, i8, i8, i32, i32, i32)
 
-; CHECK: declare void @__dfso_custom_varg_without_ret(i32, i32, i[[#SBITS]], i[[#SBITS]], ptr, i32, i32, ptr, ...)
+; CHECK: declare void @__dfso_custom_varg_without_ret(i32, i32, i8, i8, ptr, i32, i32, ptr, ...)
 
-; CHECK: declare i32 @__dfso_custom_varg_with_ret(i32, i32, i[[#SBITS]], i[[#SBITS]], ptr, ptr, i32, i32, ptr, ptr, ...)
+; CHECK: declare i32 @__dfso_custom_varg_with_ret(i32, i32, i8, i8, ptr, ptr, i32, i32, ptr, ptr, ...)

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/origin_cached_shadows.ll b/llvm/test/Instrumentation/DataFlowSanitizer/origin_cached_shadows.ll
index b88e18b5fc417..54da1328f3748 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/origin_cached_shadows.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/origin_cached_shadows.ll
@@ -8,26 +8,23 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
 target triple = "x86_64-unknown-linux-gnu"
 
 ; CHECK: @__dfsan_arg_tls = external thread_local(initialexec) global [[TLS_ARR:\[100 x i64\]]]
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 define void @cached_shadows(double %arg) {
   ; CHECK: @cached_shadows.dfsan
   ; CHECK:  [[AO:%.*]] = load i32, i32* getelementptr inbounds ([200 x i32], [200 x i32]* @__dfsan_arg_origin_tls, i64 0, i64 0), align
-  ; CHECK:  [[AS:%.*]] = load i[[#SBITS]], i[[#SBITS]]* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i[[#SBITS]]*), align [[ALIGN:2]]
+  ; CHECK:  [[AS:%.*]] = load i8, i8* bitcast ([[TLS_ARR]]* @__dfsan_arg_tls to i8*), align [[ALIGN:2]]
   ; CHECK: [[L1:.+]]:
-  ; CHECK:  {{.*}} = phi i[[#SBITS]]
+  ; CHECK:  {{.*}} = phi i8
   ; CHECK:  {{.*}} = phi i32
   ; CHECK:  {{.*}} = phi double [ 3.000000e+00
-  ; CHECK:  [[S_L1:%.*]] = phi i[[#SBITS]] [ 0, %[[L0:.*]] ], [ [[S_L7:%.*]], %[[L7:.*]] ]
+  ; CHECK:  [[S_L1:%.*]] = phi i8 [ 0, %[[L0:.*]] ], [ [[S_L7:%.*]], %[[L7:.*]] ]
   ; CHECK:  [[O_L1:%.*]] = phi i32 [ 0, %[[L0]] ], [ [[O_L7:%.*]], %[[L7]] ]
   ; CHECK:  [[V_L1:%.*]] = phi double [ 4.000000e+00, %[[L0]] ], [ [[V_L7:%.*]], %[[L7]] ]
   ; CHECK:  br i1 {{%.+}}, label %[[L2:.*]], label %[[L4:.*]]
   ; CHECK: [[L2]]:
   ; CHECK:  br i1 {{%.+}}, label %[[L3:.+]], label %[[L7]]
   ; CHECK: [[L3]]:
-  ; CHECK:  [[S_L3:%.*]] = or i[[#SBITS]]
-  ; CHECK:  [[AS_NE_L3:%.*]] = icmp ne i[[#SBITS]] [[AS]], 0
+  ; CHECK:  [[S_L3:%.*]] = or i8
+  ; CHECK:  [[AS_NE_L3:%.*]] = icmp ne i8 [[AS]], 0
   ; CHECK:  [[O_L3:%.*]] = select i1 [[AS_NE_L3]], i32 %{{[0-9]+}}, i32 [[O_L1]]
   ; CHECK:  [[V_L3:%.*]] = fsub double [[V_L1]], %{{.+}}
   ; CHECK:  br label %[[L7]]
@@ -36,13 +33,13 @@ define void @cached_shadows(double %arg) {
   ; CHECK: [[L5]]:
   ; CHECK:  br label %[[L6]]
   ; CHECK: [[L6]]:
-  ; CHECK:  [[S_L6:%.*]] = or i[[#SBITS]]
-  ; CHECK:  [[AS_NE_L6:%.*]] = icmp ne i[[#SBITS]] [[AS]], 0
+  ; CHECK:  [[S_L6:%.*]] = or i8
+  ; CHECK:  [[AS_NE_L6:%.*]] = icmp ne i8 [[AS]], 0
   ; CHECK:  [[O_L6:%.*]] = select i1 [[AS_NE_L6]], i32 [[AO]], i32 [[O_L1]]
   ; CHECK:  [[V_L6:%.*]] = fadd double [[V_L1]], %{{.+}}
   ; CHECK:  br label %[[L7]]
   ; CHECK: [[L7]]:
-  ; CHECK:  [[S_L7]] = phi i[[#SBITS]] [ [[S_L3]], %[[L3]] ], [ [[S_L1]], %[[L2]] ], [ [[S_L6]], %[[L6]] ]
+  ; CHECK:  [[S_L7]] = phi i8 [ [[S_L3]], %[[L3]] ], [ [[S_L1]], %[[L2]] ], [ [[S_L6]], %[[L6]] ]
   ; CHECK:  [[O_L7]] = phi i32 [ [[O_L3]], %[[L3]] ], [ [[O_L1]], %[[L2]] ], [ [[O_L6]], %[[L6]] ]
   ; CHECK:  [[V_L7]] = phi double [ [[V_L3]], %[[L3]] ], [ [[V_L1]], %[[L2]] ], [ [[V_L6]], %[[L6]] ]
   ; CHECK:  br i1 %{{.+}}, label %[[L1]], label %[[L8:.+]]

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/origin_call.ll b/llvm/test/Instrumentation/DataFlowSanitizer/origin_call.ll
index aabdafd2ef6c7..5ee9927b9f5a8 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/origin_call.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/origin_call.ll
@@ -3,9 +3,6 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
 target triple = "x86_64-unknown-linux-gnu"
 
 ; CHECK: @__dfsan_arg_tls = external thread_local(initialexec) global [[TLS_ARR:\[100 x i64\]]]
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 define i1 @arg_overflow(
 i1   %a0, i1   %a1, i1   %a2, i1   %a3, i1   %a4, i1   %a5, i1   %a6, i1   %a7, i1   %a8, i1   %a9,
 i1  %a10, i1  %a11, i1  %a12, i1  %a13, i1  %a14, i1  %a15, i1  %a16, i1  %a17, i1  %a18, i1  %a19,
@@ -40,8 +37,8 @@ i1 %a200
 define i1 @param_overflow(i1 %a) {
   ; CHECK: @param_overflow.dfsan
   ; CHECK: store i32 %1, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 199), align 4
-  ; CHECK-NEXT: store i[[#SBITS]] %2, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 398) to ptr), align 2
-  ; CHECK-NEXT: store i[[#SBITS]] %2, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 400) to ptr), align 2
+  ; CHECK-NEXT: store i8 %2, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 398) to ptr), align 2
+  ; CHECK-NEXT: store i8 %2, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 400) to ptr), align 2
   ; CHECK-NEXT: %r = call i1 @arg_overflow.dfsan
   ; CHECK: %_dfsret_o = load i32, ptr @__dfsan_retval_origin_tls, align 4
   ; CHECK: store i32 %_dfsret_o, ptr @__dfsan_retval_origin_tls, align 4
@@ -76,7 +73,7 @@ declare void @foo(i1 %a)
 
 define void @param_with_zero_shadow() {
   ; CHECK: @param_with_zero_shadow.dfsan
-  ; CHECK-NEXT: store i[[#SBITS]] 0, ptr @__dfsan_arg_tls, align 2
+  ; CHECK-NEXT: store i8 0, ptr @__dfsan_arg_tls, align 2
   ; CHECK-NEXT: call void @foo.dfsan(i1 true)
 
   call void @foo(i1 1)

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/origin_load.ll b/llvm/test/Instrumentation/DataFlowSanitizer/origin_load.ll
index fbaadd7aeb9fe..0c84c7975cd19 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/origin_load.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/origin_load.ll
@@ -3,9 +3,6 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 define {} @load0(ptr %p) {
   ; CHECK-LABEL: @load0.dfsan
   ; CHECK-NEXT: %a = load {}, ptr %p, align 1
@@ -19,12 +16,12 @@ define {} @load0(ptr %p) {
 
 define i16 @load_non_escaped_alloca() {
   ; CHECK-LABEL: @load_non_escaped_alloca.dfsan
-  ; CHECK-NEXT: %[[#S_ALLOCA:]] = alloca i[[#SBITS]], align [[#SBYTES]]
+  ; CHECK-NEXT: %[[#S_ALLOCA:]] = alloca i8, align 1
   ; CHECK-NEXT: %_dfsa = alloca i32, align 4
-  ; CHECK:      %[[#SHADOW:]] = load i[[#SBITS]], ptr %[[#S_ALLOCA]], align [[#SBYTES]]
+  ; CHECK:      %[[#SHADOW:]] = load i8, ptr %[[#S_ALLOCA]], align 1
   ; CHECK-NEXT: %[[#ORIGIN:]] = load i32, ptr %_dfsa, align 4
   ; CHECK-NEXT: %a = load i16, ptr %p, align 2
-  ; CHECK-NEXT: store i[[#SBITS]] %[[#SHADOW]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK-NEXT: store i8 %[[#SHADOW]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK-NEXT: store i32 %[[#ORIGIN]], ptr @__dfsan_retval_origin_tls, align 4
 
   %p = alloca i16
@@ -41,12 +38,12 @@ define ptr @load_escaped_alloca() {
   ; CHECK-NEXT:   %[[#ORIGIN_ADDR:]] = and i64 %[[#ORIGIN_OFFSET]], -4
   ; CHECK-NEXT:   %[[#ORIGIN_PTR:]] = inttoptr i64 %[[#ORIGIN_ADDR]] to ptr
   ; CHECK-NEXT:   {{%.*}} = load i32, ptr %[[#ORIGIN_PTR]], align 4
-  ; CHECK-NEXT:   %[[#SHADOW_PTR1:]] = getelementptr i[[#SBITS]], ptr %[[#SHADOW_PTR0]], i64 1
-  ; CHECK-NEXT:   %[[#SHADOW:]]  = load i[[#SBITS]], ptr %[[#SHADOW_PTR0]], align [[#SBYTES]]
-  ; CHECK-NEXT:   %[[#SHADOW+1]] = load i[[#SBITS]], ptr %[[#SHADOW_PTR1]], align [[#SBYTES]]
-  ; CHECK-NEXT:   {{%.*}} = or i[[#SBITS]] %[[#SHADOW]], %[[#SHADOW+1]]
+  ; CHECK-NEXT:   %[[#SHADOW_PTR1:]] = getelementptr i8, ptr %[[#SHADOW_PTR0]], i64 1
+  ; CHECK-NEXT:   %[[#SHADOW:]]  = load i8, ptr %[[#SHADOW_PTR0]], align 1
+  ; CHECK-NEXT:   %[[#SHADOW+1]] = load i8, ptr %[[#SHADOW_PTR1]], align 1
+  ; CHECK-NEXT:   {{%.*}} = or i8 %[[#SHADOW]], %[[#SHADOW+1]]
   ; CHECK-NEXT:   %a = load i16, ptr %p, align 2
-  ; CHECK-NEXT:   store i[[#SBITS]] 0, ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK-NEXT:   store i8 0, ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK-NEXT:   store i32 0, ptr @__dfsan_retval_origin_tls, align 4
 
   %p = alloca i16
@@ -58,7 +55,7 @@ define ptr @load_escaped_alloca() {
 define i1 @load_global() {
   ; CHECK-LABEL: @load_global.dfsan
   ; CHECK: %a = load i1, ptr @X, align 1
-  ; CHECK-NEXT: store i[[#SBITS]] 0, ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK-NEXT: store i8 0, ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK-NEXT: store i32 0, ptr @__dfsan_retval_origin_tls, align 4
 
   %a = load i1, ptr @X
@@ -69,7 +66,7 @@ define i1 @load1(ptr %p) {
   ; CHECK-LABEL:             @load1.dfsan
 
   ; COMBINE_LOAD_PTR-NEXT: %[[#PO:]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
 
   ; CHECK-NEXT:            %[[#INTP:]] = ptrtoint ptr %p to i64
   ; CHECK-NEXT:            %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#MASK]]
@@ -78,14 +75,14 @@ define i1 @load1(ptr %p) {
   ; CHECK-NEXT:            %[[#ORIGIN_ADDR:]] = and i64 %[[#ORIGIN_OFFSET]], -4
   ; CHECK-NEXT:            %[[#ORIGIN_PTR:]] = inttoptr i64 %[[#ORIGIN_ADDR]] to ptr
   ; CHECK-NEXT:            %[[#AO:]] = load i32, ptr %[[#ORIGIN_PTR]], align 4
-  ; CHECK-NEXT:            %[[#AS:]] = load i[[#SBITS]], ptr %[[#SHADOW_PTR]], align [[#SBYTES]]
+  ; CHECK-NEXT:            %[[#AS:]] = load i8, ptr %[[#SHADOW_PTR]], align 1
 
-  ; COMBINE_LOAD_PTR-NEXT: %[[#AS:]] = or i[[#SBITS]] %[[#AS]], %[[#PS]]
-  ; COMBINE_LOAD_PTR-NEXT: %[[#NZ:]] = icmp ne i[[#SBITS]] %[[#PS]], 0
+  ; COMBINE_LOAD_PTR-NEXT: %[[#AS:]] = or i8 %[[#AS]], %[[#PS]]
+  ; COMBINE_LOAD_PTR-NEXT: %[[#NZ:]] = icmp ne i8 %[[#PS]], 0
   ; COMBINE_LOAD_PTR-NEXT: %[[#AO:]] = select i1 %[[#NZ]], i32 %[[#PO]], i32 %[[#AO]]
 
   ; CHECK-NEXT:            %a = load i1, ptr %p, align 1
-  ; CHECK-NEXT:            store i[[#SBITS]] %[[#AS]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK-NEXT:            store i8 %[[#AS]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK-NEXT:            store i32 %[[#AO]], ptr @__dfsan_retval_origin_tls, align 4
 
   %a = load i1, ptr %p
@@ -96,7 +93,7 @@ define i16 @load16(i1 %i, ptr %p) {
   ; CHECK-LABEL: @load16.dfsan
 
   ; COMBINE_LOAD_PTR-NEXT: %[[#PO:]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
-  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
 
   ; CHECK-NEXT:            %[[#INTP:]] = ptrtoint ptr %p to i64
   ; CHECK-NEXT:            %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#MASK]]
@@ -105,17 +102,17 @@ define i16 @load16(i1 %i, ptr %p) {
   ; CHECK-NEXT:            %[[#ORIGIN_ADDR:]] = and i64 %[[#ORIGIN_OFFSET]], -4
   ; CHECK-NEXT:            %[[#ORIGIN_PTR:]] = inttoptr i64 %[[#ORIGIN_ADDR]] to ptr
   ; CHECK-NEXT:            %[[#AO:]] = load i32, ptr %[[#ORIGIN_PTR]], align 4
-  ; CHECK-NEXT:            %[[#SHADOW_PTR1:]] = getelementptr i[[#SBITS]], ptr %[[#SHADOW_PTR0]], i64 1
-  ; CHECK-NEXT:            %[[#SHADOW:]]  = load i[[#SBITS]], ptr %[[#SHADOW_PTR0]], align [[#SBYTES]]
-  ; CHECK-NEXT:            %[[#SHADOW+1]] = load i[[#SBITS]], ptr %[[#SHADOW_PTR1]], align [[#SBYTES]]
-  ; CHECK-NEXT:            %[[#AS:]] = or i[[#SBITS]] %[[#SHADOW]], %[[#SHADOW+1]]
+  ; CHECK-NEXT:            %[[#SHADOW_PTR1:]] = getelementptr i8, ptr %[[#SHADOW_PTR0]], i64 1
+  ; CHECK-NEXT:            %[[#SHADOW:]]  = load i8, ptr %[[#SHADOW_PTR0]], align 1
+  ; CHECK-NEXT:            %[[#SHADOW+1]] = load i8, ptr %[[#SHADOW_PTR1]], align 1
+  ; CHECK-NEXT:            %[[#AS:]] = or i8 %[[#SHADOW]], %[[#SHADOW+1]]
 
-  ; COMBINE_LOAD_PTR-NEXT: %[[#AS:]] = or i[[#SBITS]] %[[#AS]], %[[#PS]]
-  ; COMBINE_LOAD_PTR-NEXT: %[[#NZ:]] = icmp ne i[[#SBITS]] %[[#PS]], 0
+  ; COMBINE_LOAD_PTR-NEXT: %[[#AS:]] = or i8 %[[#AS]], %[[#PS]]
+  ; COMBINE_LOAD_PTR-NEXT: %[[#NZ:]] = icmp ne i8 %[[#PS]], 0
   ; COMBINE_LOAD_PTR-NEXT: %[[#AO:]] = select i1 %[[#NZ]], i32 %[[#PO]], i32 %[[#AO]]
 
   ; CHECK-NEXT:            %a = load i16, ptr %p, align 2
-  ; CHECK-NEXT:            store i[[#SBITS]] %[[#AS]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK-NEXT:            store i8 %[[#AS]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK-NEXT:            store i32 %[[#AO]], ptr @__dfsan_retval_origin_tls, align 4
 
   %a = load i16, ptr %p
@@ -126,7 +123,7 @@ define i32 @load32(ptr %p) {
   ; CHECK-LABEL: @load32.dfsan
 
   ; COMBINE_LOAD_PTR-NEXT: %[[#PO:]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
 
   ; CHECK-NEXT:            %[[#INTP:]] = ptrtoint ptr %p to i64
   ; CHECK-NEXT:            %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#MASK]]
@@ -134,19 +131,19 @@ define i32 @load32(ptr %p) {
   ; CHECK-NEXT:            %[[#ORIGIN_ADDR:]] = add i64 %[[#SHADOW_OFFSET]], [[#ORIGIN_BASE]]
   ; CHECK-NEXT:            %[[#ORIGIN_PTR:]] = inttoptr i64 %[[#ORIGIN_ADDR]] to ptr
   ; CHECK-NEXT:            %[[#AO:]] = load i32, ptr %[[#ORIGIN_PTR]], align 4
-  ; CHECK-NEXT:            %[[#WIDE_SHADOW:]] = load i[[#WSBITS:mul(SBITS,4)]], ptr %[[#SHADOW_PTR]], align [[#SBYTES]]
-  ; CHECK-NEXT:            %[[#WIDE_SHADOW+1]] = lshr i[[#WSBITS]] %[[#WIDE_SHADOW]], [[#mul(SBITS,2)]]
+  ; CHECK-NEXT:            %[[#WIDE_SHADOW:]] = load i[[#WSBITS:32]], ptr %[[#SHADOW_PTR]], align 1
+  ; CHECK-NEXT:            %[[#WIDE_SHADOW+1]] = lshr i[[#WSBITS]] %[[#WIDE_SHADOW]], 16
   ; CHECK-NEXT:            %[[#WIDE_SHADOW+2]] = or i[[#WSBITS]] %[[#WIDE_SHADOW]], %[[#WIDE_SHADOW+1]]
-  ; CHECK-NEXT:            %[[#WIDE_SHADOW+3]] = lshr i[[#WSBITS]] %[[#WIDE_SHADOW+2]], [[#SBITS]]
+  ; CHECK-NEXT:            %[[#WIDE_SHADOW+3]] = lshr i[[#WSBITS]] %[[#WIDE_SHADOW+2]], 8
   ; CHECK-NEXT:            %[[#WIDE_SHADOW+4]] = or i[[#WSBITS]] %[[#WIDE_SHADOW+2]], %[[#WIDE_SHADOW+3]]
-  ; CHECK-NEXT:            %[[#SHADOW:]] = trunc i[[#WSBITS]] %[[#WIDE_SHADOW+4]] to i[[#SBITS]]
+  ; CHECK-NEXT:            %[[#SHADOW:]] = trunc i[[#WSBITS]] %[[#WIDE_SHADOW+4]] to i8
 
-  ; COMBINE_LOAD_PTR-NEXT: %[[#SHADOW:]] = or i[[#SBITS]] %[[#SHADOW]], %[[#PS]]
-  ; COMBINE_LOAD_PTR-NEXT: %[[#NZ:]] = icmp ne i[[#SBITS]] %[[#PS]], 0
+  ; COMBINE_LOAD_PTR-NEXT: %[[#SHADOW:]] = or i8 %[[#SHADOW]], %[[#PS]]
+  ; COMBINE_LOAD_PTR-NEXT: %[[#NZ:]] = icmp ne i8 %[[#PS]], 0
   ; COMBINE_LOAD_PTR-NEXT: %[[#AO:]] = select i1 %[[#NZ]], i32 %[[#PO]], i32 %[[#AO]]
 
   ; CHECK-NEXT:            %a = load i32, ptr %p, align 4
-  ; CHECK-NEXT:            store i[[#SBITS]] %[[#SHADOW]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK-NEXT:            store i8 %[[#SHADOW]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK-NEXT:            store i32 %[[#AO]], ptr @__dfsan_retval_origin_tls, align 4
 
   %a = load i32, ptr %p
@@ -157,7 +154,7 @@ define i64 @load64(ptr %p) {
   ; CHECK-LABEL: @load64.dfsan
 
   ; COMBINE_LOAD_PTR-NEXT: %[[#PO:]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
 
   ; CHECK-NEXT:            %[[#INTP:]] = ptrtoint ptr %p to i64
   ; CHECK-NEXT:            %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#MASK]]
@@ -165,7 +162,7 @@ define i64 @load64(ptr %p) {
   ; CHECK-NEXT:            %[[#ORIGIN_ADDR:]] = add i64 %[[#SHADOW_OFFSET]], [[#ORIGIN_BASE]]
   ; CHECK-NEXT:            %[[#ORIGIN_PTR:]] = inttoptr i64 %[[#ORIGIN_ADDR]] to ptr
   ; CHECK-NEXT:            %[[#ORIGIN:]] = load i32, ptr %[[#ORIGIN_PTR]], align 8
-  ; CHECK-NEXT:            %[[#WIDE_SHADOW:]] = load i64, ptr %[[#SHADOW_PTR]], align [[#SBYTES]]
+  ; CHECK-NEXT:            %[[#WIDE_SHADOW:]] = load i64, ptr %[[#SHADOW_PTR]], align 1
   ; CHECK-NEXT:            %[[#WIDE_SHADOW_LO:]] = shl i64 %[[#WIDE_SHADOW]], 32
   ; CHECK-NEXT:            %[[#ORIGIN2_PTR:]] = getelementptr i32, ptr %[[#ORIGIN_PTR]], i64 1
   ; CHECK-NEXT:            %[[#ORIGIN2:]] = load i32, ptr %[[#ORIGIN2_PTR]], align 8
@@ -175,18 +172,18 @@ define i64 @load64(ptr %p) {
   ; CHECK-NEXT:            %[[#WIDE_SHADOW:]] = or i64 %[[#WIDE_SHADOW]], %[[#WIDE_SHADOW_SHIFTED]]
   ; CHECK-NEXT:            %[[#WIDE_SHADOW_SHIFTED:]] = lshr i64 %[[#WIDE_SHADOW]], 8
   ; CHECK-NEXT:            %[[#WIDE_SHADOW:]] = or i64 %[[#WIDE_SHADOW]], %[[#WIDE_SHADOW_SHIFTED]]
-  ; CHECK-NEXT:            %[[#SHADOW:]] = trunc i64 %[[#WIDE_SHADOW]] to i[[#SBITS]]
+  ; CHECK-NEXT:            %[[#SHADOW:]] = trunc i64 %[[#WIDE_SHADOW]] to i8
   ; CHECK-NEXT:            %[[#SHADOW_NZ:]] = icmp ne i64 %[[#WIDE_SHADOW_LO]], 0
   ; CHECK-NEXT:            %[[#ORIGIN:]] = select i1 %[[#SHADOW_NZ]], i32 %[[#ORIGIN]], i32 %[[#ORIGIN2]]
   ; CHECK8-NEXT:           %[[#SHADOW_NZ:]] = icmp ne i64 %[[#WIDE_SHADOW_LO]], 0
   ; CHECK8-NEXT:           %[[#ORIGIN:]] = select i1 %[[#SHADOW_NZ]], i32 %[[#ORIGIN]], i32 %[[#ORIGIN2]]
 
-  ; COMBINE_LOAD_PTR-NEXT: %[[#SHADOW:]] = or i[[#SBITS]] %[[#SHADOW]], %[[#PS]]
-  ; COMBINE_LOAD_PTR-NEXT: %[[#NZ:]] = icmp ne i[[#SBITS]] %[[#PS]], 0
+  ; COMBINE_LOAD_PTR-NEXT: %[[#SHADOW:]] = or i8 %[[#SHADOW]], %[[#PS]]
+  ; COMBINE_LOAD_PTR-NEXT: %[[#NZ:]] = icmp ne i8 %[[#PS]], 0
   ; COMBINE_LOAD_PTR-NEXT: %[[#ORIGIN:]] = select i1 %[[#NZ]], i32 %[[#PO]], i32 %[[#ORIGIN]]
 
   ; CHECK-NEXT:            %a = load i64, ptr %p, align 8
-  ; CHECK-NEXT:            store i[[#SBITS]] %[[#SHADOW]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK-NEXT:            store i8 %[[#SHADOW]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK-NEXT:            store i32 %[[#ORIGIN]], ptr @__dfsan_retval_origin_tls, align 4
 
   %a = load i64, ptr %p
@@ -197,19 +194,19 @@ define i64 @load64_align2(ptr %p) {
   ; CHECK-LABEL: @load64_align2.dfsan
 
   ; COMBINE_LOAD_PTR-NEXT: %[[#PO:]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
 
   ; CHECK-NEXT:            %[[#LABEL_ORIGIN:]] = call zeroext i64 @__dfsan_load_label_and_origin(ptr %p, i64 8)
   ; CHECK-NEXT:            %[[#LABEL_ORIGIN+1]] = lshr i64 %[[#LABEL_ORIGIN]], 32
-  ; CHECK-NEXT:            %[[#LABEL:]] = trunc i64 %[[#LABEL_ORIGIN+1]] to i[[#SBITS]]
+  ; CHECK-NEXT:            %[[#LABEL:]] = trunc i64 %[[#LABEL_ORIGIN+1]] to i8
   ; CHECK-NEXT:            %[[#ORIGIN:]] = trunc i64 %[[#LABEL_ORIGIN]] to i32
 
-  ; COMBINE_LOAD_PTR-NEXT: %[[#LABEL:]] = or i[[#SBITS]] %[[#LABEL]], %[[#PS]]
-  ; COMBINE_LOAD_PTR-NEXT: %[[#NZ:]] = icmp ne i[[#SBITS]] %[[#PS]], 0
+  ; COMBINE_LOAD_PTR-NEXT: %[[#LABEL:]] = or i8 %[[#LABEL]], %[[#PS]]
+  ; COMBINE_LOAD_PTR-NEXT: %[[#NZ:]] = icmp ne i8 %[[#PS]], 0
   ; COMBINE_LOAD_PTR-NEXT: %[[#ORIGIN:]] = select i1 %[[#NZ]], i32 %[[#PO]], i32 %[[#ORIGIN]]
 
   ; CHECK-NEXT:            %a = load i64, ptr %p, align 2
-  ; CHECK-NEXT:            store i[[#SBITS]] %[[#LABEL]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK-NEXT:            store i8 %[[#LABEL]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK-NEXT:            store i32 %[[#ORIGIN]], ptr @__dfsan_retval_origin_tls, align 4
 
   %a = load i64, ptr %p, align 2
@@ -220,7 +217,7 @@ define i128 @load128(ptr %p) {
   ; CHECK-LABEL: @load128.dfsan
 
   ; COMBINE_LOAD_PTR-NEXT: %[[#PO:]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
 
   ; CHECK-NEXT:            %[[#INTP:]] = ptrtoint ptr %p to i64
   ; CHECK-NEXT:            %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#MASK]]
@@ -228,12 +225,12 @@ define i128 @load128(ptr %p) {
   ; CHECK-NEXT:            %[[#ORIGIN_ADDR:]] = add i64 %[[#SHADOW_OFFSET]], [[#ORIGIN_BASE]]
   ; CHECK-NEXT:            %[[#ORIGIN1_PTR:]] = inttoptr i64 %[[#ORIGIN_ADDR]] to ptr
   ; CHECK-NEXT:            %[[#ORIGIN1:]] = load i32, ptr %[[#ORIGIN1_PTR]], align 8
-  ; CHECK-NEXT:            %[[#WIDE_SHADOW1:]] = load i64, ptr %[[#SHADOW_PTR]], align [[#SBYTES]]
+  ; CHECK-NEXT:            %[[#WIDE_SHADOW1:]] = load i64, ptr %[[#SHADOW_PTR]], align 1
   ; CHECK-NEXT:            %[[#WIDE_SHADOW1_LO:]] = shl i64 %[[#WIDE_SHADOW1]], 32
   ; CHECK-NEXT:            %[[#ORIGIN2_PTR:]] = getelementptr i32, ptr %[[#ORIGIN1_PTR]], i64 1
   ; CHECK-NEXT:            %[[#ORIGIN2:]] = load i32, ptr %[[#ORIGIN2_PTR]], align 8
   ; CHECK-NEXT:            %[[#WIDE_SHADOW2_PTR:]] = getelementptr i64, ptr %[[#SHADOW_PTR]], i64 1
-  ; CHECK-NEXT:            %[[#WIDE_SHADOW2:]] = load i64, ptr %[[#WIDE_SHADOW2_PTR]], align [[#SBYTES]]
+  ; CHECK-NEXT:            %[[#WIDE_SHADOW2:]] = load i64, ptr %[[#WIDE_SHADOW2_PTR]], align 1
   ; CHECK-NEXT:            %[[#WIDE_SHADOW:]] = or i64 %[[#WIDE_SHADOW1]], %[[#WIDE_SHADOW2]]
   ; CHECK-NEXT:            %[[#ORIGIN3_PTR:]] = getelementptr i32, ptr %[[#ORIGIN2_PTR]], i64 1
   ; CHECK-NEXT:            %[[#ORIGIN3:]] = load i32, ptr %[[#ORIGIN3_PTR]], align 8
@@ -246,7 +243,7 @@ define i128 @load128(ptr %p) {
   ; CHECK-NEXT:            %[[#WIDE_SHADOW:]] = or i64 %[[#WIDE_SHADOW]], %[[#WIDE_SHADOW_SHIFTED]]
   ; CHECK-NEXT:            %[[#WIDE_SHADOW_SHIFTED:]] = lshr i64 %[[#WIDE_SHADOW]], 8
   ; CHECK-NEXT:            %[[#WIDE_SHADOW:]] = or i64 %[[#WIDE_SHADOW]], %[[#WIDE_SHADOW_SHIFTED]]
-  ; CHECK-NEXT:            %[[#SHADOW:]] = trunc i64 %[[#WIDE_SHADOW]] to i[[#SBITS]]
+  ; CHECK-NEXT:            %[[#SHADOW:]] = trunc i64 %[[#WIDE_SHADOW]] to i8
   ; CHECK-NEXT:            %[[#SHADOW1_LO_NZ:]] = icmp ne i64 %[[#WIDE_SHADOW1_LO]], 0
   ; CHECK-NEXT:            %[[#ORIGIN12:]] = select i1 %[[#SHADOW1_LO_NZ]], i32 %[[#ORIGIN1]], i32 %[[#ORIGIN2]]
   ; CHECK-NEXT:            %[[#SHADOW2_NZ:]] = icmp ne i64 %[[#WIDE_SHADOW2]], 0
@@ -254,12 +251,12 @@ define i128 @load128(ptr %p) {
   ; CHECK-NEXT:            %[[#SHADOW2_LO_NZ:]] = icmp ne i64 %[[#WIDE_SHADOW2_LO]], 0
   ; CHECK-NEXT:            %[[#ORIGIN:]] = select i1 %[[#SHADOW2_LO_NZ]], i32 %[[#ORIGIN3]], i32 %[[#ORIGIN124]]
 
-  ; COMBINE_LOAD_PTR-NEXT: %[[#SHADOW:]] = or i[[#SBITS]] %[[#SHADOW]], %[[#PS]]
-  ; COMBINE_LOAD_PTR-NEXT: %[[#NZ:]] = icmp ne i[[#SBITS]] %[[#PS]], 0
+  ; COMBINE_LOAD_PTR-NEXT: %[[#SHADOW:]] = or i8 %[[#SHADOW]], %[[#PS]]
+  ; COMBINE_LOAD_PTR-NEXT: %[[#NZ:]] = icmp ne i8 %[[#PS]], 0
   ; COMBINE_LOAD_PTR-NEXT: %[[#ORIGIN:]] = select i1 %[[#NZ]], i32 %[[#PO]], i32 %[[#ORIGIN]]
 
   ; CHECK-NEXT:            %a = load i128, ptr %p, align 8
-  ; CHECK-NEXT:            store i[[#SBITS]] %[[#SHADOW]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK-NEXT:            store i8 %[[#SHADOW]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK-NEXT:            store i32 %[[#ORIGIN]], ptr @__dfsan_retval_origin_tls, align 4
 
   %a = load i128, ptr %p
@@ -270,19 +267,19 @@ define i17 @load17(ptr %p) {
   ; CHECK-LABEL: @load17.dfsan
 
   ; COMBINE_LOAD_PTR-NEXT: %[[#PO:]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; COMBINE_LOAD_PTR-NEXT: %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
 
   ; CHECK-NEXT: %[[#LABEL_ORIGIN:]] = call zeroext i64 @__dfsan_load_label_and_origin(ptr %p, i64 3)
   ; CHECK-NEXT: %[[#LABEL_ORIGIN_H32:]] = lshr i64 %[[#LABEL_ORIGIN]], 32
-  ; CHECK-NEXT: %[[#LABEL:]] = trunc i64 %[[#LABEL_ORIGIN_H32]] to i[[#SBITS]]
+  ; CHECK-NEXT: %[[#LABEL:]] = trunc i64 %[[#LABEL_ORIGIN_H32]] to i8
   ; CHECK-NEXT: %[[#ORIGIN:]] = trunc i64 %[[#LABEL_ORIGIN]] to i32
 
-  ; COMBINE_LOAD_PTR-NEXT: %[[#LABEL:]] = or i[[#SBITS]] %[[#LABEL]], %[[#PS]]
-  ; COMBINE_LOAD_PTR-NEXT: %[[#NZ:]] = icmp ne i[[#SBITS]] %[[#PS]], 0
+  ; COMBINE_LOAD_PTR-NEXT: %[[#LABEL:]] = or i8 %[[#LABEL]], %[[#PS]]
+  ; COMBINE_LOAD_PTR-NEXT: %[[#NZ:]] = icmp ne i8 %[[#PS]], 0
   ; COMBINE_LOAD_PTR-NEXT: %[[#ORIGIN:]] = select i1 %[[#NZ]], i32 %[[#PO]], i32 %[[#ORIGIN]]
 
   ; CHECK-NEXT: %a = load i17, ptr %p, align 4
-  ; CHECK-NEXT: store i[[#SBITS]] %[[#LABEL]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK-NEXT: store i8 %[[#LABEL]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK-NEXT: store i32 %[[#ORIGIN]], ptr @__dfsan_retval_origin_tls, align 4
 
   %a = load i17, ptr %p, align 4

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/origin_mem_intrinsic.ll b/llvm/test/Instrumentation/DataFlowSanitizer/origin_mem_intrinsic.ll
index 87e832fdbd3e8..f8adb0100b793 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/origin_mem_intrinsic.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/origin_mem_intrinsic.ll
@@ -2,9 +2,6 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 declare void @llvm.memcpy.p0.p0.i32(ptr, ptr, i32, i1)
 declare void @llvm.memmove.p0.p0.i32(ptr, ptr, i32, i1)
 declare void @llvm.memset.p0.i64(ptr nocapture, i8, i64, i1)
@@ -13,7 +10,7 @@ define void @memcpy(ptr %d, ptr %s, i32 %l) {
   ; CHECK: @memcpy.dfsan
   ; CHECK: [[L64:%.*]] = zext i32 %l to i64
   ; CHECK: call void @__dfsan_mem_origin_transfer(ptr %d, ptr %s, i64 [[L64]])
-  ; CHECK: call void @llvm.memcpy.p0.p0.i32(ptr align [[#SBYTES]] {{.*}}, ptr align [[#SBYTES]] {{.*}}, i32 {{.*}}, i1 false)
+  ; CHECK: call void @llvm.memcpy.p0.p0.i32(ptr align 1 {{.*}}, ptr align 1 {{.*}}, i32 {{.*}}, i1 false)
   ; CHECK: call void @llvm.memcpy.p0.p0.i32(ptr %d, ptr %s, i32 %l, i1 false)
 
   call void @llvm.memcpy.p0.p0.i32(ptr %d, ptr %s, i32 %l, i1 0)
@@ -24,7 +21,7 @@ define void @memmove(ptr %d, ptr %s, i32 %l) {
   ; CHECK: @memmove.dfsan
   ; CHECK: [[L64:%.*]] = zext i32 %l to i64
   ; CHECK: call void @__dfsan_mem_origin_transfer(ptr %d, ptr %s, i64 [[L64]])
-  ; CHECK: call void @llvm.memmove.p0.p0.i32(ptr align [[#SBYTES]] {{.*}}, ptr align [[#SBYTES]] {{.*}}, i32 {{.*}}, i1 false)
+  ; CHECK: call void @llvm.memmove.p0.p0.i32(ptr align 1 {{.*}}, ptr align 1 {{.*}}, i32 {{.*}}, i1 false)
   ; CHECK: call void @llvm.memmove.p0.p0.i32(ptr %d, ptr %s, i32 %l, i1 false)
 
   call void @llvm.memmove.p0.p0.i32(ptr %d, ptr %s, i32 %l, i1 0)
@@ -34,8 +31,8 @@ define void @memmove(ptr %d, ptr %s, i32 %l) {
 define void @memset(ptr %p, i8 %v) {
   ; CHECK: @memset.dfsan
   ; CHECK: [[O:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
-  ; CHECK: [[S:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN:2]]
-  ; CHECK: call void @__dfsan_set_label(i[[#SBITS]] [[S]], i32 [[O]], ptr %p, i64 1)
+  ; CHECK: [[S:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN:2]]
+  ; CHECK: call void @__dfsan_set_label(i8 [[S]], i32 [[O]], ptr %p, i64 1)
   call void @llvm.memset.p0.i64(ptr %p, i8 %v, i64 1, i1 1)
   ret void
 }
\ No newline at end of file

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/origin_other_ops.ll b/llvm/test/Instrumentation/DataFlowSanitizer/origin_other_ops.ll
index b78e10b1cf935..3b1020433de69 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/origin_other_ops.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/origin_other_ops.ll
@@ -4,9 +4,6 @@ target triple = "x86_64-unknown-linux-gnu"
 
 ; CHECK: @__dfsan_arg_tls = external thread_local(initialexec) global [[TLS_ARR:\[100 x i64\]]]
 ; CHECK: @__dfsan_retval_tls = external thread_local(initialexec) global [[TLS_ARR]]
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 define float @unop(float %f) {
   ; CHECK: @unop.dfsan
   ; CHECK: [[FO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
@@ -20,8 +17,8 @@ define i1 @binop(i1 %a, i1 %b) {
   ; CHECK: @binop.dfsan
   ; CHECK: [[BO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
   ; CHECK: [[AO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; CHECK: [[BS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
-  ; CHECK: [[NE:%.*]] = icmp ne i[[#SBITS]] [[BS]], 0
+  ; CHECK: [[BS:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
+  ; CHECK: [[NE:%.*]] = icmp ne i8 [[BS]], 0
   ; CHECK: [[MO:%.*]] = select i1 [[NE]], i32 [[BO]], i32 [[AO]]
   ; CHECK: store i32 [[MO]], ptr @__dfsan_retval_origin_tls, align 4
 
@@ -42,8 +39,8 @@ define i1 @cmpop(i1 %a, i1 %b) {
   ; CHECK: @cmpop.dfsan
   ; CHECK: [[BO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
   ; CHECK: [[AO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; CHECK: [[BS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
-  ; CHECK: [[NE:%.*]] = icmp ne i[[#SBITS]] [[BS]], 0
+  ; CHECK: [[BS:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
+  ; CHECK: [[NE:%.*]] = icmp ne i8 [[BS]], 0
   ; CHECK: [[MO:%.*]] = select i1 [[NE]], i32 [[BO]], i32 [[AO]]
   ; CHECK: store i32 [[MO]], ptr @__dfsan_retval_origin_tls, align 4
 
@@ -57,14 +54,14 @@ define ptr @gepop(ptr %p, i32 %a, i32 %b, i32 %c) {
   ; CHECK: [[BO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
   ; CHECK: [[AO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
   ; CHECK: [[PO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; CHECK: [[CS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 6) to ptr), align 2
-  ; CHECK: [[BS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align 2
-  ; CHECK: [[AS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
-  ; CHECK: [[AS_NE:%.*]] = icmp ne i[[#SBITS]] [[AS]], 0
+  ; CHECK: [[CS:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 6) to ptr), align 2
+  ; CHECK: [[BS:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align 2
+  ; CHECK: [[AS:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
+  ; CHECK: [[AS_NE:%.*]] = icmp ne i8 [[AS]], 0
   ; CHECK: [[APO:%.*]] = select i1 [[AS_NE]], i32 [[AO]], i32 [[PO]]
-  ; CHECK: [[BS_NE:%.*]] = icmp ne i[[#SBITS]] [[BS]], 0
+  ; CHECK: [[BS_NE:%.*]] = icmp ne i8 [[BS]], 0
   ; CHECK: [[ABPO:%.*]] = select i1 [[BS_NE]], i32 [[BO]], i32 [[APO]]
-  ; CHECK: [[CS_NE:%.*]] = icmp ne i[[#SBITS]] [[CS]], 0
+  ; CHECK: [[CS_NE:%.*]] = icmp ne i8 [[CS]], 0
   ; CHECK: [[ABCPO:%.*]] = select i1 [[CS_NE]], i32 [[CO]], i32 [[ABPO]]
   ; CHECK: store i32 [[ABCPO]], ptr @__dfsan_retval_origin_tls, align 4
 
@@ -76,8 +73,8 @@ define i32 @eeop(<4 x i32> %a, i32 %b) {
   ; CHECK: @eeop.dfsan
   ; CHECK: [[BO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
   ; CHECK: [[AO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; CHECK: [[BS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
-  ; CHECK: [[NE:%.*]] = icmp ne i[[#SBITS]] [[BS]], 0
+  ; CHECK: [[BS:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
+  ; CHECK: [[NE:%.*]] = icmp ne i8 [[BS]], 0
   ; CHECK: [[MO:%.*]] = select i1 [[NE]], i32 [[BO]], i32 [[AO]]
   ; CHECK: store i32 [[MO]], ptr @__dfsan_retval_origin_tls, align 4
 
@@ -90,11 +87,11 @@ define <4 x i32> @ieop(<4 x i32> %p, i32 %a, i32 %b) {
   ; CHECK: [[BO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
   ; CHECK: [[AO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
   ; CHECK: [[PO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; CHECK: [[BS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align 2
-  ; CHECK: [[AS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
-  ; CHECK: [[AS_NE:%.*]] = icmp ne i[[#SBITS]] [[AS]], 0
+  ; CHECK: [[BS:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align 2
+  ; CHECK: [[AS:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
+  ; CHECK: [[AS_NE:%.*]] = icmp ne i8 [[AS]], 0
   ; CHECK: [[APO:%.*]] = select i1 [[AS_NE]], i32 [[AO]], i32 [[PO]]
-  ; CHECK: [[BS_NE:%.*]] = icmp ne i[[#SBITS]] [[BS]], 0
+  ; CHECK: [[BS_NE:%.*]] = icmp ne i8 [[BS]], 0
   ; CHECK: [[ABPO:%.*]] = select i1 [[BS_NE]], i32 [[BO]], i32 [[APO]]
   ; CHECK: store i32 [[ABPO]], ptr @__dfsan_retval_origin_tls, align 4
 
@@ -106,8 +103,8 @@ define <4 x i32> @svop(<4 x i32> %a, <4 x i32> %b) {
   ; CHECK: @svop.dfsan
   ; CHECK: [[BO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
   ; CHECK: [[AO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; CHECK: [[BS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
-  ; CHECK: [[NE:%.*]] = icmp ne i[[#SBITS]] [[BS]], 0
+  ; CHECK: [[BS:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
+  ; CHECK: [[NE:%.*]] = icmp ne i8 [[BS]], 0
   ; CHECK: [[MO:%.*]] = select i1 [[NE]], i32 [[BO]], i32 [[AO]]
   ; CHECK: store i32 [[MO]], ptr @__dfsan_retval_origin_tls, align 4
   
@@ -128,16 +125,16 @@ define {i32, {float, float}} @ivop({i32, {float, float}} %a, {float, float} %b)
   ; CHECK: @ivop.dfsan
   ; CHECK: [[BO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
   ; CHECK: [[AO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; COMM: TODO simplify the expression [[#mul(2,SBYTES) + max(SBYTES,2)]] to
-  ; COMM: [[#mul(3,SBYTES)]], if shadow-tls-alignment is updated to match shadow
-  ; CHECK: [[BS:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 [[#mul(2,SBYTES) + max(SBYTES,2)]]) to ptr), align 2
-  ; CHECK: [[BS0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[BS]], 0
-  ; CHECK: [[BS1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[BS]], 1
-  ; CHECK: [[BS01:%.*]] = or i[[#SBITS]] [[BS0]], [[BS1]]
-  ; CHECK: [[NE:%.*]] = icmp ne i[[#SBITS]] [[BS01]], 0
+  ; COMM: TODO simplify the expression 4 to
+  ; COMM: 6, if shadow-tls-alignment is updated to match shadow
+  ; CHECK: [[BS:%.*]] = load { i8, i8 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align 2
+  ; CHECK: [[BS0:%.*]] = extractvalue { i8, i8 } [[BS]], 0
+  ; CHECK: [[BS1:%.*]] = extractvalue { i8, i8 } [[BS]], 1
+  ; CHECK: [[BS01:%.*]] = or i8 [[BS0]], [[BS1]]
+  ; CHECK: [[NE:%.*]] = icmp ne i8 [[BS01]], 0
   ; CHECK: [[MO:%.*]] = select i1 [[NE]], i32 [[BO]], i32 [[AO]]
   ; CHECK: store i32 [[MO]], ptr @__dfsan_retval_origin_tls, align 4
   
   %e = insertvalue {i32, {float, float}} %a, {float, float} %b, 1
   ret {i32, {float, float}} %e
-}
\ No newline at end of file
+}

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/origin_phi.ll b/llvm/test/Instrumentation/DataFlowSanitizer/origin_phi.ll
index 7333eaa65ef04..e98dd2b5bf85f 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/origin_phi.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/origin_phi.ll
@@ -3,25 +3,22 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
 target triple = "x86_64-unknown-linux-gnu"
 
 ; CHECK: @__dfsan_arg_tls = external thread_local(initialexec) global [[TLS_ARR:\[100 x i64\]]]
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 define i32 @phiop(i32 %a, i32 %b, i1 %c) {
   ; CHECK: @phiop.dfsan
   ; CHECK: entry:
   ; CHECK: [[BO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
   ; CHECK: [[AO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; CHECK: [[BS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN:2]]
-  ; CHECK: [[AS:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; CHECK: [[BS:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN:2]]
+  ; CHECK: [[AS:%.*]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
   ; CHECK: br i1 %c, label %next, label %done
   ; CHECK: next:
   ; CHECK: br i1 %c, label %T, label %F
   ; CHECK: T:
-  ; CHECK: [[BS_NE:%.*]] = icmp ne i[[#SBITS]] [[BS]], 0
+  ; CHECK: [[BS_NE:%.*]] = icmp ne i8 [[BS]], 0
   ; CHECK: [[BAO_T:%.*]] = select i1 [[BS_NE]], i32 [[BO]], i32 [[AO]]
   ; CHECK: br label %done
   ; CHECK: F:
-  ; CHECK: [[AS_NE:%.*]] = icmp ne i[[#SBITS]] [[AS]], 0
+  ; CHECK: [[AS_NE:%.*]] = icmp ne i8 [[AS]], 0
   ; CHECK: [[BAO_F:%.*]] = select i1 [[AS_NE]], i32 [[AO]], i32 [[BO]]
   ; CHECK: br label %done
   ; CHECK: done:

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/origin_select.ll b/llvm/test/Instrumentation/DataFlowSanitizer/origin_select.ll
index 58d2e9f949fee..133bf2263f21e 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/origin_select.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/origin_select.ll
@@ -5,17 +5,14 @@ target triple = "x86_64-unknown-linux-gnu"
 
 ; CHECK: @__dfsan_arg_tls = external thread_local(initialexec) global [[TLS_ARR:\[100 x i64\]]]
 ; CHECK: @__dfsan_retval_tls = external thread_local(initialexec) global [[TLS_ARR]]
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 define i8 @select8(i1 %c, i8 %t, i8 %f) {
   ; TRACK_CONTROL_FLOW: @select8.dfsan
   ; TRACK_CONTROL_FLOW: [[CO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
   ; TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
   ; TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
-  ; TRACK_CONTROL_FLOW: [[CS:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
+  ; TRACK_CONTROL_FLOW: [[CS:%.*]] = load i8, ptr @__dfsan_arg_tls, align 2
   ; TRACK_CONTROL_FLOW: [[TFO:%.*]] = select i1 %c, i32 [[TO]], i32 [[FO]]
-  ; TRACK_CONTROL_FLOW: [[CS_NE:%.*]] = icmp ne i[[#SBITS]] [[CS]], 0
+  ; TRACK_CONTROL_FLOW: [[CS_NE:%.*]] = icmp ne i8 [[CS]], 0
   ; TRACK_CONTROL_FLOW: [[CTFO:%.*]] = select i1 [[CS_NE]], i32 [[CO]], i32 [[TFO]]
   ; TRACK_CONTROL_FLOW: store i32 [[CTFO]], ptr @__dfsan_retval_origin_tls, align 4
 
@@ -33,8 +30,8 @@ define i8 @select8e(i1 %c, i8 %tf) {
   ; TRACK_CONTROL_FLOW: @select8e.dfsan
   ; TRACK_CONTROL_FLOW: [[CO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
   ; TRACK_CONTROL_FLOW: [[TFO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
-  ; TRACK_CONTROL_FLOW: [[CS:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
-  ; TRACK_CONTROL_FLOW: [[CS_NE:%.*]] = icmp ne i[[#SBITS]] [[CS]], 0
+  ; TRACK_CONTROL_FLOW: [[CS:%.*]] = load i8, ptr @__dfsan_arg_tls, align 2
+  ; TRACK_CONTROL_FLOW: [[CS_NE:%.*]] = icmp ne i8 [[CS]], 0
   ; TRACK_CONTROL_FLOW: [[CTFO:%.*]] = select i1 [[CS_NE]], i32 [[CO]], i32 [[TFO]]
   ; TRACK_CONTROL_FLOW: store i32 [[CTFO]], ptr @__dfsan_retval_origin_tls, align 4
 
@@ -51,19 +48,19 @@ define <4 x i8> @select8v(<4 x i1> %c, <4 x i8> %t, <4 x i8> %f) {
   ; TRACK_CONTROL_FLOW: [[CO:%.*]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
   ; TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
   ; TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
-  ; TRACK_CONTROL_FLOW: [[FS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align 2
-  ; TRACK_CONTROL_FLOW: [[CS:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
-  ; TRACK_CONTROL_FLOW: [[FS_NE:%.*]] = icmp ne i[[#SBITS]] [[FS]], 0
+  ; TRACK_CONTROL_FLOW: [[FS:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align 2
+  ; TRACK_CONTROL_FLOW: [[CS:%.*]] = load i8, ptr @__dfsan_arg_tls, align 2
+  ; TRACK_CONTROL_FLOW: [[FS_NE:%.*]] = icmp ne i8 [[FS]], 0
   ; TRACK_CONTROL_FLOW: [[FTO:%.*]] = select i1 [[FS_NE]], i32 [[FO]], i32 [[TO]]
-  ; TRACK_CONTROL_FLOW: [[CS_NE:%.*]] = icmp ne i[[#SBITS]] [[CS]], 0
+  ; TRACK_CONTROL_FLOW: [[CS_NE:%.*]] = icmp ne i8 [[CS]], 0
   ; TRACK_CONTROL_FLOW: [[CFTO:%.*]] = select i1 [[CS_NE]], i32 [[CO]], i32 [[FTO]]
   ; TRACK_CONTROL_FLOW: store i32 [[CFTO]], ptr @__dfsan_retval_origin_tls, align 4
 
   ; NO_TRACK_CONTROL_FLOW: @select8v.dfsan
   ; NO_TRACK_CONTROL_FLOW: [[FO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 2), align 4
   ; NO_TRACK_CONTROL_FLOW: [[TO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
-  ; NO_TRACK_CONTROL_FLOW: [[FS:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align 2
-  ; NO_TRACK_CONTROL_FLOW: [[FS_NE:%.*]] = icmp ne i[[#SBITS]] [[FS]], 0
+  ; NO_TRACK_CONTROL_FLOW: [[FS:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align 2
+  ; NO_TRACK_CONTROL_FLOW: [[FS_NE:%.*]] = icmp ne i8 [[FS]], 0
   ; NO_TRACK_CONTROL_FLOW: [[FTO:%.*]] = select i1 [[FS_NE]], i32 [[FO]], i32 [[TO]]
   ; NO_TRACK_CONTROL_FLOW: store i32 [[FTO]], ptr @__dfsan_retval_origin_tls, align 4
 

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/origin_store.ll b/llvm/test/Instrumentation/DataFlowSanitizer/origin_store.ll
index 4d3a764e91315..0b0ba40a89643 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/origin_store.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/origin_store.ll
@@ -3,15 +3,12 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 define void @store_zero_to_non_escaped_alloca() {
   ; CHECK-LABEL: @store_zero_to_non_escaped_alloca.dfsan
-  ; CHECK-NEXT: [[A:%.*]] = alloca i[[#SBITS]], align [[#SBYTES]]
+  ; CHECK-NEXT: [[A:%.*]] = alloca i8, align 1
   ; CHECK-NEXT: %_dfsa = alloca i32, align 4
   ; CHECK-NEXT: %p = alloca i16, align 2
-  ; CHECK-NEXT: store i[[#SBITS]] 0, ptr [[A]], align [[#SBYTES]]
+  ; CHECK-NEXT: store i8 0, ptr [[A]], align 1
   ; CHECK-NEXT: store i16 1, ptr %p, align 2
   ; CHECK-NEXT: ret void
 
@@ -35,9 +32,9 @@ declare void @foo(ptr %p)
 
 define void @store_zero_to_escaped_alloca() {
   ; CHECK-LABEL: @store_zero_to_escaped_alloca.dfsan
-  ; CHECK:  store i[[#NUM_BITS:mul(SBITS,2)]] 0, ptr {{.*}}, align [[#SBYTES]]
+  ; CHECK:  store i16 0, ptr {{.*}}, align 1
   ; CHECK-NEXT:  store i16 1, ptr %p, align 2
-  ; CHECK-NEXT:  store i[[#SBITS]] 0, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; CHECK-NEXT:  store i8 0, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
   ; CHECK-NEXT:  call void @foo.dfsan(ptr %p)
 
   %p = alloca i16
@@ -49,14 +46,14 @@ define void @store_zero_to_escaped_alloca() {
 define void @store_nonzero_to_escaped_alloca(i16 %a) {
   ; CHECK-LABEL:  @store_nonzero_to_escaped_alloca.dfsan
   ; CHECK-NEXT:   %[[#AO:]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; CHECK-NEXT:   %[[#AS:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; CHECK-NEXT:   %[[#AS:]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
   ; CHECK:        %[[#INTP:]] = ptrtoint ptr %p to i64
   ; CHECK-NEXT:   %[[#SHADOW_OFFSET:]] = xor i64 %[[#INTP]], [[#%.10d,MASK:]]
   ; CHECK-NEXT:   %[[#SHADOW_PTR0:]] = inttoptr i64 %[[#SHADOW_OFFSET]] to ptr
   ; CHECK-NEXT:   %[[#ORIGIN_OFFSET:]] = add i64 %[[#SHADOW_OFFSET]], [[#%.10d,ORIGIN_BASE:]]
   ; CHECK-NEXT:   %[[#ORIGIN_ADDR:]] = and i64 %[[#ORIGIN_OFFSET]], -4
   ; CHECK-NEXT:   %[[#ORIGIN_PTR:]] = inttoptr i64 %[[#ORIGIN_ADDR]] to ptr
-  ; CHECK:        %_dfscmp = icmp ne i[[#SBITS]] %[[#AS]], 0
+  ; CHECK:        %_dfscmp = icmp ne i8 %[[#AS]], 0
   ; CHECK-NEXT:   br i1 %_dfscmp, label %[[L1:.*]], label %[[L2:.*]],
   ; CHECK:       [[L1]]:
   ; CHECK-NEXT:   %[[#NO:]] = call i32 @__dfsan_chain_origin(i32 %[[#AO]])
@@ -75,16 +72,16 @@ define void @store64_align8(ptr %p, i64 %a) {
   ; CHECK-LABEL: @store64_align8.dfsan
 
   ; COMBINE_STORE_PTR-NEXT: %[[#PO:]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; COMBINE_STORE_PTR-NEXT: %[[#PS:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; COMBINE_STORE_PTR-NEXT: %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
 
   ; CHECK-NEXT:  %[[#AO:]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
-  ; CHECK-NEXT:  %[[#AS:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+  ; CHECK-NEXT:  %[[#AS:]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
 
-  ; COMBINE_STORE_PTR-NEXT: %[[#AS:]] = or i[[#SBITS]] %[[#AS]], %[[#PS]]
-  ; COMBINE_STORE_PTR-NEXT: %[[#NE:]] = icmp ne i[[#SBITS]] %[[#PS]], 0
+  ; COMBINE_STORE_PTR-NEXT: %[[#AS:]] = or i8 %[[#AS]], %[[#PS]]
+  ; COMBINE_STORE_PTR-NEXT: %[[#NE:]] = icmp ne i8 %[[#PS]], 0
   ; COMBINE_STORE_PTR-NEXT: %[[#AO:]] = select i1 %[[#NE]], i32 %[[#PO]], i32 %[[#AO]]
 
-  ; CHECK:       %_dfscmp = icmp ne i[[#SBITS]] %[[#AS]], 0
+  ; CHECK:       %_dfscmp = icmp ne i8 %[[#AS]], 0
   ; CHECK-NEXT:  br i1 %_dfscmp, label %[[L1:.*]], label %[[L2:.*]],
   ; CHECK:      [[L1]]:
   ; CHECK-NEXT:  %[[#NO:]] = call i32 @__dfsan_chain_origin(i32 %[[#AO]])
@@ -104,16 +101,16 @@ define void @store64_align2(ptr %p, i64 %a) {
   ; CHECK-LABEL: @store64_align2.dfsan
 
   ; COMBINE_STORE_PTR-NEXT: %[[#PO:]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; COMBINE_STORE_PTR-NEXT: %[[#PS:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; COMBINE_STORE_PTR-NEXT: %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
 
   ; CHECK-NEXT: %[[#AO:]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
-  ; CHECK-NEXT: %[[#AS:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+  ; CHECK-NEXT: %[[#AS:]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
 
-  ; COMBINE_STORE_PTR-NEXT: %[[#AS:]] = or i[[#SBITS]] %[[#AS]], %[[#PS]]
-  ; COMBINE_STORE_PTR-NEXT: %[[#NE:]] = icmp ne i[[#SBITS]] %[[#PS]], 0
+  ; COMBINE_STORE_PTR-NEXT: %[[#AS:]] = or i8 %[[#AS]], %[[#PS]]
+  ; COMBINE_STORE_PTR-NEXT: %[[#NE:]] = icmp ne i8 %[[#PS]], 0
   ; COMBINE_STORE_PTR-NEXT: %[[#AO:]] = select i1 %[[#NE]], i32 %[[#PO]], i32 %[[#AO]]
 
-  ; CHECK:      %_dfscmp = icmp ne i[[#SBITS]] %[[#AS]], 0
+  ; CHECK:      %_dfscmp = icmp ne i8 %[[#AS]], 0
   ; CHECK-NEXT: br i1 %_dfscmp, label %[[L1:.*]], label %[[L2:.*]],
   ; CHECK:     [[L1]]:
   ; CHECK-NEXT: %[[#NO:]] = call i32 @__dfsan_chain_origin(i32 %[[#AO]])
@@ -131,16 +128,16 @@ define void @store96_align8(ptr %p, i96 %a) {
   ; CHECK-LABEL: @store96_align8.dfsan
 
   ; COMBINE_STORE_PTR-NEXT: %[[#PO:]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; COMBINE_STORE_PTR-NEXT: %[[#PS:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; COMBINE_STORE_PTR-NEXT: %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
 
   ; CHECK-NEXT: %[[#AO:]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
-  ; CHECK-NEXT: %[[#AS:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+  ; CHECK-NEXT: %[[#AS:]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
 
-  ; COMBINE_STORE_PTR-NEXT: %[[#AS:]] = or i[[#SBITS]] %[[#AS]], %[[#PS]]
-  ; COMBINE_STORE_PTR-NEXT: %[[#NE:]] = icmp ne i[[#SBITS]] %[[#PS]], 0
+  ; COMBINE_STORE_PTR-NEXT: %[[#AS:]] = or i8 %[[#AS]], %[[#PS]]
+  ; COMBINE_STORE_PTR-NEXT: %[[#NE:]] = icmp ne i8 %[[#PS]], 0
   ; COMBINE_STORE_PTR-NEXT: %[[#AO:]] = select i1 %[[#NE]], i32 %[[#PO]], i32 %[[#AO]]
 
-  ; CHECK:      %_dfscmp = icmp ne i[[#SBITS]] %[[#AS]], 0
+  ; CHECK:      %_dfscmp = icmp ne i8 %[[#AS]], 0
   ; CHECK-NEXT: br i1 %_dfscmp, label %[[L1:.*]], label %[[L2:.*]],
   ; CHECK:     [[L1]]:
   ; CHECK-NEXT: %[[#NO:]] = call i32 @__dfsan_chain_origin(i32 %[[#AO]])

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/origin_store_threshold.ll b/llvm/test/Instrumentation/DataFlowSanitizer/origin_store_threshold.ll
index 15c856c26beaa..3630ebca3d50c 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/origin_store_threshold.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/origin_store_threshold.ll
@@ -2,17 +2,14 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 define void @store_threshold(ptr %p, [2 x i64] %a) {
   ; CHECK: @store_threshold.dfsan
   ; CHECK: [[AO:%.*]] = load i32, ptr getelementptr inbounds ([200 x i32], ptr @__dfsan_arg_origin_tls, i64 0, i64 1), align 4
-  ; CHECK: [[AS:%.*]] = load [2 x i[[#SBITS]]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
-  ; CHECK: [[AS0:%.*]] = extractvalue [2 x i[[#SBITS]]] [[AS]], 0
-  ; CHECK: [[AS1:%.*]] = extractvalue [2 x i[[#SBITS]]] [[AS]], 1
-  ; CHECK: [[AS01:%.*]] = or i[[#SBITS]] [[AS0]], [[AS1]]
-  ; CHECK: call void @__dfsan_maybe_store_origin(i[[#SBITS]] [[AS01]], ptr %p, i64 16, i32 [[AO]])
+  ; CHECK: [[AS:%.*]] = load [2 x i8], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
+  ; CHECK: [[AS0:%.*]] = extractvalue [2 x i8] [[AS]], 0
+  ; CHECK: [[AS1:%.*]] = extractvalue [2 x i8] [[AS]], 1
+  ; CHECK: [[AS01:%.*]] = or i8 [[AS0]], [[AS1]]
+  ; CHECK: call void @__dfsan_maybe_store_origin(i8 [[AS01]], ptr %p, i64 16, i32 [[AO]])
   ; CHECK: store [2 x i64] %a, ptr %p, align 8
 
   store [2 x i64] %a, ptr %p

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/origin_track_load.ll b/llvm/test/Instrumentation/DataFlowSanitizer/origin_track_load.ll
index 20e80f233d9c1..b93d2eb3b7480 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/origin_track_load.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/origin_track_load.ll
@@ -2,27 +2,24 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 define i64 @load64(ptr %p) {
   ; CHECK-LABEL: @load64.dfsan
 
   ; CHECK-NEXT: %[[#PO:]] = load i32, ptr @__dfsan_arg_origin_tls, align 4
-  ; CHECK-NEXT: %[[#PS:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; CHECK-NEXT: %[[#PS:]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
 
   ; CHECK-NEXT: %[[#LABEL_ORIGIN:]] = call zeroext i64 @__dfsan_load_label_and_origin(ptr %p, i64 8)
   ; CHECK-NEXT: %[[#LABEL_ORIGIN_H32:]] = lshr i64 %[[#LABEL_ORIGIN]], 32
-  ; CHECK-NEXT: %[[#LABEL:]] = trunc i64 %[[#LABEL_ORIGIN_H32]] to i[[#SBITS]]
+  ; CHECK-NEXT: %[[#LABEL:]] = trunc i64 %[[#LABEL_ORIGIN_H32]] to i8
   ; CHECK-NEXT: %[[#ORIGIN:]] = trunc i64 %[[#LABEL_ORIGIN]] to i32
-  ; CHECK-NEXT: %[[#ORIGIN_CHAINED:]] = call i32 @__dfsan_chain_origin_if_tainted(i[[#SBITS]] %[[#LABEL]], i32 %[[#ORIGIN]])
+  ; CHECK-NEXT: %[[#ORIGIN_CHAINED:]] = call i32 @__dfsan_chain_origin_if_tainted(i8 %[[#LABEL]], i32 %[[#ORIGIN]])
 
-  ; CHECK-NEXT: %[[#LABEL:]] = or i[[#SBITS]] %[[#LABEL]], %[[#PS]]
-  ; CHECK-NEXT: %[[#NZ:]] = icmp ne i[[#SBITS]] %[[#PS]], 0
+  ; CHECK-NEXT: %[[#LABEL:]] = or i8 %[[#LABEL]], %[[#PS]]
+  ; CHECK-NEXT: %[[#NZ:]] = icmp ne i8 %[[#PS]], 0
   ; CHECK-NEXT: %[[#ORIGIN_SEL:]] = select i1 %[[#NZ]], i32 %[[#PO]], i32 %[[#ORIGIN_CHAINED]]
 
   ; CHECK-NEXT: %a = load i64, ptr %p
-  ; CHECK-NEXT: store i[[#SBITS]] %[[#LABEL]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK-NEXT: store i8 %[[#LABEL]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK-NEXT: store i32 %[[#ORIGIN_SEL]], ptr @__dfsan_retval_origin_tls, align 4
 
   %a = load i64, ptr %p

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/phi.ll b/llvm/test/Instrumentation/DataFlowSanitizer/phi.ll
index 50e0f47047992..592d3eb2fe539 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/phi.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/phi.ll
@@ -2,15 +2,12 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 define {i32, i32} @test({i32, i32} %a, i1 %c) {
-  ; CHECK: %[[#AL:]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
-  ; CHECK: %[[#AL0:]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } %[[#AL]], i[[#SBITS]] 0, 0
-  ; CHECK: %[[#AL1:]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } %[[#AL]], i[[#SBITS]] 0, 1
-  ; CHECK: %[[#PL:]] = phi { i[[#SBITS]], i[[#SBITS]] } [ %[[#AL0]], %T ], [ %[[#AL1]], %F ]
-  ; CHECK: store { i[[#SBITS]], i[[#SBITS]] } %[[#PL]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK: %[[#AL:]] = load { i8, i8 }, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; CHECK: %[[#AL0:]] = insertvalue { i8, i8 } %[[#AL]], i8 0, 0
+  ; CHECK: %[[#AL1:]] = insertvalue { i8, i8 } %[[#AL]], i8 0, 1
+  ; CHECK: %[[#PL:]] = phi { i8, i8 } [ %[[#AL0]], %T ], [ %[[#AL1]], %F ]
+  ; CHECK: store { i8, i8 } %[[#PL]], ptr @__dfsan_retval_tls, align [[ALIGN]]
 
 entry:
   br i1 %c, label %T, label %F

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/select.ll b/llvm/test/Instrumentation/DataFlowSanitizer/select.ll
index 069383b51b6fa..5056616a4703a 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/select.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/select.ll
@@ -5,27 +5,24 @@ target triple = "x86_64-unknown-linux-gnu"
 
 ; CHECK: @__dfsan_arg_tls = external thread_local(initialexec) global [[TLS_ARR:\[100 x i64\]]]
 ; CHECK: @__dfsan_retval_tls = external thread_local(initialexec) global [[TLS_ARR]]
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 define i8 @select8(i1 %c, i8 %t, i8 %f) {
   ; TRACK_CF: @select8.dfsan
-  ; TRACK_CF: %[[#R:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN:2]]
-  ; TRACK_CF: %[[#R+1]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
-  ; TRACK_CF: %[[#R+2]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
-  ; TRACK_CF: %[[#R+3]] = select i1 %c, i[[#SBITS]] %[[#R+1]], i[[#SBITS]] %[[#R]]
-  ; TRACK_CF: %[[#RO:]] = or i[[#SBITS]] %[[#R+2]], %[[#R+3]]
+  ; TRACK_CF: %[[#R:]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN:2]]
+  ; TRACK_CF: %[[#R+1]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+  ; TRACK_CF: %[[#R+2]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; TRACK_CF: %[[#R+3]] = select i1 %c, i8 %[[#R+1]], i8 %[[#R]]
+  ; TRACK_CF: %[[#RO:]] = or i8 %[[#R+2]], %[[#R+3]]
   ; TRACK_CF: %a = select i1 %c, i8 %t, i8 %f
-  ; TRACK_CF: store i[[#SBITS]] %[[#RO]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; TRACK_CF: store i8 %[[#RO]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; TRACK_CF: ret i8 %a
 
   ; NO_TRACK_CF: @select8.dfsan
-  ; NO_TRACK_CF: %[[#R:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN:2]]
-  ; NO_TRACK_CF: %[[#R+1]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
-  ; NO_TRACK_CF: %[[#R+2]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
-  ; NO_TRACK_CF: %[[#R+3]] = select i1 %c, i[[#SBITS]] %[[#R+1]], i[[#SBITS]] %[[#R]]
+  ; NO_TRACK_CF: %[[#R:]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN:2]]
+  ; NO_TRACK_CF: %[[#R+1]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+  ; NO_TRACK_CF: %[[#R+2]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; NO_TRACK_CF: %[[#R+3]] = select i1 %c, i8 %[[#R+1]], i8 %[[#R]]
   ; NO_TRACK_CF: %a = select i1 %c, i8 %t, i8 %f
-  ; NO_TRACK_CF: store i[[#SBITS]] %[[#R+3]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; NO_TRACK_CF: store i8 %[[#R+3]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; NO_TRACK_CF: ret i8 %a
 
   %a = select i1 %c, i8 %t, i8 %f
@@ -34,18 +31,18 @@ define i8 @select8(i1 %c, i8 %t, i8 %f) {
 
 define i8 @select8e(i1 %c, i8 %tf) {
   ; TRACK_CF: @select8e.dfsan
-  ; TRACK_CF: %[[#R:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
-  ; TRACK_CF: %[[#R+1]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
-  ; TRACK_CF: %[[#RO:]] = or i[[#SBITS]] %[[#R+1]], %[[#R]]
+  ; TRACK_CF: %[[#R:]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+  ; TRACK_CF: %[[#R+1]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; TRACK_CF: %[[#RO:]] = or i8 %[[#R+1]], %[[#R]]
   ; TRACK_CF: %a = select i1 %c, i8 %tf, i8 %tf
-  ; TRACK_CF: store i[[#SBITS]] %[[#RO]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; TRACK_CF: store i8 %[[#RO]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; TRACK_CF: ret i8 %a
 
   ; NO_TRACK_CF: @select8e.dfsan
-  ; NO_TRACK_CF: %[[#R:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
-  ; NO_TRACK_CF: %[[#R+1]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; NO_TRACK_CF: %[[#R:]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+  ; NO_TRACK_CF: %[[#R+1]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
   ; NO_TRACK_CF: %a = select i1 %c, i8 %tf, i8 %tf
-  ; NO_TRACK_CF: store i[[#SBITS]] %[[#R]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; NO_TRACK_CF: store i8 %[[#R]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; NO_TRACK_CF: ret i8 %a
 
   %a = select i1 %c, i8 %tf, i8 %tf
@@ -54,22 +51,22 @@ define i8 @select8e(i1 %c, i8 %tf) {
 
 define <4 x i8> @select8v(<4 x i1> %c, <4 x i8> %t, <4 x i8> %f) {
   ; TRACK_CF: @select8v.dfsan
-  ; TRACK_CF: %[[#R:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN:2]]
-  ; TRACK_CF: %[[#R+1]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
-  ; TRACK_CF: %[[#R+2]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
-  ; TRACK_CF: %[[#R+3]] = or i[[#SBITS]] %[[#R+1]], %[[#R]]
-  ; TRACK_CF: %[[#RO:]] = or i[[#SBITS]] %[[#R+2]], %[[#R+3]]
+  ; TRACK_CF: %[[#R:]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN:2]]
+  ; TRACK_CF: %[[#R+1]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+  ; TRACK_CF: %[[#R+2]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; TRACK_CF: %[[#R+3]] = or i8 %[[#R+1]], %[[#R]]
+  ; TRACK_CF: %[[#RO:]] = or i8 %[[#R+2]], %[[#R+3]]
   ; TRACK_CF: %a = select <4 x i1> %c, <4 x i8> %t, <4 x i8> %f
-  ; TRACK_CF: store i[[#SBITS]] %[[#RO]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; TRACK_CF: store i8 %[[#RO]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; TRACK_CF: ret <4 x i8> %a
 
   ; NO_TRACK_CF: @select8v.dfsan
-  ; NO_TRACK_CF: %[[#R:]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN:2]]
-  ; NO_TRACK_CF: %[[#R+1]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
-  ; NO_TRACK_CF: %[[#R+2]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
-  ; NO_TRACK_CF: %[[#RO:]] = or i[[#SBITS]] %[[#R+1]], %[[#R]]
+  ; NO_TRACK_CF: %[[#R:]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN:2]]
+  ; NO_TRACK_CF: %[[#R+1]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+  ; NO_TRACK_CF: %[[#R+2]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; NO_TRACK_CF: %[[#RO:]] = or i8 %[[#R+1]], %[[#R]]
   ; NO_TRACK_CF: %a = select <4 x i1> %c, <4 x i8> %t, <4 x i8> %f
-  ; NO_TRACK_CF: store i[[#SBITS]] %[[#RO]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; NO_TRACK_CF: store i8 %[[#RO]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; NO_TRACK_CF: ret <4 x i8> %a
 
   %a = select <4 x i1> %c, <4 x i8> %t, <4 x i8> %f

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/shadow-args-zext.ll b/llvm/test/Instrumentation/DataFlowSanitizer/shadow-args-zext.ll
index db126d1ec02f8..e338976dca093 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/shadow-args-zext.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/shadow-args-zext.ll
@@ -4,12 +4,9 @@
 
 ; Test that the custom abi marks shadow parameters as zero extended.
 
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 define i32 @m() {
   ; CHECK-LABEL: @m.dfsan
-  ; CHECK: %{{.*}} = call zeroext i16 @__dfsw_dfsan_get_label(i64 signext 56, i[[#SBITS]] zeroext 0, ptr %{{.*}})
+  ; CHECK: %{{.*}} = call zeroext i16 @__dfsw_dfsan_get_label(i64 signext 56, i8 zeroext 0, ptr %{{.*}})
 
 entry:
   %call = call zeroext i16 @dfsan_get_label(i64 signext 56)
@@ -19,7 +16,7 @@ entry:
 
 define i32 @k() {
   ; CHECK-LABEL: @k.dfsan
-  ; CHECK: %{{.*}} = call zeroext i16 @__dfsw_k2(i64 signext 56, i64 signext 67, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, ptr %{{.*}})
+  ; CHECK: %{{.*}} = call zeroext i16 @__dfsw_k2(i64 signext 56, i64 signext 67, i8 zeroext {{.*}}, i8 zeroext {{.*}}, ptr %{{.*}})
 
 entry:
   %call = call zeroext i16 @k2(i64 signext 56, i64 signext 67)
@@ -29,7 +26,7 @@ entry:
 
 define i32 @k3() {
   ; CHECK-LABEL: @k3.dfsan
-  ; CHECK: %{{.*}} = call zeroext i16 @__dfsw_k4(i64 signext 56, i64 signext 67, i64 signext 78, i64 signext 89, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, i[[#SBITS]] zeroext {{.*}}, ptr %{{.*}})
+  ; CHECK: %{{.*}} = call zeroext i16 @__dfsw_k4(i64 signext 56, i64 signext 67, i64 signext 78, i64 signext 89, i8 zeroext {{.*}}, i8 zeroext {{.*}}, i8 zeroext {{.*}}, i8 zeroext {{.*}}, ptr %{{.*}})
 
 entry:
   %call = call zeroext i16 @k4(i64 signext 56, i64 signext 67, i64 signext 78, i64 signext 89)
@@ -39,17 +36,17 @@ entry:
 
 declare zeroext i16 @dfsan_get_label(i64 signext)
 ; CHECK-LABEL: @"dfsw$dfsan_get_label"
-; CHECK: %{{.*}} = call i16 @__dfsw_dfsan_get_label(i64 %0, i[[#SBITS]] zeroext %1, ptr %{{.*}})
+; CHECK: %{{.*}} = call i16 @__dfsw_dfsan_get_label(i64 %0, i8 zeroext %1, ptr %{{.*}})
 
 declare zeroext i16 @k2(i64 signext, i64 signext)
 ; CHECK-LABEL: @"dfsw$k2"
-; CHECK: %{{.*}} = call i16 @__dfsw_k2(i64 %{{.*}}, i64 %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, ptr %{{.*}})
+; CHECK: %{{.*}} = call i16 @__dfsw_k2(i64 %{{.*}}, i64 %{{.*}}, i8 zeroext %{{.*}}, i8 zeroext %{{.*}}, ptr %{{.*}})
 
 declare zeroext i16 @k4(i64 signext, i64 signext, i64 signext, i64 signext)
 ; CHECK-LABEL: @"dfsw$k4"
-; CHECK: %{{.*}} = call i16 @__dfsw_k4(i64 %{{.*}}, i64 %{{.*}}, i64  %{{.*}}, i64 %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, i[[#SBITS]] zeroext %{{.*}}, ptr %{{.*}})
+; CHECK: %{{.*}} = call i16 @__dfsw_k4(i64 %{{.*}}, i64 %{{.*}}, i64  %{{.*}}, i64 %{{.*}}, i8 zeroext %{{.*}}, i8 zeroext %{{.*}}, i8 zeroext %{{.*}}, i8 zeroext %{{.*}}, ptr %{{.*}})
 
 
-; CHECK: declare zeroext i16 @__dfsw_dfsan_get_label(i64 signext, i[[#SBITS]], ptr)
-; CHECK: declare zeroext i16 @__dfsw_k2(i64 signext, i64 signext, i[[#SBITS]], i[[#SBITS]], ptr)
-; CHECK: declare zeroext i16 @__dfsw_k4(i64 signext, i64 signext, i64 signext, i64 signext, i[[#SBITS]], i[[#SBITS]], i[[#SBITS]], i[[#SBITS]], ptr)
+; CHECK: declare zeroext i16 @__dfsw_dfsan_get_label(i64 signext, i8, ptr)
+; CHECK: declare zeroext i16 @__dfsw_k2(i64 signext, i64 signext, i8, i8, ptr)
+; CHECK: declare zeroext i16 @__dfsw_k4(i64 signext, i64 signext, i64 signext, i64 signext, i8, i8, i8, i8, ptr)

diff  --git a/llvm/test/Instrumentation/DataFlowSanitizer/store.ll b/llvm/test/Instrumentation/DataFlowSanitizer/store.ll
index 05b99e40a37ca..bc2a70e853315 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/store.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/store.ll
@@ -3,9 +3,6 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 define void @store0({} %v, ptr %p) {
   ; CHECK-LABEL: @store0.dfsan
   ; CHECK:       store {} %v, ptr %p
@@ -18,16 +15,16 @@ define void @store0({} %v, ptr %p) {
 
 define void @store8(i8 %v, ptr %p) {
   ; CHECK-LABEL:       @store8.dfsan
-  ; NO_COMBINE_PTR_LABEL: load i[[#SBITS]], ptr @__dfsan_arg_tls
+  ; NO_COMBINE_PTR_LABEL: load i8, ptr @__dfsan_arg_tls
   ; COMBINE_PTR_LABEL:    load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
 
-  ; COMBINE_PTR_LABEL: load i[[#SBITS]], ptr @__dfsan_arg_tls
-  ; COMBINE_PTR_LABEL: or i[[#SBITS]]
+  ; COMBINE_PTR_LABEL: load i8, ptr @__dfsan_arg_tls
+  ; COMBINE_PTR_LABEL: or i8
   ; CHECK:             ptrtoint ptr {{.*}} i64
   ; CHECK-NEXT:        xor i64
   ; CHECK-NEXT:        inttoptr i64 {{.*}} ptr
-  ; CHECK-NEXT:        getelementptr i[[#SBITS]], ptr
-  ; CHECK-NEXT:        store i[[#SBITS]]
+  ; CHECK-NEXT:        getelementptr i8, ptr
+  ; CHECK-NEXT:        store i8
   ; CHECK-NEXT:        store i8 %v, ptr %p
   ; CHECK-NEXT:        ret void
 
@@ -37,17 +34,17 @@ define void @store8(i8 %v, ptr %p) {
 
 define void @store16(i16 %v, ptr %p) {
   ; CHECK-LABEL:       @store16.dfsan
-  ; NO_COMBINE_PTR_LABEL: load i[[#SBITS]], ptr @__dfsan_arg_tls
+  ; NO_COMBINE_PTR_LABEL: load i8, ptr @__dfsan_arg_tls
   ; COMBINE_PTR_LABEL:    load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
-  ; COMBINE_PTR_LABEL: load i[[#SBITS]], ptr @__dfsan_arg_tls
-  ; COMBINE_PTR_LABEL: or i[[#SBITS]]
+  ; COMBINE_PTR_LABEL: load i8, ptr @__dfsan_arg_tls
+  ; COMBINE_PTR_LABEL: or i8
   ; CHECK:             ptrtoint ptr {{.*}} i64
   ; CHECK-NEXT:        xor i64
   ; CHECK-NEXT:        inttoptr i64 {{.*}} ptr
-  ; CHECK-NEXT:        getelementptr i[[#SBITS]], ptr
-  ; CHECK-NEXT:        store i[[#SBITS]]
-  ; CHECK-NEXT:        getelementptr i[[#SBITS]], ptr
-  ; CHECK-NEXT:        store i[[#SBITS]]
+  ; CHECK-NEXT:        getelementptr i8, ptr
+  ; CHECK-NEXT:        store i8
+  ; CHECK-NEXT:        getelementptr i8, ptr
+  ; CHECK-NEXT:        store i8
   ; CHECK-NEXT:        store i16 %v, ptr %p
   ; CHECK-NEXT:        ret void
 
@@ -57,21 +54,21 @@ define void @store16(i16 %v, ptr %p) {
 
 define void @store32(i32 %v, ptr %p) {
   ; CHECK-LABEL:       @store32.dfsan
-  ; NO_COMBINE_PTR_LABEL: load i[[#SBITS]], ptr @__dfsan_arg_tls
+  ; NO_COMBINE_PTR_LABEL: load i8, ptr @__dfsan_arg_tls
   ; COMBINE_PTR_LABEL:    load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
-  ; COMBINE_PTR_LABEL: load i[[#SBITS]], ptr @__dfsan_arg_tls
-  ; COMBINE_PTR_LABEL: or i[[#SBITS]]
+  ; COMBINE_PTR_LABEL: load i8, ptr @__dfsan_arg_tls
+  ; COMBINE_PTR_LABEL: or i8
   ; CHECK:             ptrtoint ptr {{.*}} i64
   ; CHECK-NEXT:        xor i64
   ; CHECK-NEXT:        inttoptr i64 {{.*}} ptr
-  ; CHECK-NEXT:        getelementptr i[[#SBITS]], ptr
-  ; CHECK-NEXT:        store i[[#SBITS]]
-  ; CHECK-NEXT:        getelementptr i[[#SBITS]], ptr
-  ; CHECK-NEXT:        store i[[#SBITS]]
-  ; CHECK-NEXT:        getelementptr i[[#SBITS]], ptr
-  ; CHECK-NEXT:        store i[[#SBITS]]
-  ; CHECK-NEXT:        getelementptr i[[#SBITS]], ptr
-  ; CHECK-NEXT:        store i[[#SBITS]]
+  ; CHECK-NEXT:        getelementptr i8, ptr
+  ; CHECK-NEXT:        store i8
+  ; CHECK-NEXT:        getelementptr i8, ptr
+  ; CHECK-NEXT:        store i8
+  ; CHECK-NEXT:        getelementptr i8, ptr
+  ; CHECK-NEXT:        store i8
+  ; CHECK-NEXT:        getelementptr i8, ptr
+  ; CHECK-NEXT:        store i8
   ; CHECK-NEXT:        store i32 %v, ptr %p
   ; CHECK-NEXT:        ret void
 
@@ -81,16 +78,16 @@ define void @store32(i32 %v, ptr %p) {
 
 define void @store64(i64 %v, ptr %p) {
   ; CHECK-LABEL:       @store64.dfsan
-  ; NO_COMBINE_PTR_LABEL: load i[[#SBITS]], ptr @__dfsan_arg_tls
+  ; NO_COMBINE_PTR_LABEL: load i8, ptr @__dfsan_arg_tls
   ; COMBINE_PTR_LABEL:    load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align 2
-  ; COMBINE_PTR_LABEL: load i[[#SBITS]], ptr @__dfsan_arg_tls
-  ; COMBINE_PTR_LABEL: or i[[#SBITS]]
+  ; COMBINE_PTR_LABEL: load i8, ptr @__dfsan_arg_tls
+  ; COMBINE_PTR_LABEL: or i8
   ; CHECK:             ptrtoint ptr {{.*}} i64
   ; CHECK-NEXT:        xor i64
   ; CHECK-NEXT:        inttoptr i64 {{.*}} ptr
-  ; CHECK-COUNT-8:     insertelement {{.*}} i[[#SBITS]]
-  ; CHECK-NEXT:        getelementptr <8 x i[[#SBITS]]>
-  ; CHECK-NEXT:        store <8 x i[[#SBITS]]>
+  ; CHECK-COUNT-8:     insertelement {{.*}} i8
+  ; CHECK-NEXT:        getelementptr <8 x i8>
+  ; CHECK-NEXT:        store <8 x i8>
   ; CHECK-NEXT:        store i64 %v, ptr %p
   ; CHECK-NEXT:        ret void
 
@@ -100,7 +97,7 @@ define void @store64(i64 %v, ptr %p) {
 
 define void @store_zero(ptr %p) {
   ; CHECK-LABEL:          @store_zero.dfsan
-  ; NO_COMBINE_PTR_LABEL: store i[[#mul(4, SBITS)]] 0, ptr {{.*}}
+  ; NO_COMBINE_PTR_LABEL: store i32 0, ptr {{.*}}
   store i32 0, ptr %p
   ret void
 }

diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/struct.ll b/llvm/test/Instrumentation/DataFlowSanitizer/struct.ll
index 08e2fc678f392..8069d28e557db 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/struct.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/struct.ll
@@ -9,22 +9,19 @@ target triple = "x86_64-unknown-linux-gnu"
 
 ; CHECK: @__dfsan_arg_tls = external thread_local(initialexec) global [[TLS_ARR:\[100 x i64\]]]
 ; CHECK: @__dfsan_retval_tls = external thread_local(initialexec) global [[TLS_ARR]]
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 define {ptr, i32} @pass_struct({ptr, i32} %s) {
   ; NO_COMBINE_LOAD_PTR: @pass_struct.dfsan
-  ; NO_COMBINE_LOAD_PTR: [[L:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
-  ; NO_COMBINE_LOAD_PTR: store { i[[#SBITS]], i[[#SBITS]] } [[L]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; NO_COMBINE_LOAD_PTR: [[L:%.*]] = load { i8, i8 }, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; NO_COMBINE_LOAD_PTR: store { i8, i8 } [[L]], ptr @__dfsan_retval_tls, align [[ALIGN]]
 
   ; DEBUG_NONZERO_LABELS: @pass_struct.dfsan
-  ; DEBUG_NONZERO_LABELS: [[L:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
-  ; DEBUG_NONZERO_LABELS: [[L0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[L]], 0
-  ; DEBUG_NONZERO_LABELS: [[L1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[L]], 1
-  ; DEBUG_NONZERO_LABELS: [[L01:%.*]] = or i[[#SBITS]] [[L0]], [[L1]]
-  ; DEBUG_NONZERO_LABELS: {{.*}} = icmp ne i[[#SBITS]] [[L01]], 0
+  ; DEBUG_NONZERO_LABELS: [[L:%.*]] = load { i8, i8 }, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; DEBUG_NONZERO_LABELS: [[L0:%.*]] = extractvalue { i8, i8 } [[L]], 0
+  ; DEBUG_NONZERO_LABELS: [[L1:%.*]] = extractvalue { i8, i8 } [[L]], 1
+  ; DEBUG_NONZERO_LABELS: [[L01:%.*]] = or i8 [[L0]], [[L1]]
+  ; DEBUG_NONZERO_LABELS: {{.*}} = icmp ne i8 [[L01]], 0
   ; DEBUG_NONZERO_LABELS: call void @__dfsan_nonzero_label()
-  ; DEBUG_NONZERO_LABELS: store { i[[#SBITS]], i[[#SBITS]] } [[L]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; DEBUG_NONZERO_LABELS: store { i8, i8 } [[L]], ptr @__dfsan_retval_tls, align [[ALIGN]]
 
   ret {ptr, i32} %s
 }
@@ -33,8 +30,8 @@ define {ptr, i32} @pass_struct({ptr, i32} %s) {
 
 define %StructOfAggr @pass_struct_of_aggregate(%StructOfAggr %s) {
   ; NO_COMBINE_LOAD_PTR: @pass_struct_of_aggregate.dfsan
-  ; NO_COMBINE_LOAD_PTR: %1 = load { i[[#SBITS]], [4 x i[[#SBITS]]], i[[#SBITS]], { i[[#SBITS]], i[[#SBITS]] } }, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
-  ; NO_COMBINE_LOAD_PTR: store { i[[#SBITS]], [4 x i[[#SBITS]]], i[[#SBITS]], { i[[#SBITS]], i[[#SBITS]] } } %1, ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; NO_COMBINE_LOAD_PTR: %1 = load { i8, [4 x i8], i8, { i8, i8 } }, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; NO_COMBINE_LOAD_PTR: store { i8, [4 x i8], i8, { i8, i8 } } %1, ptr @__dfsan_retval_tls, align [[ALIGN]]
 
   ret %StructOfAggr %s
 }
@@ -51,7 +48,7 @@ define {} @load_empty_struct(ptr %p) {
 
 define {i1, i32} @load_global_struct() {
   ; NO_COMBINE_LOAD_PTR: @load_global_struct.dfsan
-  ; NO_COMBINE_LOAD_PTR: store { i[[#SBITS]], i[[#SBITS]] } zeroinitializer, ptr @__dfsan_retval_tls, align 2
+  ; NO_COMBINE_LOAD_PTR: store { i8, i8 } zeroinitializer, ptr @__dfsan_retval_tls, align 2
 
   %a = load {i1, i32}, ptr @Y
   ret {i1, i32} %a
@@ -59,24 +56,24 @@ define {i1, i32} @load_global_struct() {
 
 define {i1, i32} @select_struct(i1 %c, {i1, i32} %a, {i1, i32} %b) {
   ; NO_SELECT_CONTROL: @select_struct.dfsan
-  ; NO_SELECT_CONTROL: [[B:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 [[#mul(2, SBYTES) + 2]]) to ptr), align [[ALIGN:2]]
-  ; NO_SELECT_CONTROL: [[A:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
-  ; NO_SELECT_CONTROL: [[C:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
-  ; NO_SELECT_CONTROL: [[S:%.*]] = select i1 %c, { i[[#SBITS]], i[[#SBITS]] } [[A]], { i[[#SBITS]], i[[#SBITS]] } [[B]]
-  ; NO_SELECT_CONTROL: store { i[[#SBITS]], i[[#SBITS]] } [[S]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; NO_SELECT_CONTROL: [[B:%.*]] = load { i8, i8 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN:2]]
+  ; NO_SELECT_CONTROL: [[A:%.*]] = load { i8, i8 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+  ; NO_SELECT_CONTROL: [[C:%.*]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; NO_SELECT_CONTROL: [[S:%.*]] = select i1 %c, { i8, i8 } [[A]], { i8, i8 } [[B]]
+  ; NO_SELECT_CONTROL: store { i8, i8 } [[S]], ptr @__dfsan_retval_tls, align [[ALIGN]]
 
   ; FAST: @select_struct.dfsan
-  ; FAST: %[[#R:]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 [[#mul(2, SBYTES) + 2]]) to ptr), align [[ALIGN:2]]
-  ; FAST: %[[#R+1]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
-  ; FAST: %[[#R+2]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
-  ; FAST: %[[#R+3]] = select i1 %c, { i[[#SBITS]], i[[#SBITS]] } %[[#R+1]], { i[[#SBITS]], i[[#SBITS]] } %[[#R]]
-  ; FAST: %[[#R+4]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } %[[#R+3]], 0
-  ; FAST: %[[#R+5]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } %[[#R+3]], 1
-  ; FAST: %[[#R+6]] = or i[[#SBITS]] %[[#R+4]], %[[#R+5]]
-  ; FAST: %[[#R+7]] = or i[[#SBITS]] %[[#R+2]], %[[#R+6]]
-  ; FAST: %[[#R+8]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } undef, i[[#SBITS]] %[[#R+7]], 0
-  ; FAST: %[[#R+9]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } %[[#R+8]], i[[#SBITS]] %[[#R+7]], 1
-  ; FAST: store { i[[#SBITS]], i[[#SBITS]] } %[[#R+9]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; FAST: %[[#R:]] = load { i8, i8 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN:2]]
+  ; FAST: %[[#R+1]] = load { i8, i8 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+  ; FAST: %[[#R+2]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; FAST: %[[#R+3]] = select i1 %c, { i8, i8 } %[[#R+1]], { i8, i8 } %[[#R]]
+  ; FAST: %[[#R+4]] = extractvalue { i8, i8 } %[[#R+3]], 0
+  ; FAST: %[[#R+5]] = extractvalue { i8, i8 } %[[#R+3]], 1
+  ; FAST: %[[#R+6]] = or i8 %[[#R+4]], %[[#R+5]]
+  ; FAST: %[[#R+7]] = or i8 %[[#R+2]], %[[#R+6]]
+  ; FAST: %[[#R+8]] = insertvalue { i8, i8 } undef, i8 %[[#R+7]], 0
+  ; FAST: %[[#R+9]] = insertvalue { i8, i8 } %[[#R+8]], i8 %[[#R+7]], 1
+  ; FAST: store { i8, i8 } %[[#R+9]], ptr @__dfsan_retval_tls, align [[ALIGN]]
 
   %s = select i1 %c, {i1, i32} %a, {i1, i32} %b
   ret {i1, i32} %s
@@ -84,12 +81,12 @@ define {i1, i32} @select_struct(i1 %c, {i1, i32} %a, {i1, i32} %b) {
 
 define { i32, i32 } @asm_struct(i32 %0, i32 %1) {
   ; FAST: @asm_struct.dfsan
-  ; FAST: [[E1:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN:2]]
-  ; FAST: [[E0:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
-  ; FAST: [[E01:%.*]] = or i[[#SBITS]] [[E0]], [[E1]]
-  ; FAST: [[S0:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } undef, i[[#SBITS]] [[E01]], 0
-  ; FAST: [[S1:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } [[S0]], i[[#SBITS]] [[E01]], 1
-  ; FAST: store { i[[#SBITS]], i[[#SBITS]] } [[S1]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; FAST: [[E1:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN:2]]
+  ; FAST: [[E0:%.*]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; FAST: [[E01:%.*]] = or i8 [[E0]], [[E1]]
+  ; FAST: [[S0:%.*]] = insertvalue { i8, i8 } undef, i8 [[E01]], 0
+  ; FAST: [[S1:%.*]] = insertvalue { i8, i8 } [[S0]], i8 [[E01]], 1
+  ; FAST: store { i8, i8 } [[S1]], ptr @__dfsan_retval_tls, align [[ALIGN]]
 
 entry:
   %a = call { i32, i32 } asm "", "=r,=r,r,r,~{dirflag},~{fpsr},~{flags}"(i32 %0, i32 %1)
@@ -98,15 +95,15 @@ entry:
 
 define {i32, i32} @const_struct() {
   ; FAST: @const_struct.dfsan
-  ; FAST: store { i[[#SBITS]], i[[#SBITS]] } zeroinitializer, ptr @__dfsan_retval_tls, align 2
+  ; FAST: store { i8, i8 } zeroinitializer, ptr @__dfsan_retval_tls, align 2
   ret {i32, i32} { i32 42, i32 11 }
 }
 
 define i1 @extract_struct({i1, i5} %s) {
   ; FAST: @extract_struct.dfsan
-  ; FAST: [[SM:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
-  ; FAST: [[EM:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[SM]], 0
-  ; FAST: store i[[#SBITS]] [[EM]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; FAST: [[SM:%.*]] = load { i8, i8 }, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; FAST: [[EM:%.*]] = extractvalue { i8, i8 } [[SM]], 0
+  ; FAST: store i8 [[EM]], ptr @__dfsan_retval_tls, align [[ALIGN]]
 
   %e2 = extractvalue {i1, i5} %s, 0
   ret i1 %e2
@@ -114,26 +111,26 @@ define i1 @extract_struct({i1, i5} %s) {
 
 define {i1, i5} @insert_struct({i1, i5} %s, i5 %e1) {
   ; FAST: @insert_struct.dfsan
-  ; FAST: [[EM:%.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 [[#mul(2, SBYTES)]]) to ptr), align [[ALIGN:2]]
-  ; FAST: [[SM:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr @__dfsan_arg_tls, align [[ALIGN]]
-  ; FAST: [[SM1:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } [[SM]], i[[#SBITS]] [[EM]], 1
-  ; FAST: store { i[[#SBITS]], i[[#SBITS]] } [[SM1]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; FAST: [[EM:%.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN:2]]
+  ; FAST: [[SM:%.*]] = load { i8, i8 }, ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; FAST: [[SM1:%.*]] = insertvalue { i8, i8 } [[SM]], i8 [[EM]], 1
+  ; FAST: store { i8, i8 } [[SM1]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   %s1 = insertvalue {i1, i5} %s, i5 %e1, 1
   ret {i1, i5} %s1
 }
 
 define {i1, i1} @load_struct(ptr %p) {
   ; NO_COMBINE_LOAD_PTR: @load_struct.dfsan
-  ; NO_COMBINE_LOAD_PTR: [[OL:%.*]] = or i[[#SBITS]]
-  ; NO_COMBINE_LOAD_PTR: [[S0:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } undef, i[[#SBITS]] [[OL]], 0
-  ; NO_COMBINE_LOAD_PTR: [[S1:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } [[S0]], i[[#SBITS]] [[OL]], 1
-  ; NO_COMBINE_LOAD_PTR: store { i[[#SBITS]], i[[#SBITS]] } [[S1]], ptr @__dfsan_retval_tls, align 2
+  ; NO_COMBINE_LOAD_PTR: [[OL:%.*]] = or i8
+  ; NO_COMBINE_LOAD_PTR: [[S0:%.*]] = insertvalue { i8, i8 } undef, i8 [[OL]], 0
+  ; NO_COMBINE_LOAD_PTR: [[S1:%.*]] = insertvalue { i8, i8 } [[S0]], i8 [[OL]], 1
+  ; NO_COMBINE_LOAD_PTR: store { i8, i8 } [[S1]], ptr @__dfsan_retval_tls, align 2
 
   ; EVENT_CALLBACKS: @load_struct.dfsan
-  ; EVENT_CALLBACKS: [[OL0:%.*]] = or i[[#SBITS]]
-  ; EVENT_CALLBACKS: [[OL1:%.*]] = or i[[#SBITS]] [[OL0]],
-  ; EVENT_CALLBACKS: [[S0:%.*]] = insertvalue { i[[#SBITS]], i[[#SBITS]] } undef, i[[#SBITS]] [[OL1]], 0
-  ; EVENT_CALLBACKS: call void @__dfsan_load_callback(i[[#SBITS]] zeroext [[OL1]]
+  ; EVENT_CALLBACKS: [[OL0:%.*]] = or i8
+  ; EVENT_CALLBACKS: [[OL1:%.*]] = or i8 [[OL0]],
+  ; EVENT_CALLBACKS: [[S0:%.*]] = insertvalue { i8, i8 } undef, i8 [[OL1]], 0
+  ; EVENT_CALLBACKS: call void @__dfsan_load_callback(i8 zeroext [[OL1]]
 
   %s = load {i1, i1}, ptr %p
   ret {i1, i1} %s
@@ -141,30 +138,30 @@ define {i1, i1} @load_struct(ptr %p) {
 
 define void @store_struct(ptr %p, {i1, i1} %s) {
   ; FAST: @store_struct.dfsan
-  ; FAST: [[S:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN:2]]
-  ; FAST: [[E0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[S]], 0
-  ; FAST: [[E1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[S]], 1
-  ; FAST: [[E:%.*]] = or i[[#SBITS]] [[E0]], [[E1]]
-  ; FAST: [[P0:%.*]] = getelementptr i[[#SBITS]], ptr [[P:%.*]], i32 0
-  ; FAST: store i[[#SBITS]] [[E]], ptr [[P0]], align [[#SBYTES]]
-  ; FAST: [[P1:%.*]] = getelementptr i[[#SBITS]], ptr [[P]], i32 1
-  ; FAST: store i[[#SBITS]] [[E]], ptr [[P1]], align [[#SBYTES]]
+  ; FAST: [[S:%.*]] = load { i8, i8 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN:2]]
+  ; FAST: [[E0:%.*]] = extractvalue { i8, i8 } [[S]], 0
+  ; FAST: [[E1:%.*]] = extractvalue { i8, i8 } [[S]], 1
+  ; FAST: [[E:%.*]] = or i8 [[E0]], [[E1]]
+  ; FAST: [[P0:%.*]] = getelementptr i8, ptr [[P:%.*]], i32 0
+  ; FAST: store i8 [[E]], ptr [[P0]], align 1
+  ; FAST: [[P1:%.*]] = getelementptr i8, ptr [[P]], i32 1
+  ; FAST: store i8 [[E]], ptr [[P1]], align 1
 
   ; EVENT_CALLBACKS: @store_struct.dfsan
-  ; EVENT_CALLBACKS: [[OL:%.*]] = or i[[#SBITS]]
-  ; EVENT_CALLBACKS: call void @__dfsan_store_callback(i[[#SBITS]] zeroext [[OL]]
+  ; EVENT_CALLBACKS: [[OL:%.*]] = or i8
+  ; EVENT_CALLBACKS: call void @__dfsan_store_callback(i8 zeroext [[OL]]
 
   ; COMBINE_STORE_PTR: @store_struct.dfsan
-  ; COMBINE_STORE_PTR: [[PL:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
-  ; COMBINE_STORE_PTR: [[SL:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
-  ; COMBINE_STORE_PTR: [[SL0:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[SL]], 0
-  ; COMBINE_STORE_PTR: [[SL1:%.*]] = extractvalue { i[[#SBITS]], i[[#SBITS]] } [[SL]], 1
-  ; COMBINE_STORE_PTR: [[SL01:%.*]] = or i[[#SBITS]] [[SL0]], [[SL1]]
-  ; COMBINE_STORE_PTR: [[E:%.*]] = or i[[#SBITS]] [[SL01]], [[PL]]
-  ; COMBINE_STORE_PTR: [[P0:%.*]] = getelementptr i[[#SBITS]], ptr [[P:%.*]], i32 0
-  ; COMBINE_STORE_PTR: store i[[#SBITS]] [[E]], ptr [[P0]], align [[#SBYTES]]
-  ; COMBINE_STORE_PTR: [[P1:%.*]] = getelementptr i[[#SBITS]], ptr [[P]], i32 1
-  ; COMBINE_STORE_PTR: store i[[#SBITS]] [[E]], ptr [[P1]], align [[#SBYTES]]
+  ; COMBINE_STORE_PTR: [[PL:%.*]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; COMBINE_STORE_PTR: [[SL:%.*]] = load { i8, i8 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+  ; COMBINE_STORE_PTR: [[SL0:%.*]] = extractvalue { i8, i8 } [[SL]], 0
+  ; COMBINE_STORE_PTR: [[SL1:%.*]] = extractvalue { i8, i8 } [[SL]], 1
+  ; COMBINE_STORE_PTR: [[SL01:%.*]] = or i8 [[SL0]], [[SL1]]
+  ; COMBINE_STORE_PTR: [[E:%.*]] = or i8 [[SL01]], [[PL]]
+  ; COMBINE_STORE_PTR: [[P0:%.*]] = getelementptr i8, ptr [[P:%.*]], i32 0
+  ; COMBINE_STORE_PTR: store i8 [[E]], ptr [[P0]], align 1
+  ; COMBINE_STORE_PTR: [[P1:%.*]] = getelementptr i8, ptr [[P]], i32 1
+  ; COMBINE_STORE_PTR: store i8 [[E]], ptr [[P1]], align 1
 
   store {i1, i1} %s, ptr %p
   ret void
@@ -172,9 +169,9 @@ define void @store_struct(ptr %p, {i1, i1} %s) {
 
 define i2 @extract_struct_of_aggregate11(%StructOfAggr %s) {
   ; FAST: @extract_struct_of_aggregate11.dfsan
-  ; FAST: [[E:%.*]] = load { i[[#SBITS]], [4 x i[[#SBITS]]], i[[#SBITS]], { i[[#SBITS]], i[[#SBITS]] } }, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
-  ; FAST: [[E11:%.*]] = extractvalue { i[[#SBITS]], [4 x i[[#SBITS]]], i[[#SBITS]], { i[[#SBITS]], i[[#SBITS]] } } [[E]], 1, 1
-  ; FAST: store i[[#SBITS]] [[E11]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; FAST: [[E:%.*]] = load { i8, [4 x i8], i8, { i8, i8 } }, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; FAST: [[E11:%.*]] = extractvalue { i8, [4 x i8], i8, { i8, i8 } } [[E]], 1, 1
+  ; FAST: store i8 [[E11]], ptr @__dfsan_retval_tls, align [[ALIGN]]
 
   %e11 = extractvalue %StructOfAggr %s, 1, 1
   ret i2 %e11
@@ -182,46 +179,46 @@ define i2 @extract_struct_of_aggregate11(%StructOfAggr %s) {
 
 define [4 x i2] @extract_struct_of_aggregate1(%StructOfAggr %s) {
   ; FAST: @extract_struct_of_aggregate1.dfsan
-  ; FAST: [[E:%.*]] = load { i[[#SBITS]], [4 x i[[#SBITS]]], i[[#SBITS]], { i[[#SBITS]], i[[#SBITS]] } }, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
-  ; FAST: [[E1:%.*]] = extractvalue { i[[#SBITS]], [4 x i[[#SBITS]]], i[[#SBITS]], { i[[#SBITS]], i[[#SBITS]] } } [[E]], 1
-  ; FAST: store [4 x i[[#SBITS]]] [[E1]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; FAST: [[E:%.*]] = load { i8, [4 x i8], i8, { i8, i8 } }, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; FAST: [[E1:%.*]] = extractvalue { i8, [4 x i8], i8, { i8, i8 } } [[E]], 1
+  ; FAST: store [4 x i8] [[E1]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   %e1 = extractvalue %StructOfAggr %s, 1
   ret [4 x i2] %e1
 }
 
 define <4 x i3> @extract_struct_of_aggregate2(%StructOfAggr %s) {
   ; FAST: @extract_struct_of_aggregate2.dfsan
-  ; FAST: [[E:%.*]] = load { i[[#SBITS]], [4 x i[[#SBITS]]], i[[#SBITS]], { i[[#SBITS]], i[[#SBITS]] } }, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
-  ; FAST: [[E2:%.*]] = extractvalue { i[[#SBITS]], [4 x i[[#SBITS]]], i[[#SBITS]], { i[[#SBITS]], i[[#SBITS]] } } [[E]], 2
-  ; FAST: store i[[#SBITS]] [[E2]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; FAST: [[E:%.*]] = load { i8, [4 x i8], i8, { i8, i8 } }, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; FAST: [[E2:%.*]] = extractvalue { i8, [4 x i8], i8, { i8, i8 } } [[E]], 2
+  ; FAST: store i8 [[E2]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   %e2 = extractvalue %StructOfAggr %s, 2
   ret <4 x i3> %e2
 }
 
 define { i1, i1 } @extract_struct_of_aggregate3(%StructOfAggr %s) {
   ; FAST: @extract_struct_of_aggregate3.dfsan
-  ; FAST: [[E:%.*]] = load { i[[#SBITS]], [4 x i[[#SBITS]]], i[[#SBITS]], { i[[#SBITS]], i[[#SBITS]] } }, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
-  ; FAST: [[E3:%.*]] = extractvalue { i[[#SBITS]], [4 x i[[#SBITS]]], i[[#SBITS]], { i[[#SBITS]], i[[#SBITS]] } } [[E]], 3
-  ; FAST: store { i[[#SBITS]], i[[#SBITS]] } [[E3]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; FAST: [[E:%.*]] = load { i8, [4 x i8], i8, { i8, i8 } }, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; FAST: [[E3:%.*]] = extractvalue { i8, [4 x i8], i8, { i8, i8 } } [[E]], 3
+  ; FAST: store { i8, i8 } [[E3]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   %e3 = extractvalue %StructOfAggr %s, 3
   ret { i1, i1 } %e3
 }
 
 define i1 @extract_struct_of_aggregate31(%StructOfAggr %s) {
   ; FAST: @extract_struct_of_aggregate31.dfsan
-  ; FAST: [[E:%.*]] = load { i[[#SBITS]], [4 x i[[#SBITS]]], i[[#SBITS]], { i[[#SBITS]], i[[#SBITS]] } }, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
-  ; FAST: [[E31:%.*]] = extractvalue { i[[#SBITS]], [4 x i[[#SBITS]]], i[[#SBITS]], { i[[#SBITS]], i[[#SBITS]] } } [[E]], 3, 1
-  ; FAST: store i[[#SBITS]] [[E31]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; FAST: [[E:%.*]] = load { i8, [4 x i8], i8, { i8, i8 } }, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; FAST: [[E31:%.*]] = extractvalue { i8, [4 x i8], i8, { i8, i8 } } [[E]], 3, 1
+  ; FAST: store i8 [[E31]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   %e31 = extractvalue %StructOfAggr %s, 3, 1
   ret i1 %e31
 }
 
 define %StructOfAggr @insert_struct_of_aggregate11(%StructOfAggr %s, i2 %e11) {
   ; FAST: @insert_struct_of_aggregate11.dfsan
-  ; FAST: [[E11:%.*]]  = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 [[#mul(8, SBYTES)]]) to ptr), align [[ALIGN:2]]
-  ; FAST: [[S:%.*]] = load { i[[#SBITS]], [4 x i[[#SBITS]]], i[[#SBITS]], { i[[#SBITS]], i[[#SBITS]] } }, ptr @__dfsan_arg_tls, align [[ALIGN]]
-  ; FAST: [[S1:%.*]] = insertvalue { i[[#SBITS]], [4 x i[[#SBITS]]], i[[#SBITS]], { i[[#SBITS]], i[[#SBITS]] } } [[S]], i[[#SBITS]] [[E11]], 1, 1
-  ; FAST: store { i[[#SBITS]], [4 x i[[#SBITS]]], i[[#SBITS]], { i[[#SBITS]], i[[#SBITS]] } } [[S1]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; FAST: [[E11:%.*]]  = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 8) to ptr), align [[ALIGN:2]]
+  ; FAST: [[S:%.*]] = load { i8, [4 x i8], i8, { i8, i8 } }, ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; FAST: [[S1:%.*]] = insertvalue { i8, [4 x i8], i8, { i8, i8 } } [[S]], i8 [[E11]], 1, 1
+  ; FAST: store { i8, [4 x i8], i8, { i8, i8 } } [[S1]], ptr @__dfsan_retval_tls, align [[ALIGN]]
 
   %s1 = insertvalue %StructOfAggr %s, i2 %e11, 1, 1
   ret %StructOfAggr %s1
@@ -229,10 +226,10 @@ define %StructOfAggr @insert_struct_of_aggregate11(%StructOfAggr %s, i2 %e11) {
 
 define {ptr, i32} @call_struct({ptr, i32} %s) {
   ; FAST: @call_struct.dfsan
-  ; FAST: [[S:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
-  ; FAST: store { i[[#SBITS]], i[[#SBITS]] } [[S]], ptr @__dfsan_arg_tls, align [[ALIGN]]
-  ; FAST: %_dfsret = load { i[[#SBITS]], i[[#SBITS]] }, ptr @__dfsan_retval_tls, align [[ALIGN]]
-  ; FAST: store { i[[#SBITS]], i[[#SBITS]] } %_dfsret, ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; FAST: [[S:%.*]] = load { i8, i8 }, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; FAST: store { i8, i8 } [[S]], ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; FAST: %_dfsret = load { i8, i8 }, ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; FAST: store { i8, i8 } %_dfsret, ptr @__dfsan_retval_tls, align [[ALIGN]]
 
   %r = call {ptr, i32} @pass_struct({ptr, i32} %s)
   ret {ptr, i32} %r
@@ -242,14 +239,14 @@ declare %StructOfAggr @fun_with_many_aggr_args(<2 x i7> %v, [2 x i5] %a, {i3, i3
 
 define %StructOfAggr @call_many_aggr_args(<2 x i7> %v, [2 x i5] %a, {i3, i3} %s) {
   ; FAST: @call_many_aggr_args.dfsan
-  ; FAST: [[S:%.*]] = load { i[[#SBITS]], i[[#SBITS]] }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 [[#mul(2, SBYTES) + 2]]) to ptr), align [[ALIGN:2]]
-  ; FAST: [[A:%.*]] = load [2 x i[[#SBITS]]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
-  ; FAST: [[V:%.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
-  ; FAST: store i[[#SBITS]] [[V]], ptr @__dfsan_arg_tls, align [[ALIGN]]
-  ; FAST: store [2 x i[[#SBITS]]] [[A]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
-  ; FAST: store { i[[#SBITS]], i[[#SBITS]] } [[S]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 [[#mul(2, SBYTES) + 2]]) to ptr), align [[ALIGN]]
-  ; FAST: %_dfsret = load { i[[#SBITS]], [4 x i[[#SBITS]]], i[[#SBITS]], { i[[#SBITS]], i[[#SBITS]] } }, ptr @__dfsan_retval_tls, align [[ALIGN]]
-  ; FAST: store { i[[#SBITS]], [4 x i[[#SBITS]]], i[[#SBITS]], { i[[#SBITS]], i[[#SBITS]] } } %_dfsret, ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; FAST: [[S:%.*]] = load { i8, i8 }, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN:2]]
+  ; FAST: [[A:%.*]] = load [2 x i8], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+  ; FAST: [[V:%.*]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; FAST: store i8 [[V]], ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; FAST: store [2 x i8] [[A]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN]]
+  ; FAST: store { i8, i8 } [[S]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 4) to ptr), align [[ALIGN]]
+  ; FAST: %_dfsret = load { i8, [4 x i8], i8, { i8, i8 } }, ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; FAST: store { i8, [4 x i8], i8, { i8, i8 } } %_dfsret, ptr @__dfsan_retval_tls, align [[ALIGN]]
 
   %r = call %StructOfAggr @fun_with_many_aggr_args(<2 x i7> %v, [2 x i5] %a, {i3, i3} %s)
   ret %StructOfAggr %r

diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/uninstrumented_local_functions.ll b/llvm/test/Instrumentation/DataFlowSanitizer/uninstrumented_local_functions.ll
index 5a5ce94ba16b6..bff9bafa8a658 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/uninstrumented_local_functions.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/uninstrumented_local_functions.ll
@@ -1,9 +1,6 @@
 ; RUN: opt < %s -passes=dfsan -dfsan-abilist=%S/Inputs/abilist.txt -S | FileCheck %s
 target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 define internal i8 @uninstrumented_internal_fun(i8 %in) {
   ret i8 %in
 }

diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/union.ll b/llvm/test/Instrumentation/DataFlowSanitizer/union.ll
index c021e934a4a66..12905c4a9a18e 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/union.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/union.ll
@@ -2,9 +2,6 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 @a = common global i32 0
 @b = common global i32 0
 
@@ -12,10 +9,10 @@ target triple = "x86_64-unknown-linux-gnu"
 
 ; CHECK-LABEL: @f.dfsan
 define void @f(i32 %x, i32 %y) {
-  ; CHECK: or i[[#SBITS]]
+  ; CHECK: or i8
   %xay = add i32 %x, %y
   store i32 %xay, ptr @a
-  ; CHECK-NOT: or i[[#SBITS]]
+  ; CHECK-NOT: or i8
   %xmy = mul i32 %x, %y
   store i32 %xmy, ptr @b
   ret void
@@ -29,13 +26,13 @@ define void @g(i1 %p, i32 %x, i32 %y) {
   br i1 %p, label %l1, label %l2
 
 l1:
-  ; CHECK: or i[[#SBITS]]
+  ; CHECK: or i8
   %xay = add i32 %x, %y
   store i32 %xay, ptr @a
   br label %l3
 
 l2:
-  ; CHECK: or i[[#SBITS]]
+  ; CHECK: or i8
   %xmy = mul i32 %x, %y
   store i32 %xmy, ptr @b
   br label %l3
@@ -48,9 +45,9 @@ l3:
 
 ; CHECK-LABEL: @h.dfsan
 define i32 @h(i32 %x, i32 %y) {
-  ; CHECK: or i[[#SBITS]]
+  ; CHECK: or i8
   %xay = add i32 %x, %y
-  ; CHECK-NOT: or i[[#SBITS]]
+  ; CHECK-NOT: or i8
   %xayax = add i32 %xay, %x
   ret i32 %xayax
 }

diff --git a/llvm/test/Instrumentation/DataFlowSanitizer/vector.ll b/llvm/test/Instrumentation/DataFlowSanitizer/vector.ll
index ec27154ac38d2..64052d66d899a 100644
--- a/llvm/test/Instrumentation/DataFlowSanitizer/vector.ll
+++ b/llvm/test/Instrumentation/DataFlowSanitizer/vector.ll
@@ -2,20 +2,17 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
-; CHECK: @__dfsan_shadow_width_bits = weak_odr constant i32 [[#SBITS:]]
-; CHECK: @__dfsan_shadow_width_bytes = weak_odr constant i32 [[#SBYTES:]]
-
 define <4 x i4> @pass_vector(<4 x i4> %v) {
   ; CHECK-LABEL: @pass_vector.dfsan
-  ; CHECK-NEXT: %[[#REG:]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
-  ; CHECK-NEXT: store i[[#SBITS]] %[[#REG]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK-NEXT: %[[#REG:]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; CHECK-NEXT: store i8 %[[#REG]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK-NEXT: ret <4 x i4> %v
   ret <4 x i4> %v
 }
 
 define void @load_update_store_vector(ptr %p) {
   ; CHECK-LABEL: @load_update_store_vector.dfsan
-  ; CHECK: {{.*}} = load i[[#SBITS]], ptr @__dfsan_arg_tls, align 2
+  ; CHECK: {{.*}} = load i8, ptr @__dfsan_arg_tls, align 2
 
   %v = load <4 x i4>, ptr %p
   %e2 = extractelement <4 x i4> %v, i32 2
@@ -26,12 +23,12 @@ define void @load_update_store_vector(ptr %p) {
 
 define <4 x i1> @icmp_vector(<4 x i8> %a, <4 x i8> %b) {
   ; CHECK-LABEL: @icmp_vector.dfsan
-  ; CHECK-NEXT: %[[B:.*]] = load i[[#SBITS]], ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN:2]]
-  ; CHECK-NEXT: %[[A:.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN]]
-  ; CHECK:       %[[L:.*]] = or i[[#SBITS]] %[[A]], %[[B]]
+  ; CHECK-NEXT: %[[B:.*]] = load i8, ptr inttoptr (i64 add (i64 ptrtoint (ptr @__dfsan_arg_tls to i64), i64 2) to ptr), align [[ALIGN:2]]
+  ; CHECK-NEXT: %[[A:.*]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; CHECK:       %[[L:.*]] = or i8 %[[A]], %[[B]]
 
   ; CHECK: %r = icmp eq <4 x i8> %a, %b
-  ; CHECK: store i[[#SBITS]] %[[L]], ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK: store i8 %[[L]], ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK: ret <4 x i1> %r
 
   %r = icmp eq <4 x i8> %a, %b
@@ -40,7 +37,7 @@ define <4 x i1> @icmp_vector(<4 x i8> %a, <4 x i8> %b) {
 
 define <2 x i32> @const_vector() {
   ; CHECK-LABEL: @const_vector.dfsan
-  ; CHECK-NEXT: store i[[#SBITS]] 0, ptr @__dfsan_retval_tls, align 2
+  ; CHECK-NEXT: store i8 0, ptr @__dfsan_retval_tls, align 2
   ; CHECK-NEXT: ret <2 x i32> <i32 42, i32 11>
 
   ret <2 x i32> < i32 42, i32 11 >
@@ -48,11 +45,11 @@ define <2 x i32> @const_vector() {
 
 define <4 x i4> @call_vector(<4 x i4> %v) {
   ; CHECK-LABEL: @call_vector.dfsan
-  ; CHECK-NEXT: %[[V:.*]] = load i[[#SBITS]], ptr @__dfsan_arg_tls, align [[ALIGN:2]]
-  ; CHECK-NEXT: store i[[#SBITS]] %[[V]], ptr @__dfsan_arg_tls, align [[ALIGN]]
+  ; CHECK-NEXT: %[[V:.*]] = load i8, ptr @__dfsan_arg_tls, align [[ALIGN:2]]
+  ; CHECK-NEXT: store i8 %[[V]], ptr @__dfsan_arg_tls, align [[ALIGN]]
   ; CHECK-NEXT: %r = call <4 x i4> @pass_vector.dfsan(<4 x i4> %v)
-  ; CHECK-NEXT: %_dfsret = load i[[#SBITS]], ptr @__dfsan_retval_tls, align [[ALIGN]]
-  ; CHECK-NEXT: store i[[#SBITS]] %_dfsret, ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK-NEXT: %_dfsret = load i8, ptr @__dfsan_retval_tls, align [[ALIGN]]
+  ; CHECK-NEXT: store i8 %_dfsret, ptr @__dfsan_retval_tls, align [[ALIGN]]
   ; CHECK-NEXT: ret <4 x i4> %r
 
   %r = call <4 x i4> @pass_vector(<4 x i4> %v)


        


More information about the llvm-commits mailing list