[clang] 8938a6c - [NFC] update test to make diff of the following commit clear

via cfe-commits <cfe-commits at lists.llvm.org>
Thu Jun 25 03:59:48 PDT 2020


Author: Tyker
Date: 2020-06-25T12:59:44+02:00
New Revision: 8938a6c9ede29a2d738357c44d66a017d74fcc7e

URL: https://github.com/llvm/llvm-project/commit/8938a6c9ede29a2d738357c44d66a017d74fcc7e
DIFF: https://github.com/llvm/llvm-project/commit/8938a6c9ede29a2d738357c44d66a017d74fcc7e.diff

LOG: [NFC] update test to make diff of the following commit clear

Added: 
    

Modified: 
    clang/test/CodeGen/align_value.cpp
    clang/test/CodeGen/alloc-align-attr.c
    clang/test/CodeGen/builtin-align.c
    clang/test/CodeGen/builtin-assume-aligned.c
    llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll
    llvm/test/Transforms/Inline/align.ll

Removed: 
    


################################################################################
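The NOTE lines at the top of each updated test record that the CHECK lines were generated mechanically rather than written by hand. For reference, a typical regeneration command looks like the following; the binary paths and the `--llvm-bin`/`--opt-binary` flags are shown as an assumption about a typical checkout, not taken from this commit, while `--function-signature` matches the UTC_ARGS recorded in simple32.ll:

    python llvm/utils/update_cc_test_checks.py --llvm-bin=build/bin \
        clang/test/CodeGen/align_value.cpp
    python llvm/utils/update_test_checks.py --opt-binary=build/bin/opt \
        --function-signature llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll
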
diff  --git a/clang/test/CodeGen/align_value.cpp b/clang/test/CodeGen/align_value.cpp
index 3badcd74f46d..acbfbaf2ba5c 100644
--- a/clang/test/CodeGen/align_value.cpp
+++ b/clang/test/CodeGen/align_value.cpp
@@ -1,103 +1,192 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s | FileCheck %s
 
 typedef double * __attribute__((align_value(64))) aligned_double;
 
+// CHECK-LABEL: define {{[^@]+}}@_Z3fooPdS_Rd
+// CHECK-SAME: (double* align 64 [[X:%.*]], double* align 32 [[Y:%.*]], double* nonnull align 128 dereferenceable(8) [[Z:%.*]]) #0
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca double*, align 8
+// CHECK-NEXT:    [[Y_ADDR:%.*]] = alloca double*, align 8
+// CHECK-NEXT:    [[Z_ADDR:%.*]] = alloca double*, align 8
+// CHECK-NEXT:    store double* [[X]], double** [[X_ADDR]], align 8
+// CHECK-NEXT:    store double* [[Y]], double** [[Y_ADDR]], align 8
+// CHECK-NEXT:    store double* [[Z]], double** [[Z_ADDR]], align 8
+// CHECK-NEXT:    ret void
+//
 void foo(aligned_double x, double * y __attribute__((align_value(32))),
          double & z __attribute__((align_value(128)))) { };
-// CHECK: define void @_Z3fooPdS_Rd(double* align 64 %x, double* align 32 %y, double* nonnull align 128 dereferenceable(8) %z)
 
 struct ad_struct {
   aligned_double a;
 };
 
+// CHECK-LABEL: define {{[^@]+}}@_Z3fooR9ad_struct
+// CHECK-SAME: (%struct.ad_struct* nonnull align 8 dereferenceable(8) [[X:%.*]]) #0
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca %struct.ad_struct*, align 8
+// CHECK-NEXT:    store %struct.ad_struct* [[X]], %struct.ad_struct** [[X_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load %struct.ad_struct*, %struct.ad_struct** [[X_ADDR]], align 8
+// CHECK-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_AD_STRUCT:%.*]], %struct.ad_struct* [[TMP0]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP1:%.*]] = load double*, double** [[A]], align 8
+// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64
+// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
+// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT:    ret double* [[TMP1]]
+//
 double *foo(ad_struct& x) {
-// CHECK-LABEL: @_Z3fooR9ad_struct
 
-// CHECK: [[PTRINT1:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR1:%.+]] = and i64 [[PTRINT1]], 63
-// CHECK: [[MASKCOND1:%.+]] = icmp eq i64 [[MASKEDPTR1]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND1]])
   return x.a;
 }
 
+// CHECK-LABEL: define {{[^@]+}}@_Z3gooP9ad_struct
+// CHECK-SAME: (%struct.ad_struct* [[X:%.*]]) #0
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca %struct.ad_struct*, align 8
+// CHECK-NEXT:    store %struct.ad_struct* [[X]], %struct.ad_struct** [[X_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load %struct.ad_struct*, %struct.ad_struct** [[X_ADDR]], align 8
+// CHECK-NEXT:    [[A:%.*]] = getelementptr inbounds [[STRUCT_AD_STRUCT:%.*]], %struct.ad_struct* [[TMP0]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP1:%.*]] = load double*, double** [[A]], align 8
+// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64
+// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
+// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT:    ret double* [[TMP1]]
+//
 double *goo(ad_struct *x) {
-// CHECK-LABEL: @_Z3gooP9ad_struct
 
-// CHECK: [[PTRINT2:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR2:%.+]] = and i64 [[PTRINT2]], 63
-// CHECK: [[MASKCOND2:%.+]] = icmp eq i64 [[MASKEDPTR2]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND2]])
   return x->a;
 }
 
+// CHECK-LABEL: define {{[^@]+}}@_Z3barPPd
+// CHECK-SAME: (double** [[X:%.*]]) #0
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca double**, align 8
+// CHECK-NEXT:    store double** [[X]], double*** [[X_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load double*, double** [[TMP0]], align 8
+// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64
+// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
+// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT:    ret double* [[TMP1]]
+//
 double *bar(aligned_double *x) {
-// CHECK-LABEL: @_Z3barPPd
 
-// CHECK: [[PTRINT3:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR3:%.+]] = and i64 [[PTRINT3]], 63
-// CHECK: [[MASKCOND3:%.+]] = icmp eq i64 [[MASKEDPTR3]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND3]])
   return *x;
 }
 
+// CHECK-LABEL: define {{[^@]+}}@_Z3carRPd
+// CHECK-SAME: (double** nonnull align 8 dereferenceable(8) [[X:%.*]]) #0
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca double**, align 8
+// CHECK-NEXT:    store double** [[X]], double*** [[X_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load double*, double** [[TMP0]], align 8
+// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64
+// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
+// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT:    ret double* [[TMP1]]
+//
 double *car(aligned_double &x) {
-// CHECK-LABEL: @_Z3carRPd
 
-// CHECK: [[PTRINT4:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR4:%.+]] = and i64 [[PTRINT4]], 63
-// CHECK: [[MASKCOND4:%.+]] = icmp eq i64 [[MASKEDPTR4]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND4]])
   return x;
 }
 
+// CHECK-LABEL: define {{[^@]+}}@_Z3darPPd
+// CHECK-SAME: (double** [[X:%.*]]) #0
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca double**, align 8
+// CHECK-NEXT:    store double** [[X]], double*** [[X_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds double*, double** [[TMP0]], i64 5
+// CHECK-NEXT:    [[TMP1:%.*]] = load double*, double** [[ARRAYIDX]], align 8
+// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64
+// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
+// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT:    ret double* [[TMP1]]
+//
 double *dar(aligned_double *x) {
-// CHECK-LABEL: @_Z3darPPd
 
-// CHECK: [[PTRINT5:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR5:%.+]] = and i64 [[PTRINT5]], 63
-// CHECK: [[MASKCOND5:%.+]] = icmp eq i64 [[MASKEDPTR5]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND5]])
   return x[5];
 }
 
 aligned_double eep();
+// CHECK-LABEL: define {{[^@]+}}@_Z3retv() #0
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[CALL:%.*]] = call double* @_Z3eepv()
+// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint double* [[CALL]] to i64
+// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
+// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT:    ret double* [[CALL]]
+//
 double *ret() {
-// CHECK-LABEL: @_Z3retv
 
-// CHECK: [[PTRINT6:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR6:%.+]] = and i64 [[PTRINT6]], 63
-// CHECK: [[MASKCOND6:%.+]] = icmp eq i64 [[MASKEDPTR6]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND6]])
   return eep();
 }
 
+// CHECK-LABEL: define {{[^@]+}}@_Z3no1PPd
+// CHECK-SAME: (double** [[X:%.*]]) #0
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca double**, align 8
+// CHECK-NEXT:    store double** [[X]], double*** [[X_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8
+// CHECK-NEXT:    ret double** [[TMP0]]
+//
 double **no1(aligned_double *x) {
-// CHECK-LABEL: @_Z3no1PPd
   return x;
-// CHECK-NOT: call void @llvm.assume
 }
 
+// CHECK-LABEL: define {{[^@]+}}@_Z3no2RPd
+// CHECK-SAME: (double** nonnull align 8 dereferenceable(8) [[X:%.*]]) #0
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca double**, align 8
+// CHECK-NEXT:    store double** [[X]], double*** [[X_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8
+// CHECK-NEXT:    ret double** [[TMP0]]
+//
 double *&no2(aligned_double &x) {
-// CHECK-LABEL: @_Z3no2RPd
   return x;
-// CHECK-NOT: call void @llvm.assume
 }
 
+// CHECK-LABEL: define {{[^@]+}}@_Z3no3RPd
+// CHECK-SAME: (double** nonnull align 8 dereferenceable(8) [[X:%.*]]) #0
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca double**, align 8
+// CHECK-NEXT:    store double** [[X]], double*** [[X_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8
+// CHECK-NEXT:    ret double** [[TMP0]]
+//
 double **no3(aligned_double &x) {
-// CHECK-LABEL: @_Z3no3RPd
   return &x;
-// CHECK-NOT: call void @llvm.assume
 }
 
+// CHECK-LABEL: define {{[^@]+}}@_Z3no3Pd
+// CHECK-SAME: (double* align 64 [[X:%.*]]) #0
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca double*, align 8
+// CHECK-NEXT:    store double* [[X]], double** [[X_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load double*, double** [[X_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load double, double* [[TMP0]], align 8
+// CHECK-NEXT:    ret double [[TMP1]]
+//
 double no3(aligned_double x) {
-// CHECK-LABEL: @_Z3no3Pd
   return *x;
-// CHECK-NOT: call void @llvm.assume
 }
 
+// CHECK-LABEL: define {{[^@]+}}@_Z3no4Pd
+// CHECK-SAME: (double* align 64 [[X:%.*]]) #0
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca double*, align 8
+// CHECK-NEXT:    store double* [[X]], double** [[X_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load double*, double** [[X_ADDR]], align 8
+// CHECK-NEXT:    ret double* [[TMP0]]
+//
 double *no4(aligned_double x) {
-// CHECK-LABEL: @_Z3no4Pd
   return x;
-// CHECK-NOT: call void @llvm.assume
 }
 

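Every positive case in align_value.cpp checks the same assumption pattern: when a pointer whose type carries align_value(N) is loaded, Clang emits a ptrtoint, an `and` with N-1, an `icmp eq` against 0, and a call to @llvm.assume. A minimal sketch of the construct under test (the function name `first_elem` is illustrative, not from the test):

    typedef double *__attribute__((align_value(64))) aligned_double;

    /* Loading *p emits ptrtoint / and 63 / icmp eq 0 / @llvm.assume,
       telling the optimizer the loaded pointer is 64-byte aligned. */
    double first_elem(aligned_double *p) {
      return **p;
    }
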
diff  --git a/clang/test/CodeGen/alloc-align-attr.c b/clang/test/CodeGen/alloc-align-attr.c
index 6294450d0444..9517c50dbb1d 100644
--- a/clang/test/CodeGen/alloc-align-attr.c
+++ b/clang/test/CodeGen/alloc-align-attr.c
@@ -1,57 +1,90 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple x86_64-pc-linux -emit-llvm -o - %s | FileCheck %s
 
 __INT32_TYPE__*m1(__INT32_TYPE__ i) __attribute__((alloc_align(1)));
 
 // Condition where parameter to m1 is not size_t.
+// CHECK-LABEL: define {{[^@]+}}@test1
+// CHECK-SAME: (i32 [[A:%.*]]) #0
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
+// CHECK-NEXT:    [[CALL:%.*]] = call i32* @m1(i32 [[TMP0]])
+// CHECK-NEXT:    [[ALIGNMENTCAST:%.*]] = zext i32 [[TMP0]] to i64
+// CHECK-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
+// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
+// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
+// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
+// CHECK-NEXT:    ret i32 [[TMP1]]
+//
 __INT32_TYPE__ test1(__INT32_TYPE__ a) {
-// CHECK: define i32 @test1
   return *m1(a);
-// CHECK: call i32* @m1(i32 [[PARAM1:%[^\)]+]])
-// CHECK: [[ALIGNCAST1:%.+]] = zext i32 [[PARAM1]] to i64
-// CHECK: [[MASK1:%.+]] = sub i64 [[ALIGNCAST1]], 1
-// CHECK: [[PTRINT1:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR1:%.+]] = and i64 [[PTRINT1]], [[MASK1]]
-// CHECK: [[MASKCOND1:%.+]] = icmp eq i64 [[MASKEDPTR1]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND1]])
 }
 // Condition where test2 param needs casting.
+// CHECK-LABEL: define {{[^@]+}}@test2
+// CHECK-SAME: (i64 [[A:%.*]]) #0
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
+// CHECK-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
+// CHECK-NEXT:    [[CALL:%.*]] = call i32* @m1(i32 [[CONV]])
+// CHECK-NEXT:    [[ALIGNMENTCAST:%.*]] = zext i32 [[CONV]] to i64
+// CHECK-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
+// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
+// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
+// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
+// CHECK-NEXT:    ret i32 [[TMP1]]
+//
 __INT32_TYPE__ test2(__SIZE_TYPE__ a) {
-// CHECK: define i32 @test2
   return *m1(a);
-// CHECK: [[CONV2:%.+]] = trunc i64 %{{.+}} to i32
-// CHECK: call i32* @m1(i32 [[CONV2]])
-// CHECK: [[ALIGNCAST2:%.+]] = zext i32 [[CONV2]] to i64
-// CHECK: [[MASK2:%.+]] = sub i64 [[ALIGNCAST2]], 1
-// CHECK: [[PTRINT2:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR2:%.+]] = and i64 [[PTRINT2]], [[MASK2]]
-// CHECK: [[MASKCOND2:%.+]] = icmp eq i64 [[MASKEDPTR2]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND2]])
 }
 __INT32_TYPE__ *m2(__SIZE_TYPE__ i) __attribute__((alloc_align(1)));
 
 // test3 param needs casting, but 'm2' is correct.
+// CHECK-LABEL: define {{[^@]+}}@test3
+// CHECK-SAME: (i32 [[A:%.*]]) #0
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
+// CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[TMP0]] to i64
+// CHECK-NEXT:    [[CALL:%.*]] = call i32* @m2(i64 [[CONV]])
+// CHECK-NEXT:    [[MASK:%.*]] = sub i64 [[CONV]], 1
+// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
+// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
+// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
+// CHECK-NEXT:    ret i32 [[TMP1]]
+//
 __INT32_TYPE__ test3(__INT32_TYPE__ a) {
-// CHECK: define i32 @test3
   return *m2(a);
-// CHECK: [[CONV3:%.+]] = sext i32 %{{.+}} to i64
-// CHECK: call i32* @m2(i64 [[CONV3]])
-// CHECK: [[MASK3:%.+]] = sub i64 [[CONV3]], 1
-// CHECK: [[PTRINT3:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR3:%.+]] = and i64 [[PTRINT3]], [[MASK3]]
-// CHECK: [[MASKCOND3:%.+]] = icmp eq i64 [[MASKEDPTR3]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND3]])
 }
 
 // Every type matches, canonical example.
+// CHECK-LABEL: define {{[^@]+}}@test4
+// CHECK-SAME: (i64 [[A:%.*]]) #0
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
+// CHECK-NEXT:    store i64 [[A]], i64* [[A_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8
+// CHECK-NEXT:    [[CALL:%.*]] = call i32* @m2(i64 [[TMP0]])
+// CHECK-NEXT:    [[MASK:%.*]] = sub i64 [[TMP0]], 1
+// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
+// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
+// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4
+// CHECK-NEXT:    ret i32 [[TMP1]]
+//
 __INT32_TYPE__ test4(__SIZE_TYPE__ a) {
-// CHECK: define i32 @test4
   return *m2(a);
-// CHECK: call i32* @m2(i64 [[PARAM4:%[^\)]+]])
-// CHECK: [[MASK4:%.+]] = sub i64 [[PARAM4]], 1
-// CHECK: [[PTRINT4:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR4:%.+]] = and i64 [[PTRINT4]], [[MASK4]]
-// CHECK: [[MASKCOND4:%.+]] = icmp eq i64 [[MASKEDPTR4]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND4]])
 }
 
 
@@ -60,30 +93,81 @@ struct MultiArgs { __INT64_TYPE__ a, b;};
 // Struct parameter doesn't take up an IR parameter, 'i' takes up 2.
 // Truncation to i64 is permissible, since alignments of greater than 2^64 are insane.
 __INT32_TYPE__ *m3(struct Empty s, __int128_t i) __attribute__((alloc_align(2)));
+// CHECK-LABEL: define {{[^@]+}}@test5
+// CHECK-SAME: (i64 [[A_COERCE0:%.*]], i64 [[A_COERCE1:%.*]]) #0
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[A:%.*]] = alloca i128, align 16
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i128, align 16
+// CHECK-NEXT:    [[E:%.*]] = alloca [[STRUCT_EMPTY:%.*]], align 1
+// CHECK-NEXT:    [[COERCE:%.*]] = alloca i128, align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128* [[A]] to { i64, i64 }*
+// CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP0]], i32 0, i32 0
+// CHECK-NEXT:    store i64 [[A_COERCE0]], i64* [[TMP1]], align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP0]], i32 0, i32 1
+// CHECK-NEXT:    store i64 [[A_COERCE1]], i64* [[TMP2]], align 8
+// CHECK-NEXT:    [[A1:%.*]] = load i128, i128* [[A]], align 16
+// CHECK-NEXT:    store i128 [[A1]], i128* [[A_ADDR]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = load i128, i128* [[A_ADDR]], align 16
+// CHECK-NEXT:    store i128 [[TMP3]], i128* [[COERCE]], align 16
+// CHECK-NEXT:    [[TMP4:%.*]] = bitcast i128* [[COERCE]] to { i64, i64 }*
+// CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP4]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = load i64, i64* [[TMP5]], align 16
+// CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP4]], i32 0, i32 1
+// CHECK-NEXT:    [[TMP8:%.*]] = load i64, i64* [[TMP7]], align 8
+// CHECK-NEXT:    [[CALL:%.*]] = call i32* @m3(i64 [[TMP6]], i64 [[TMP8]])
+// CHECK-NEXT:    [[ALIGNMENTCAST:%.*]] = trunc i128 [[TMP3]] to i64
+// CHECK-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
+// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
+// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
+// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT:    [[TMP9:%.*]] = load i32, i32* [[CALL]], align 4
+// CHECK-NEXT:    ret i32 [[TMP9]]
+//
 __INT32_TYPE__ test5(__int128_t a) {
-// CHECK: define i32 @test5
   struct Empty e;
   return *m3(e, a);
-// CHECK: call i32* @m3(i64 %{{.*}}, i64 %{{.*}})
-// CHECK: [[ALIGNCAST5:%.+]] = trunc i128 %{{.*}} to i64
-// CHECK: [[MASK5:%.+]] = sub i64 [[ALIGNCAST5]], 1
-// CHECK: [[PTRINT5:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR5:%.+]] = and i64 [[PTRINT5]], [[MASK5]]
-// CHECK: [[MASKCOND5:%.+]] = icmp eq i64 [[MASKEDPTR5]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND5]])
 }
 // Struct parameter takes up 2 parameters, 'i' takes up 2.
 __INT32_TYPE__ *m4(struct MultiArgs s, __int128_t i) __attribute__((alloc_align(2)));
+// CHECK-LABEL: define {{[^@]+}}@test6
+// CHECK-SAME: (i64 [[A_COERCE0:%.*]], i64 [[A_COERCE1:%.*]]) #0
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[A:%.*]] = alloca i128, align 16
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i128, align 16
+// CHECK-NEXT:    [[E:%.*]] = alloca [[STRUCT_MULTIARGS:%.*]], align 8
+// CHECK-NEXT:    [[COERCE:%.*]] = alloca i128, align 16
+// CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128* [[A]] to { i64, i64 }*
+// CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP0]], i32 0, i32 0
+// CHECK-NEXT:    store i64 [[A_COERCE0]], i64* [[TMP1]], align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP0]], i32 0, i32 1
+// CHECK-NEXT:    store i64 [[A_COERCE1]], i64* [[TMP2]], align 8
+// CHECK-NEXT:    [[A1:%.*]] = load i128, i128* [[A]], align 16
+// CHECK-NEXT:    store i128 [[A1]], i128* [[A_ADDR]], align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = load i128, i128* [[A_ADDR]], align 16
+// CHECK-NEXT:    [[TMP4:%.*]] = bitcast %struct.MultiArgs* [[E]] to { i64, i64 }*
+// CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP4]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP6:%.*]] = load i64, i64* [[TMP5]], align 8
+// CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP4]], i32 0, i32 1
+// CHECK-NEXT:    [[TMP8:%.*]] = load i64, i64* [[TMP7]], align 8
+// CHECK-NEXT:    store i128 [[TMP3]], i128* [[COERCE]], align 16
+// CHECK-NEXT:    [[TMP9:%.*]] = bitcast i128* [[COERCE]] to { i64, i64 }*
+// CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP9]], i32 0, i32 0
+// CHECK-NEXT:    [[TMP11:%.*]] = load i64, i64* [[TMP10]], align 16
+// CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP9]], i32 0, i32 1
+// CHECK-NEXT:    [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8
+// CHECK-NEXT:    [[CALL:%.*]] = call i32* @m4(i64 [[TMP6]], i64 [[TMP8]], i64 [[TMP11]], i64 [[TMP13]])
+// CHECK-NEXT:    [[ALIGNMENTCAST:%.*]] = trunc i128 [[TMP3]] to i64
+// CHECK-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1
+// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
+// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]
+// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT:    [[TMP14:%.*]] = load i32, i32* [[CALL]], align 4
+// CHECK-NEXT:    ret i32 [[TMP14]]
+//
 __INT32_TYPE__ test6(__int128_t a) {
-// CHECK: define i32 @test6
   struct MultiArgs e;
   return *m4(e, a);
-// CHECK: call i32* @m4(i64 %{{.*}}, i64 %{{.*}}, i64 %{{.*}})
-// CHECK: [[ALIGNCAST6:%.+]] = trunc i128 %{{.*}} to i64
-// CHECK: [[MASK6:%.+]] = sub i64 [[ALIGNCAST6]], 1
-// CHECK: [[PTRINT6:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR6:%.+]] = and i64 [[PTRINT6]], [[MASK6]]
-// CHECK: [[MASKCOND6:%.+]] = icmp eq i64 [[MASKEDPTR6]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND6]])
 }
 

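alloc-align-attr.c tests the same assume pattern for the alloc_align attribute, where the alignment is not a compile-time constant but the value of the designated call argument: the emitted mask is `align - 1`, with a zext, sext, or trunc to i64 inserted when the argument type is not size_t (test1, test3, and test5/test6 above, respectively). A sketch under those semantics; `my_aligned_alloc` is a hypothetical allocator, not part of the test:

    /* alloc_align(2): the second argument gives the alignment of the
       returned pointer (parameters are 1-indexed). */
    void *my_aligned_alloc(unsigned long size, unsigned long align)
        __attribute__((alloc_align(2)));

    int first_int(unsigned long n) {
      /* Clang emits mask = 64 - 1 for this call and
         assume((ptrtoint(p) & mask) == 0) on the result. */
      int *p = my_aligned_alloc(n * sizeof(int), 64);
      return p[0];
    }
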
diff  --git a/clang/test/CodeGen/builtin-align.c b/clang/test/CodeGen/builtin-align.c
index 8fb51822164d..7e66e2b5c0b9 100644
--- a/clang/test/CodeGen/builtin-align.c
+++ b/clang/test/CodeGen/builtin-align.c
@@ -1,21 +1,22 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 /// Check the code generation for the alignment builtins
 /// To make the test case easier to read, run SROA after generating IR to remove the alloca instructions.
 // RUN: %clang_cc1 -triple=x86_64-unknown-unknown -DTEST_VOID_PTR \
 // RUN:   -o - -emit-llvm %s -disable-O0-optnone | opt -S -sroa | \
-// RUN:   FileCheck %s -check-prefixes CHECK,POINTER,ALIGNMENT_EXT \
+// RUN:   FileCheck %s -check-prefixes CHECK,CHECK-VOID_PTR \
 // RUN:   -enable-var-scope '-D$PTRTYPE=i8'
 // RUN: %clang_cc1 -triple=x86_64-unknown-unknown -DTEST_FLOAT_PTR \
 // RUN:   -o - -emit-llvm %s -disable-O0-optnone | opt -S -sroa | \
-// RUN:   FileCheck %s -check-prefixes CHECK,POINTER,NON_I8_POINTER,ALIGNMENT_EXT \
+// RUN:   FileCheck %s -check-prefixes CHECK,CHECK-FLOAT_PTR \
 // RUN:   -enable-var-scope '-D$PTRTYPE=f32'
 // RUN: %clang_cc1 -triple=x86_64-unknown-unknown -DTEST_LONG \
 // RUN:   -o - -emit-llvm %s -disable-O0-optnone | opt -S -sroa | \
-// RUN:   FileCheck %s -check-prefixes CHECK,INTEGER,ALIGNMENT_EXT -enable-var-scope
+// RUN:   FileCheck %s -check-prefixes CHECK,CHECK-LONG -enable-var-scope
 /// Check that we can handle the case where the alignment parameter is wider
 /// than the source type (generate a trunc on alignment instead of zext)
 // RUN: %clang_cc1 -triple=x86_64-unknown-unknown -DTEST_USHORT \
 // RUN:   -o - -emit-llvm %s -disable-O0-optnone | opt -S -sroa | \
-// RUN:   FileCheck %s -check-prefixes CHECK,INTEGER,ALIGNMENT_TRUNC -enable-var-scope
+// RUN:   FileCheck %s -check-prefixes CHECK,CHECK-USHORT -enable-var-scope
 
 
 #ifdef TEST_VOID_PTR
@@ -24,8 +25,6 @@
 #define TYPE float *
 #elif defined(TEST_LONG)
 #define TYPE long
-#elif defined(TEST_CAP)
-#define TYPE void *__capability
 #elif defined(TEST_USHORT)
 #define TYPE unsigned short
 #else
@@ -49,78 +48,185 @@ int up_2 = __builtin_align_up(256, 32);
 // CHECK: @up_2 = global i32 256, align 4
 
 /// Capture the IR type here to use in the remaining FileCheck captures:
-// CHECK: define {{[^@]+}}@get_type() #0
-// CHECK-NEXT:  entry:
-// POINTER-NEXT:    ret [[$TYPE:.+]] null
-// INTEGER-NEXT:    ret [[$TYPE:.+]] 0
+// CHECK-VOID_PTR-LABEL: define {{[^@]+}}@get_type() #0
+// CHECK-VOID_PTR-NEXT:  entry:
+// CHECK-VOID_PTR-NEXT:    ret i8* null
+//
+// CHECK-FLOAT_PTR-LABEL: define {{[^@]+}}@get_type() #0
+// CHECK-FLOAT_PTR-NEXT:  entry:
+// CHECK-FLOAT_PTR-NEXT:    ret float* null
+//
+// CHECK-LONG-LABEL: define {{[^@]+}}@get_type() #0
+// CHECK-LONG-NEXT:  entry:
+// CHECK-LONG-NEXT:    ret i64 0
+//
+// CHECK-USHORT-LABEL: define {{[^@]+}}@get_type() #0
+// CHECK-USHORT-NEXT:  entry:
+// CHECK-USHORT-NEXT:    ret i16 0
 //
 TYPE get_type(void) {
   return (TYPE)0;
 }
 
-// CHECK-LABEL: define {{[^@]+}}@is_aligned
-// CHECK-SAME: ([[$TYPE]] {{[^%]*}}[[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
-// CHECK-NEXT:  entry:
-// ALIGNMENT_EXT-NEXT:   [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to [[ALIGN_TYPE:i64]]
-// ALIGNMENT_TRUNC-NEXT: [[ALIGNMENT:%.*]] = trunc i32 [[ALIGN]] to [[ALIGN_TYPE:i16]]
-// CHECK-NEXT:           [[MASK:%.*]] = sub [[ALIGN_TYPE]] [[ALIGNMENT]], 1
-// POINTER-NEXT:         [[PTR:%.*]] = ptrtoint [[$TYPE]] %ptr to i64
-// CHECK-NEXT:           [[SET_BITS:%.*]] = and [[ALIGN_TYPE]] [[PTR]], [[MASK]]
-// CHECK-NEXT:           [[IS_ALIGNED:%.*]] = icmp eq [[ALIGN_TYPE]] [[SET_BITS]], 0
-// CHECK-NEXT:           ret i1 [[IS_ALIGNED]]
+// CHECK-VOID_PTR-LABEL: define {{[^@]+}}@is_aligned
+// CHECK-VOID_PTR-SAME: (i8* [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
+// CHECK-VOID_PTR-NEXT:  entry:
+// CHECK-VOID_PTR-NEXT:    [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64
+// CHECK-VOID_PTR-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-VOID_PTR-NEXT:    [[SRC_ADDR:%.*]] = ptrtoint i8* [[PTR]] to i64
+// CHECK-VOID_PTR-NEXT:    [[SET_BITS:%.*]] = and i64 [[SRC_ADDR]], [[MASK]]
+// CHECK-VOID_PTR-NEXT:    [[IS_ALIGNED:%.*]] = icmp eq i64 [[SET_BITS]], 0
+// CHECK-VOID_PTR-NEXT:    ret i1 [[IS_ALIGNED]]
+//
+// CHECK-FLOAT_PTR-LABEL: define {{[^@]+}}@is_aligned
+// CHECK-FLOAT_PTR-SAME: (float* [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
+// CHECK-FLOAT_PTR-NEXT:  entry:
+// CHECK-FLOAT_PTR-NEXT:    [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64
+// CHECK-FLOAT_PTR-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-FLOAT_PTR-NEXT:    [[SRC_ADDR:%.*]] = ptrtoint float* [[PTR]] to i64
+// CHECK-FLOAT_PTR-NEXT:    [[SET_BITS:%.*]] = and i64 [[SRC_ADDR]], [[MASK]]
+// CHECK-FLOAT_PTR-NEXT:    [[IS_ALIGNED:%.*]] = icmp eq i64 [[SET_BITS]], 0
+// CHECK-FLOAT_PTR-NEXT:    ret i1 [[IS_ALIGNED]]
+//
+// CHECK-LONG-LABEL: define {{[^@]+}}@is_aligned
+// CHECK-LONG-SAME: (i64 [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
+// CHECK-LONG-NEXT:  entry:
+// CHECK-LONG-NEXT:    [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64
+// CHECK-LONG-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-LONG-NEXT:    [[SET_BITS:%.*]] = and i64 [[PTR]], [[MASK]]
+// CHECK-LONG-NEXT:    [[IS_ALIGNED:%.*]] = icmp eq i64 [[SET_BITS]], 0
+// CHECK-LONG-NEXT:    ret i1 [[IS_ALIGNED]]
+//
+// CHECK-USHORT-LABEL: define {{[^@]+}}@is_aligned
+// CHECK-USHORT-SAME: (i16 zeroext [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
+// CHECK-USHORT-NEXT:  entry:
+// CHECK-USHORT-NEXT:    [[ALIGNMENT:%.*]] = trunc i32 [[ALIGN]] to i16
+// CHECK-USHORT-NEXT:    [[MASK:%.*]] = sub i16 [[ALIGNMENT]], 1
+// CHECK-USHORT-NEXT:    [[SET_BITS:%.*]] = and i16 [[PTR]], [[MASK]]
+// CHECK-USHORT-NEXT:    [[IS_ALIGNED:%.*]] = icmp eq i16 [[SET_BITS]], 0
+// CHECK-USHORT-NEXT:    ret i1 [[IS_ALIGNED]]
 //
 _Bool is_aligned(TYPE ptr, unsigned align) {
   return __builtin_is_aligned(ptr, align);
 }
 
-// CHECK-LABEL: define {{[^@]+}}@align_up
-// CHECK-SAME: ([[$TYPE]] {{[^%]*}}[[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
-// CHECK-NEXT:  entry:
-// ALIGNMENT_EXT-NEXT:   [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to [[ALIGN_TYPE:i64]]
-// ALIGNMENT_TRUNC-NEXT: [[ALIGNMENT:%.*]] = trunc i32 [[ALIGN]] to [[ALIGN_TYPE:i16]]
-// CHECK-NEXT:           [[MASK:%.*]] = sub [[ALIGN_TYPE]] [[ALIGNMENT]], 1
-// INTEGER-NEXT:         [[OVER_BOUNDARY:%.*]] = add [[$TYPE]] [[PTR]], [[MASK]]
 // NOTYET-POINTER-NEXT:  [[ALIGNED_RESULT:%.*]] = call [[$TYPE]] @llvm.ptrmask.p0[[$PTRTYPE]].p0i8.i64(i8* [[OVER_BOUNDARY]], [[ALIGN_TYPE]] [[INVERTED_MASK]])
-// POINTER-NEXT:         [[INTPTR:%.*]] = ptrtoint [[$TYPE]] [[PTR]] to [[ALIGN_TYPE]]
-// POINTER-NEXT:         [[OVER_BOUNDARY:%.*]] = add [[ALIGN_TYPE]] [[INTPTR]], [[MASK]]
-// CHECK-NEXT:           [[INVERTED_MASK:%.*]] = xor [[ALIGN_TYPE]] [[MASK]], -1
-// CHECK-NEXT:           [[ALIGNED_RESULT:%.*]] = and [[ALIGN_TYPE]] [[OVER_BOUNDARY]], [[INVERTED_MASK]]
-// POINTER-NEXT:         [[DIFF:%.*]] = sub i64 [[ALIGNED_RESULT]], [[INTPTR]]
-// NON_I8_POINTER-NEXT:  [[PTR:%.*]] = bitcast [[$TYPE]] {{%.*}} to i8*
-// POINTER-NEXT:         [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[PTR]], i64 [[DIFF]]
-// NON_I8_POINTER-NEXT:  [[ALIGNED_RESULT:%.*]] = bitcast i8* {{%.*}} to [[$TYPE]]
-// POINTER-NEXT:         [[ASSUME_MASK:%.*]] = sub i64 %alignment, 1
-// POINTER-NEXT:         [[ASSUME_INTPTR:%.*]]= ptrtoint [[$TYPE]] [[ALIGNED_RESULT]] to i64
-// POINTER-NEXT:         [[MASKEDPTR:%.*]] = and i64 %ptrint, [[ASSUME_MASK]]
-// POINTER-NEXT:         [[MASKEDCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// POINTER-NEXT:         call void @llvm.assume(i1 [[MASKEDCOND]])
-// CHECK-NEXT:           ret [[$TYPE]] [[ALIGNED_RESULT]]
+// CHECK-VOID_PTR-LABEL: define {{[^@]+}}@align_up
+// CHECK-VOID_PTR-SAME: (i8* [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
+// CHECK-VOID_PTR-NEXT:  entry:
+// CHECK-VOID_PTR-NEXT:    [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64
+// CHECK-VOID_PTR-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-VOID_PTR-NEXT:    [[INTPTR:%.*]] = ptrtoint i8* [[PTR]] to i64
+// CHECK-VOID_PTR-NEXT:    [[OVER_BOUNDARY:%.*]] = add i64 [[INTPTR]], [[MASK]]
+// CHECK-VOID_PTR-NEXT:    [[INVERTED_MASK:%.*]] = xor i64 [[MASK]], -1
+// CHECK-VOID_PTR-NEXT:    [[ALIGNED_INTPTR:%.*]] = and i64 [[OVER_BOUNDARY]], [[INVERTED_MASK]]
+// CHECK-VOID_PTR-NEXT:    [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
+// CHECK-VOID_PTR-NEXT:    [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[PTR]], i64 [[DIFF]]
+// CHECK-VOID_PTR-NEXT:    [[MASK1:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-VOID_PTR-NEXT:    [[PTRINT:%.*]] = ptrtoint i8* [[ALIGNED_RESULT]] to i64
+// CHECK-VOID_PTR-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK1]]
+// CHECK-VOID_PTR-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-VOID_PTR-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-VOID_PTR-NEXT:    ret i8* [[ALIGNED_RESULT]]
+//
+// CHECK-FLOAT_PTR-LABEL: define {{[^@]+}}@align_up
+// CHECK-FLOAT_PTR-SAME: (float* [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
+// CHECK-FLOAT_PTR-NEXT:  entry:
+// CHECK-FLOAT_PTR-NEXT:    [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64
+// CHECK-FLOAT_PTR-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-FLOAT_PTR-NEXT:    [[INTPTR:%.*]] = ptrtoint float* [[PTR]] to i64
+// CHECK-FLOAT_PTR-NEXT:    [[OVER_BOUNDARY:%.*]] = add i64 [[INTPTR]], [[MASK]]
+// CHECK-FLOAT_PTR-NEXT:    [[INVERTED_MASK:%.*]] = xor i64 [[MASK]], -1
+// CHECK-FLOAT_PTR-NEXT:    [[ALIGNED_INTPTR:%.*]] = and i64 [[OVER_BOUNDARY]], [[INVERTED_MASK]]
+// CHECK-FLOAT_PTR-NEXT:    [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
+// CHECK-FLOAT_PTR-NEXT:    [[TMP0:%.*]] = bitcast float* [[PTR]] to i8*
+// CHECK-FLOAT_PTR-NEXT:    [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i64 [[DIFF]]
+// CHECK-FLOAT_PTR-NEXT:    [[TMP1:%.*]] = bitcast i8* [[ALIGNED_RESULT]] to float*
+// CHECK-FLOAT_PTR-NEXT:    [[MASK1:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-FLOAT_PTR-NEXT:    [[PTRINT:%.*]] = ptrtoint float* [[TMP1]] to i64
+// CHECK-FLOAT_PTR-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK1]]
+// CHECK-FLOAT_PTR-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-FLOAT_PTR-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-FLOAT_PTR-NEXT:    ret float* [[TMP1]]
+//
+// CHECK-LONG-LABEL: define {{[^@]+}}@align_up
+// CHECK-LONG-SAME: (i64 [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
+// CHECK-LONG-NEXT:  entry:
+// CHECK-LONG-NEXT:    [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64
+// CHECK-LONG-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-LONG-NEXT:    [[OVER_BOUNDARY:%.*]] = add i64 [[PTR]], [[MASK]]
+// CHECK-LONG-NEXT:    [[INVERTED_MASK:%.*]] = xor i64 [[MASK]], -1
+// CHECK-LONG-NEXT:    [[ALIGNED_RESULT:%.*]] = and i64 [[OVER_BOUNDARY]], [[INVERTED_MASK]]
+// CHECK-LONG-NEXT:    ret i64 [[ALIGNED_RESULT]]
+//
+// CHECK-USHORT-LABEL: define {{[^@]+}}@align_up
+// CHECK-USHORT-SAME: (i16 zeroext [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
+// CHECK-USHORT-NEXT:  entry:
+// CHECK-USHORT-NEXT:    [[ALIGNMENT:%.*]] = trunc i32 [[ALIGN]] to i16
+// CHECK-USHORT-NEXT:    [[MASK:%.*]] = sub i16 [[ALIGNMENT]], 1
+// CHECK-USHORT-NEXT:    [[OVER_BOUNDARY:%.*]] = add i16 [[PTR]], [[MASK]]
+// CHECK-USHORT-NEXT:    [[INVERTED_MASK:%.*]] = xor i16 [[MASK]], -1
+// CHECK-USHORT-NEXT:    [[ALIGNED_RESULT:%.*]] = and i16 [[OVER_BOUNDARY]], [[INVERTED_MASK]]
+// CHECK-USHORT-NEXT:    ret i16 [[ALIGNED_RESULT]]
 //
 TYPE align_up(TYPE ptr, unsigned align) {
   return __builtin_align_up(ptr, align);
 }
 
-// CHECK-LABEL: define {{[^@]+}}@align_down
-// CHECK-SAME: ([[$TYPE]] {{[^%]*}}[[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
-// CHECK-NEXT:  entry:
-// ALIGNMENT_EXT-NEXT:   [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to [[ALIGN_TYPE:i64]]
-// ALIGNMENT_TRUNC-NEXT: [[ALIGNMENT:%.*]] = trunc i32 [[ALIGN]] to [[ALIGN_TYPE:i16]]
-// CHECK-NEXT:           [[MASK:%.*]] = sub [[ALIGN_TYPE]] [[ALIGNMENT]], 1
 // NOTYET-POINTER-NEXT:  [[ALIGNED_RESULT:%.*]] = call [[$TYPE]] @llvm.ptrmask.p0[[$PTRTYPE]].p0[[$PTRTYPE]].i64([[$TYPE]] [[PTR]], [[ALIGN_TYPE]] [[INVERTED_MASK]])
-// POINTER-NEXT:         [[INTPTR:%.*]] = ptrtoint [[$TYPE]] [[PTR]] to [[ALIGN_TYPE]]
-// CHECK-NEXT:           [[INVERTED_MASK:%.*]] = xor [[ALIGN_TYPE]] [[MASK]], -1
-// POINTER-NEXT:         [[ALIGNED_INTPTR:%.*]] = and [[ALIGN_TYPE]] [[INTPTR]], [[INVERTED_MASK]]
-// POINTER-NEXT:         [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
-// NON_I8_POINTER-NEXT:  [[PTR:%.*]] = bitcast [[$TYPE]] {{%.*}} to i8*
-// POINTER-NEXT:         [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[PTR]], i64 [[DIFF]]
-// NON_I8_POINTER-NEXT:  [[ALIGNED_RESULT:%.*]] = bitcast i8* {{%.*}} to [[$TYPE]]
-// INTEGER-NEXT:         [[ALIGNED_RESULT:%.*]] = and [[ALIGN_TYPE]] [[PTR]], [[INVERTED_MASK]]
-// POINTER-NEXT:         [[ASSUME_MASK:%.*]] = sub i64 %alignment, 1
-// POINTER-NEXT:         [[ASSUME_INTPTR:%.*]]= ptrtoint [[$TYPE]] [[ALIGNED_RESULT]] to i64
-// POINTER-NEXT:         [[MASKEDPTR:%.*]] = and i64 %ptrint, [[ASSUME_MASK]]
-// POINTER-NEXT:         [[MASKEDCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// POINTER-NEXT:         call void @llvm.assume(i1 [[MASKEDCOND]])
-// CHECK-NEXT:           ret [[$TYPE]] [[ALIGNED_RESULT]]
+// CHECK-VOID_PTR-LABEL: define {{[^@]+}}@align_down
+// CHECK-VOID_PTR-SAME: (i8* [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
+// CHECK-VOID_PTR-NEXT:  entry:
+// CHECK-VOID_PTR-NEXT:    [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64
+// CHECK-VOID_PTR-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-VOID_PTR-NEXT:    [[INTPTR:%.*]] = ptrtoint i8* [[PTR]] to i64
+// CHECK-VOID_PTR-NEXT:    [[INVERTED_MASK:%.*]] = xor i64 [[MASK]], -1
+// CHECK-VOID_PTR-NEXT:    [[ALIGNED_INTPTR:%.*]] = and i64 [[INTPTR]], [[INVERTED_MASK]]
+// CHECK-VOID_PTR-NEXT:    [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
+// CHECK-VOID_PTR-NEXT:    [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[PTR]], i64 [[DIFF]]
+// CHECK-VOID_PTR-NEXT:    [[MASK1:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-VOID_PTR-NEXT:    [[PTRINT:%.*]] = ptrtoint i8* [[ALIGNED_RESULT]] to i64
+// CHECK-VOID_PTR-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK1]]
+// CHECK-VOID_PTR-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-VOID_PTR-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-VOID_PTR-NEXT:    ret i8* [[ALIGNED_RESULT]]
+//
+// CHECK-FLOAT_PTR-LABEL: define {{[^@]+}}@align_down
+// CHECK-FLOAT_PTR-SAME: (float* [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
+// CHECK-FLOAT_PTR-NEXT:  entry:
+// CHECK-FLOAT_PTR-NEXT:    [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64
+// CHECK-FLOAT_PTR-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-FLOAT_PTR-NEXT:    [[INTPTR:%.*]] = ptrtoint float* [[PTR]] to i64
+// CHECK-FLOAT_PTR-NEXT:    [[INVERTED_MASK:%.*]] = xor i64 [[MASK]], -1
+// CHECK-FLOAT_PTR-NEXT:    [[ALIGNED_INTPTR:%.*]] = and i64 [[INTPTR]], [[INVERTED_MASK]]
+// CHECK-FLOAT_PTR-NEXT:    [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]
+// CHECK-FLOAT_PTR-NEXT:    [[TMP0:%.*]] = bitcast float* [[PTR]] to i8*
+// CHECK-FLOAT_PTR-NEXT:    [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i64 [[DIFF]]
+// CHECK-FLOAT_PTR-NEXT:    [[TMP1:%.*]] = bitcast i8* [[ALIGNED_RESULT]] to float*
+// CHECK-FLOAT_PTR-NEXT:    [[MASK1:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-FLOAT_PTR-NEXT:    [[PTRINT:%.*]] = ptrtoint float* [[TMP1]] to i64
+// CHECK-FLOAT_PTR-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK1]]
+// CHECK-FLOAT_PTR-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-FLOAT_PTR-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-FLOAT_PTR-NEXT:    ret float* [[TMP1]]
+//
+// CHECK-LONG-LABEL: define {{[^@]+}}@align_down
+// CHECK-LONG-SAME: (i64 [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
+// CHECK-LONG-NEXT:  entry:
+// CHECK-LONG-NEXT:    [[ALIGNMENT:%.*]] = zext i32 [[ALIGN]] to i64
+// CHECK-LONG-NEXT:    [[MASK:%.*]] = sub i64 [[ALIGNMENT]], 1
+// CHECK-LONG-NEXT:    [[INVERTED_MASK:%.*]] = xor i64 [[MASK]], -1
+// CHECK-LONG-NEXT:    [[ALIGNED_RESULT:%.*]] = and i64 [[PTR]], [[INVERTED_MASK]]
+// CHECK-LONG-NEXT:    ret i64 [[ALIGNED_RESULT]]
+//
+// CHECK-USHORT-LABEL: define {{[^@]+}}@align_down
+// CHECK-USHORT-SAME: (i16 zeroext [[PTR:%.*]], i32 [[ALIGN:%.*]]) #0
+// CHECK-USHORT-NEXT:  entry:
+// CHECK-USHORT-NEXT:    [[ALIGNMENT:%.*]] = trunc i32 [[ALIGN]] to i16
+// CHECK-USHORT-NEXT:    [[MASK:%.*]] = sub i16 [[ALIGNMENT]], 1
+// CHECK-USHORT-NEXT:    [[INVERTED_MASK:%.*]] = xor i16 [[MASK]], -1
+// CHECK-USHORT-NEXT:    [[ALIGNED_RESULT:%.*]] = and i16 [[PTR]], [[INVERTED_MASK]]
+// CHECK-USHORT-NEXT:    ret i16 [[ALIGNED_RESULT]]
 //
 TYPE align_down(TYPE ptr, unsigned align) {
   return __builtin_align_down(ptr, align);

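builtin-align.c covers the three alignment builtins across pointer and integer operand types. For integers the lowering is pure bit arithmetic on the value (the LONG and USHORT prefixes above); for pointers it round-trips through ptrtoint, applies the mask, rebuilds the pointer with a byte-offset GEP, and finishes with an @llvm.assume about the result (the VOID_PTR and FLOAT_PTR prefixes). A compact illustration, assumed rather than copied from the test:

    #include <stdbool.h>

    bool is_aligned_16(void *p) { return __builtin_is_aligned(p, 16); }
    void *align_up_16(void *p)  { return __builtin_align_up(p, 16); }
    long align_down_16(long a)  { return __builtin_align_down(a, 16); }
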
diff  --git a/clang/test/CodeGen/builtin-assume-aligned.c b/clang/test/CodeGen/builtin-assume-aligned.c
index 40532d914f7a..90693cc21520 100644
--- a/clang/test/CodeGen/builtin-assume-aligned.c
+++ b/clang/test/CodeGen/builtin-assume-aligned.c
@@ -1,50 +1,106 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple x86_64-unknown-unknown -emit-llvm -o - %s | FileCheck %s
 
-// CHECK-LABEL: @test1
+// CHECK-LABEL: define {{[^@]+}}@test1
+// CHECK-SAME: (i32* [[A:%.*]]) #0
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
+// CHECK-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
+// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i8* [[TMP1]] to i64
+// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
+// CHECK-NEXT:    store i32* [[TMP2]], i32** [[A_ADDR]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP3]], i64 0
+// CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+// CHECK-NEXT:    ret i32 [[TMP4]]
+//
 int test1(int *a) {
-// CHECK: [[PTRINT1:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR1:%.+]] = and i64 [[PTRINT1]], 31
-// CHECK: [[MASKCOND1:%.+]] = icmp eq i64 [[MASKEDPTR1]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND1]])
   a = __builtin_assume_aligned(a, 32, 0ull);
   return a[0];
 }
 
-// CHECK-LABEL: @test2
+// CHECK-LABEL: define {{[^@]+}}@test2
+// CHECK-SAME: (i32* [[A:%.*]]) #0
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
+// CHECK-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
+// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i8* [[TMP1]] to i64
+// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
+// CHECK-NEXT:    store i32* [[TMP2]], i32** [[A_ADDR]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP3]], i64 0
+// CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+// CHECK-NEXT:    ret i32 [[TMP4]]
+//
 int test2(int *a) {
-// CHECK: [[PTRINT2:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR2:%.+]] = and i64 [[PTRINT2]], 31
-// CHECK: [[MASKCOND2:%.+]] = icmp eq i64 [[MASKEDPTR2]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND2]])
   a = __builtin_assume_aligned(a, 32, 0);
   return a[0];
 }
 
-// CHECK-LABEL: @test3
+// CHECK-LABEL: define {{[^@]+}}@test3
+// CHECK-SAME: (i32* [[A:%.*]]) #0
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
+// CHECK-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
+// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i8* [[TMP1]] to i64
+// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
+// CHECK-NEXT:    store i32* [[TMP2]], i32** [[A_ADDR]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP3]], i64 0
+// CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+// CHECK-NEXT:    ret i32 [[TMP4]]
+//
 int test3(int *a) {
-// CHECK: [[PTRINT3:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR3:%.+]] = and i64 [[PTRINT3]], 31
-// CHECK: [[MASKCOND3:%.+]] = icmp eq i64 [[MASKEDPTR3]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND3]])
   a = __builtin_assume_aligned(a, 32);
   return a[0];
 }
 
-// CHECK-LABEL: @test4
+// CHECK-LABEL: define {{[^@]+}}@test4
+// CHECK-SAME: (i32* [[A:%.*]], i32 [[B:%.*]]) #0
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
+// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
+// CHECK-NEXT:    store i32 [[B]], i32* [[B_ADDR]], align 4
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
+// CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[B_ADDR]], align 4
+// CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[TMP2]] to i64
+// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i8* [[TMP1]] to i64
+// CHECK-NEXT:    [[OFFSETPTR:%.*]] = sub i64 [[PTRINT]], [[CONV]]
+// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[OFFSETPTR]], 31
+// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8* [[TMP1]] to i32*
+// CHECK-NEXT:    store i32* [[TMP3]], i32** [[A_ADDR]], align 8
+// CHECK-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[A_ADDR]], align 8
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP4]], i64 0
+// CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+// CHECK-NEXT:    ret i32 [[TMP5]]
+//
 int test4(int *a, int b) {
-// CHECK-DAG: [[PTRINT4:%.+]] = ptrtoint
-// CHECK-DAG: [[CONV4:%.+]] = sext i32
-// CHECK: [[OFFSETPTR4:%.+]] = sub i64 [[PTRINT4]], [[CONV4]]
-// CHECK: [[MASKEDPTR4:%.+]] = and i64 [[OFFSETPTR4]], 31
-// CHECK: [[MASKCOND4:%.+]] = icmp eq i64 [[MASKEDPTR4]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND4]])
   a = __builtin_assume_aligned(a, 32, b);
   return a[0];
 }
 
 int *m1() __attribute__((assume_aligned(64)));
 
-// CHECK-LABEL: @test5(
+// CHECK-LABEL: define {{[^@]+}}@test5() #0
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[CALL:%.*]] = call align 64 i32* (...) @m1()
 // CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CALL]], align 4
@@ -56,22 +112,40 @@ int test5() {
 
 int *m2() __attribute__((assume_aligned(64, 12)));
 
-// CHECK-LABEL: @test6
+// CHECK-LABEL: define {{[^@]+}}@test6() #0
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[CALL:%.*]] = call i32* (...) @m2()
+// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64
+// CHECK-NEXT:    [[OFFSETPTR:%.*]] = sub i64 [[PTRINT]], 12
+// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[OFFSETPTR]], 63
+// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[CALL]], align 4
+// CHECK-NEXT:    ret i32 [[TMP0]]
+//
 int test6() {
   return *m2();
-// CHECK: [[PTRINT6:%.+]] = ptrtoint
-// CHECK: [[OFFSETPTR6:%.+]] = sub i64 [[PTRINT6]], 12
-// CHECK: [[MASKEDPTR6:%.+]] = and i64 [[OFFSETPTR6]], 63
-// CHECK: [[MASKCOND6:%.+]] = icmp eq i64 [[MASKEDPTR6]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND6]])
 }
 
-// CHECK-LABEL: @pr43638
+// CHECK-LABEL: define {{[^@]+}}@pr43638
+// CHECK-SAME: (i32* [[A:%.*]]) #0
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
+// CHECK-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*
+// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i8* [[TMP1]] to i64
+// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 536870911
+// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*
+// CHECK-NEXT:    store i32* [[TMP2]], i32** [[A_ADDR]], align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8
+// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP3]], i64 0
+// CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+// CHECK-NEXT:    ret i32 [[TMP4]]
+//
 int pr43638(int *a) {
   a = __builtin_assume_aligned(a, 4294967296);
 return a[0];
-// CHECK: [[PTRINT7:%.+]] = ptrtoint
-// CHECK: [[MASKEDPTR7:%.+]] = and i64 [[PTRINT7]], 536870911
-// CHECK: [[MASKCOND7:%.+]] = icmp eq i64 [[MASKEDPTR7]], 0
-// CHECK: call void @llvm.assume(i1 [[MASKCOND7]])
 }

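builtin-assume-aligned.c checks __builtin_assume_aligned in both its two- and three-argument forms. With an offset, the address is adjusted before masking (`offsetptr = ptrint - offset`), so the assumption is that `p - offset`, not `p` itself, is N-byte aligned; the pr43638 case also shows an over-wide request (4294967296) being reduced to the mask 536870911, i.e. 2^29 - 1. Equivalent source to the test4 case above:

    int load_first(int *a, int off) {
      /* assume that (a - off) is 32-byte aligned */
      a = __builtin_assume_aligned(a, 32, off);
      return a[0];
    }

The simple32.ll test that follows exercises the consumer side of these patterns: the AlignmentFromAssumptions pass reads the assume and raises the alignment on the dominated loads and the memset (for example, `load i32, i32* %a, align 4` becomes `align 32`).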
diff  --git a/llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll b/llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll
index 5aabe9518415..3f0819e3641b 100644
--- a/llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll
+++ b/llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll
@@ -1,8 +1,19 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature
 target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"
 ; RUN: opt < %s -alignment-from-assumptions -S | FileCheck %s
 ; RUN: opt < %s -passes=alignment-from-assumptions -S | FileCheck %s
 
 define i32 @foo(i32* nocapture %a) nounwind uwtable readonly {
+; CHECK-LABEL: define {{[^@]+}}@foo
+; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
+; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 32
+; CHECK-NEXT:    ret i32 [[TMP0]]
+;
 entry:
   %ptrint = ptrtoint i32* %a to i64
   %maskedptr = and i64 %ptrint, 31
@@ -11,12 +22,21 @@ entry:
   %0 = load i32, i32* %a, align 4
   ret i32 %0
 
-; CHECK-LABEL: @foo
-; CHECK: load i32, i32* {{[^,]+}}, align 32
-; CHECK: ret i32
 }
 
 define i32 @foo2(i32* nocapture %a) nounwind uwtable readonly {
+; CHECK-LABEL: define {{[^@]+}}@foo2
+; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
+; CHECK-NEXT:    [[OFFSETPTR:%.*]] = add i64 [[PTRINT]], 24
+; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[OFFSETPTR]], 31
+; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 2
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 16
+; CHECK-NEXT:    ret i32 [[TMP0]]
+;
 entry:
   %ptrint = ptrtoint i32* %a to i64
   %offsetptr = add i64 %ptrint, 24
@@ -27,12 +47,21 @@ entry:
   %0 = load i32, i32* %arrayidx, align 4
   ret i32 %0
 
-; CHECK-LABEL: @foo2
-; CHECK: load i32, i32* {{[^,]+}}, align 16
-; CHECK: ret i32
 }
 
 define i32 @foo2a(i32* nocapture %a) nounwind uwtable readonly {
+; CHECK-LABEL: define {{[^@]+}}@foo2a
+; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
+; CHECK-NEXT:    [[OFFSETPTR:%.*]] = add i64 [[PTRINT]], 28
+; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[OFFSETPTR]], 31
+; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 -1
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 32
+; CHECK-NEXT:    ret i32 [[TMP0]]
+;
 entry:
   %ptrint = ptrtoint i32* %a to i64
   %offsetptr = add i64 %ptrint, 28
@@ -43,12 +72,19 @@ entry:
   %0 = load i32, i32* %arrayidx, align 4
   ret i32 %0
 
-; CHECK-LABEL: @foo2a
-; CHECK: load i32, i32* {{[^,]+}}, align 32
-; CHECK: ret i32
 }
 
 define i32 @goo(i32* nocapture %a) nounwind uwtable readonly {
+; CHECK-LABEL: define {{[^@]+}}@goo
+; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
+; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A]], align 32
+; CHECK-NEXT:    ret i32 [[TMP0]]
+;
 entry:
   %ptrint = ptrtoint i32* %a to i64
   %maskedptr = and i64 %ptrint, 31
@@ -57,12 +93,31 @@ entry:
   %0 = load i32, i32* %a, align 4
   ret i32 %0
 
-; CHECK-LABEL: @goo
-; CHECK: load i32, i32* {{[^,]+}}, align 32
-; CHECK: ret i32
 }
 
 define i32 @hoo(i32* nocapture %a) nounwind uwtable readonly {
+; CHECK-LABEL: define {{[^@]+}}@hoo
+; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
+; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[R_06:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 32
+; CHECK-NEXT:    [[ADD]] = add nsw i32 [[TMP0]], [[R_06]]
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 8
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP1]], 2048
+; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
+; CHECK:       for.end:
+; CHECK-NEXT:    [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ]
+; CHECK-NEXT:    ret i32 [[ADD_LCSSA]]
+;
 entry:
   %ptrint = ptrtoint i32* %a to i64
   %maskedptr = and i64 %ptrint, 31
@@ -85,12 +140,31 @@ for.end:                                          ; preds = %for.body
   %add.lcssa = phi i32 [ %add, %for.body ]
   ret i32 %add.lcssa
 
-; CHECK-LABEL: @hoo
-; CHECK: load i32, i32* %arrayidx, align 32
-; CHECK: ret i32 %add.lcssa
 }
 
 define i32 @joo(i32* nocapture %a) nounwind uwtable readonly {
+; CHECK-LABEL: define {{[^@]+}}@joo
+; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
+; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 4, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[R_06:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 16
+; CHECK-NEXT:    [[ADD]] = add nsw i32 [[TMP0]], [[R_06]]
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 8
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP1]], 2048
+; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
+; CHECK:       for.end:
+; CHECK-NEXT:    [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ]
+; CHECK-NEXT:    ret i32 [[ADD_LCSSA]]
+;
 entry:
   %ptrint = ptrtoint i32* %a to i64
   %maskedptr = and i64 %ptrint, 31
@@ -113,12 +187,31 @@ for.end:                                          ; preds = %for.body
   %add.lcssa = phi i32 [ %add, %for.body ]
   ret i32 %add.lcssa
 
-; CHECK-LABEL: @joo
-; CHECK: load i32, i32* %arrayidx, align 16
-; CHECK: ret i32 %add.lcssa
 }
 
 define i32 @koo(i32* nocapture %a) nounwind uwtable readonly {
+; CHECK-LABEL: define {{[^@]+}}@koo
+; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
+; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[R_06:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 16
+; CHECK-NEXT:    [[ADD]] = add nsw i32 [[TMP0]], [[R_06]]
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 4
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP1]], 2048
+; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
+; CHECK:       for.end:
+; CHECK-NEXT:    [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ]
+; CHECK-NEXT:    ret i32 [[ADD_LCSSA]]
+;
 entry:
   %ptrint = ptrtoint i32* %a to i64
   %maskedptr = and i64 %ptrint, 31
@@ -141,12 +234,31 @@ for.end:                                          ; preds = %for.body
   %add.lcssa = phi i32 [ %add, %for.body ]
   ret i32 %add.lcssa
 
-; CHECK-LABEL: @koo
-; CHECK: load i32, i32* %arrayidx, align 16
-; CHECK: ret i32 %add.lcssa
 }
 
 define i32 @koo2(i32* nocapture %a) nounwind uwtable readonly {
+; CHECK-LABEL: define {{[^@]+}}@koo2
+; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
+; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ -4, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[R_06:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 16
+; CHECK-NEXT:    [[ADD]] = add nsw i32 [[TMP0]], [[R_06]]
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 4
+; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP1]], 2048
+; CHECK-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_END:%.*]]
+; CHECK:       for.end:
+; CHECK-NEXT:    [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ]
+; CHECK-NEXT:    ret i32 [[ADD_LCSSA]]
+;
 entry:
   %ptrint = ptrtoint i32* %a to i64
   %maskedptr = and i64 %ptrint, 31
@@ -169,12 +281,20 @@ for.end:                                          ; preds = %for.body
   %add.lcssa = phi i32 [ %add, %for.body ]
   ret i32 %add.lcssa
 
-; CHECK-LABEL: @koo2
-; CHECK: load i32, i32* %arrayidx, align 16
-; CHECK: ret i32 %add.lcssa
 }
 
 define i32 @moo(i32* nocapture %a) nounwind uwtable {
+; CHECK-LABEL: define {{[^@]+}}@moo
+; CHECK-SAME: (i32* nocapture [[A:%.*]]) #1
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
+; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[A]] to i8*
+; CHECK-NEXT:    tail call void @llvm.memset.p0i8.i64(i8* align 32 [[TMP0]], i8 0, i64 64, i1 false)
+; CHECK-NEXT:    ret i32 undef
+;
 entry:
   %ptrint = ptrtoint i32* %a to i64
   %maskedptr = and i64 %ptrint, 31
@@ -184,12 +304,25 @@ entry:
   tail call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 64, i1 false)
   ret i32 undef
 
-; CHECK-LABEL: @moo
-; CHECK: @llvm.memset.p0i8.i64(i8* align 32 %0, i8 0, i64 64, i1 false)
-; CHECK: ret i32 undef
 }
 
 define i32 @moo2(i32* nocapture %a, i32* nocapture %b) nounwind uwtable {
+; CHECK-LABEL: define {{[^@]+}}@moo2
+; CHECK-SAME: (i32* nocapture [[A:%.*]], i32* nocapture [[B:%.*]]) #1
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64
+; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
+; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND]])
+; CHECK-NEXT:    [[PTRINT1:%.*]] = ptrtoint i32* [[B]] to i64
+; CHECK-NEXT:    [[MASKEDPTR3:%.*]] = and i64 [[PTRINT1]], 127
+; CHECK-NEXT:    [[MASKCOND4:%.*]] = icmp eq i64 [[MASKEDPTR3]], 0
+; CHECK-NEXT:    tail call void @llvm.assume(i1 [[MASKCOND4]])
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[A]] to i8*
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[B]] to i8*
+; CHECK-NEXT:    tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 32 [[TMP0]], i8* align 128 [[TMP1]], i64 64, i1 false)
+; CHECK-NEXT:    ret i32 undef
+;
 entry:
   %ptrint = ptrtoint i32* %a to i64
   %maskedptr = and i64 %ptrint, 31
@@ -204,9 +337,6 @@ entry:
   tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 64, i1 false)
   ret i32 undef
 
-; CHECK-LABEL: @moo2
-; CHECK: @llvm.memcpy.p0i8.p0i8.i64(i8* align 32 %0, i8* align 128 %1, i64 64, i1 false)
-; CHECK: ret i32 undef
 }
 
 declare void @llvm.assume(i1) nounwind
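
For readers skimming the checks above, a minimal sketch of the idiom simple32.ll exercises (the function @f below is hypothetical, not part of the patch): a masked pointer fed to @llvm.assume proves 32-byte alignment, and the AlignmentFromAssumptions pass uses that fact to raise the load's align 4 to align 32.

define i32 @f(i32* %a) {
entry:
  %ptrint = ptrtoint i32* %a to i64      ; reinterpret the pointer as an integer
  %maskedptr = and i64 %ptrint, 31       ; low 5 bits: offset within a 32-byte block
  %maskcond = icmp eq i64 %maskedptr, 0  ; true iff %a is 32-byte aligned
  tail call void @llvm.assume(i1 %maskcond)
  %v = load i32, i32* %a, align 4        ; the pass rewrites this to align 32
  ret i32 %v
}

declare void @llvm.assume(i1) nounwind

The same assumed fact drives the @moo and @moo2 checks: the align 4 operands on @llvm.memset and @llvm.memcpy are raised to align 32 (and to align 128 for the memcpy source, whose alignment is assumed separately).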

diff  --git a/llvm/test/Transforms/Inline/align.ll b/llvm/test/Transforms/Inline/align.ll
index c91fe8046174..ede6c3fa7bcf 100644
--- a/llvm/test/Transforms/Inline/align.ll
+++ b/llvm/test/Transforms/Inline/align.ll
@@ -1,8 +1,17 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature
 ; RUN: opt -inline -preserve-alignment-assumptions-during-inlining -S < %s | FileCheck %s
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
 define void @hello(float* align 128 nocapture %a, float* nocapture readonly %c) #0 {
+; CHECK-LABEL: define {{[^@]+}}@hello
+; CHECK-SAME: (float* nocapture align 128 [[A:%.*]], float* nocapture readonly [[C:%.*]]) #0
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[C]], align 4
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A]], i64 5
+; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   %0 = load float, float* %c, align 4
   %arrayidx = getelementptr inbounds float, float* %a, i64 5
@@ -11,6 +20,21 @@ entry:
 }
 
 define void @foo(float* nocapture %a, float* nocapture readonly %c) #0 {
+; CHECK-LABEL: define {{[^@]+}}@foo
+; CHECK-SAME: (float* nocapture [[A:%.*]], float* nocapture readonly [[C:%.*]]) #0
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint float* [[A]] to i64
+; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 127
+; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[C]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, float* [[A]], i64 5
+; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX_I]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load float, float* [[C]], align 4
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A]], i64 7
+; CHECK-NEXT:    store float [[TMP1]], float* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   tail call void @hello(float* %a, float* %c)
   %0 = load float, float* %c, align 4
@@ -19,22 +43,18 @@ entry:
   ret void
 }
 
-; CHECK: define void @foo(float* nocapture %a, float* nocapture readonly %c) #0 {
-; CHECK: entry:
-; CHECK:   %ptrint = ptrtoint float* %a to i64
-; CHECK:   %maskedptr = and i64 %ptrint, 127
-; CHECK:   %maskcond = icmp eq i64 %maskedptr, 0
-; CHECK:   call void @llvm.assume(i1 %maskcond)
-; CHECK:   %0 = load float, float* %c, align 4
-; CHECK:   %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
-; CHECK:   store float %0, float* %arrayidx.i, align 4
-; CHECK:   %1 = load float, float* %c, align 4
-; CHECK:   %arrayidx = getelementptr inbounds float, float* %a, i64 7
-; CHECK:   store float %1, float* %arrayidx, align 4
-; CHECK:   ret void
-; CHECK: }
-
 define void @fooa(float* nocapture align 128 %a, float* nocapture readonly %c) #0 {
+; CHECK-LABEL: define {{[^@]+}}@fooa
+; CHECK-SAME: (float* nocapture align 128 [[A:%.*]], float* nocapture readonly [[C:%.*]]) #0
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[C]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, float* [[A]], i64 5
+; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX_I]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load float, float* [[C]], align 4
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A]], i64 7
+; CHECK-NEXT:    store float [[TMP1]], float* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   tail call void @hello(float* %a, float* %c)
   %0 = load float, float* %c, align 4
@@ -43,18 +63,17 @@ entry:
   ret void
 }
 
-; CHECK: define void @fooa(float* nocapture align 128 %a, float* nocapture readonly %c) #0 {
-; CHECK: entry:
-; CHECK:   %0 = load float, float* %c, align 4
-; CHECK:   %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
-; CHECK:   store float %0, float* %arrayidx.i, align 4
-; CHECK:   %1 = load float, float* %c, align 4
-; CHECK:   %arrayidx = getelementptr inbounds float, float* %a, i64 7
-; CHECK:   store float %1, float* %arrayidx, align 4
-; CHECK:   ret void
-; CHECK: }
-
 define void @hello2(float* align 128 nocapture %a, float* align 128 nocapture %b, float* nocapture readonly %c) #0 {
+; CHECK-LABEL: define {{[^@]+}}@hello2
+; CHECK-SAME: (float* nocapture align 128 [[A:%.*]], float* nocapture align 128 [[B:%.*]], float* nocapture readonly [[C:%.*]]) #0
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[C]], align 4
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A]], i64 5
+; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds float, float* [[B]], i64 8
+; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX1]], align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   %0 = load float, float* %c, align 4
   %arrayidx = getelementptr inbounds float, float* %a, i64 5
@@ -65,6 +84,27 @@ entry:
 }
 
 define void @foo2(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c) #0 {
+; CHECK-LABEL: define {{[^@]+}}@foo2
+; CHECK-SAME: (float* nocapture [[A:%.*]], float* nocapture [[B:%.*]], float* nocapture readonly [[C:%.*]]) #0
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint float* [[A]] to i64
+; CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 127
+; CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+; CHECK-NEXT:    [[PTRINT1:%.*]] = ptrtoint float* [[B]] to i64
+; CHECK-NEXT:    [[MASKEDPTR2:%.*]] = and i64 [[PTRINT1]], 127
+; CHECK-NEXT:    [[MASKCOND3:%.*]] = icmp eq i64 [[MASKEDPTR2]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND3]])
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[C]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, float* [[A]], i64 5
+; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX_I]], align 4
+; CHECK-NEXT:    [[ARRAYIDX1_I:%.*]] = getelementptr inbounds float, float* [[B]], i64 8
+; CHECK-NEXT:    store float [[TMP0]], float* [[ARRAYIDX1_I]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load float, float* [[C]], align 4
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[A]], i64 7
+; CHECK-NEXT:    store float [[TMP1]], float* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    ret void
+;
 entry:
   tail call void @hello2(float* %a, float* %b, float* %c)
   %0 = load float, float* %c, align 4
@@ -73,26 +113,5 @@ entry:
   ret void
 }
 
-; CHECK: define void @foo2(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c) #0 {
-; CHECK: entry:
-; CHECK:   %ptrint = ptrtoint float* %a to i64
-; CHECK:   %maskedptr = and i64 %ptrint, 127
-; CHECK:   %maskcond = icmp eq i64 %maskedptr, 0
-; CHECK:   call void @llvm.assume(i1 %maskcond)
-; CHECK:   %ptrint1 = ptrtoint float* %b to i64
-; CHECK:   %maskedptr2 = and i64 %ptrint1, 127
-; CHECK:   %maskcond3 = icmp eq i64 %maskedptr2, 0
-; CHECK:   call void @llvm.assume(i1 %maskcond3)
-; CHECK:   %0 = load float, float* %c, align 4
-; CHECK:   %arrayidx.i = getelementptr inbounds float, float* %a, i64 5
-; CHECK:   store float %0, float* %arrayidx.i, align 4
-; CHECK:   %arrayidx1.i = getelementptr inbounds float, float* %b, i64 8
-; CHECK:   store float %0, float* %arrayidx1.i, align 4
-; CHECK:   %1 = load float, float* %c, align 4
-; CHECK:   %arrayidx = getelementptr inbounds float, float* %a, i64 7
-; CHECK:   store float %1, float* %arrayidx, align 4
-; CHECK:   ret void
-; CHECK: }
-
 attributes #0 = { nounwind uwtable }
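
The @foo/@fooa checks capture the inliner's alignment-assumption logic. A minimal sketch (the functions @callee and @caller below are hypothetical, not part of the patch): when a callee parameter carries align 128 but the call-site argument's alignment is unknown, -preserve-alignment-assumptions-during-inlining makes the inliner materialize the ptrtoint/and/icmp/@llvm.assume sequence in the caller.

define void @callee(float* align 128 %p) {
entry:
  store float 0.0, float* %p, align 4
  ret void
}

define void @caller(float* %p) {
entry:
  ; After opt -inline -preserve-alignment-assumptions-during-inlining, the
  ; callee body is inlined here behind an alignment assumption:
  ;   %ptrint = ptrtoint float* %p to i64
  ;   %maskedptr = and i64 %ptrint, 127
  ;   %maskcond = icmp eq i64 %maskedptr, 0
  ;   call void @llvm.assume(i1 %maskcond)
  call void @callee(float* %p)
  ret void
}

When the call-site argument already carries align 128, as in @fooa above, the fact is already known and no assumption is emitted.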
 