[clang] f3fa108 - [Clang] Convert some tests to opaque pointers (NFC)
Nikita Popov via cfe-commits
cfe-commits@lists.llvm.org
Fri Feb 17 02:56:09 PST 2023
Author: Nikita Popov
Date: 2023-02-17T11:56:00+01:00
New Revision: f3fa1086c7f83edcc473724a8ac7d675a9df11d7
URL: https://github.com/llvm/llvm-project/commit/f3fa1086c7f83edcc473724a8ac7d675a9df11d7
DIFF: https://github.com/llvm/llvm-project/commit/f3fa1086c7f83edcc473724a8ac7d675a9df11d7.diff
LOG: [Clang] Convert some tests to opaque pointers (NFC)
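Every hunk below follows the same mechanical pattern: the -no-opaque-pointers flag is dropped from the RUN lines, the typed pointer spellings in the FileCheck patterns (i32*, i8*, %struct.S*, ...) become the single opaque ptr type, and check lines that only matched pointer bitcasts are removed, which also shifts the unnamed %N value numbering. As a rough sketch, an update of this kind looks like the following hypothetical test file (not one of the tests touched by this commit):

// Hypothetical example, for illustration only; not part of this commit.
// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -o - | FileCheck %s
int deref(int *p) {
  // Before the migration this check would have been written as:
  //   load i32, i32* %{{.*}}, align 4
  // With opaque pointers every pointer type prints as "ptr":
  // CHECK-LABEL: @deref
  // CHECK: load i32, ptr %{{.*}}, align 4
  return *p;
}

The emitted IR is unchanged apart from how pointer types are printed, which is why the change is tagged NFC.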
Added:
Modified:
clang/test/CodeGen/arm64-microsoft-arguments.cpp
clang/test/CodeGen/atomic-ops-libcall.c
clang/test/CodeGen/atomic-ops.c
clang/test/CodeGen/attr-nomerge.cpp
clang/test/CodeGen/attr-target-mv-va-args.c
clang/test/CodeGen/big-atomic-ops.c
clang/test/CodeGen/blocks.c
clang/test/CodeGen/bpf-attr-preserve-access-index-1.c
clang/test/CodeGen/bpf-attr-preserve-access-index-2.c
clang/test/CodeGen/builtin-preserve-access-index-array.c
clang/test/CodeGen/builtin-preserve-access-index-nonptr.c
clang/test/CodeGen/builtin-preserve-access-index-typedef.c
clang/test/CodeGen/builtin-preserve-access-index.c
clang/test/CodeGen/builtins-bpf-preserve-field-info-1.c
clang/test/CodeGen/builtins-bpf-preserve-field-info-2.c
clang/test/CodeGenCXX/microsoft-abi-thread-safe-statics.cpp
Removed:
################################################################################
diff --git a/clang/test/CodeGen/arm64-microsoft-arguments.cpp b/clang/test/CodeGen/arm64-microsoft-arguments.cpp
index ca1ff2025b74..a9ae6911b16e 100644
--- a/clang/test/CodeGen/arm64-microsoft-arguments.cpp
+++ b/clang/test/CodeGen/arm64-microsoft-arguments.cpp
@@ -1,9 +1,9 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple aarch64-windows -ffreestanding -emit-llvm -O0 \
+// RUN: %clang_cc1 -triple aarch64-windows -ffreestanding -emit-llvm -O0 \
// RUN: -x c++ -o - %s | FileCheck %s
// Pass and return for type size <= 8 bytes.
// CHECK: define {{.*}} i64 @{{.*}}f1{{.*}}()
-// CHECK: call i64 {{.*}}func1{{.*}}(i64 %3)
+// CHECK: call i64 {{.*}}func1{{.*}}(i64 %0)
struct S1 {
int a[2];
};
@@ -16,7 +16,7 @@ S1 f1() {
// Pass and return type size <= 16 bytes.
// CHECK: define {{.*}} [2 x i64] @{{.*}}f2{{.*}}()
-// CHECK: call [2 x i64] {{.*}}func2{{.*}}([2 x i64] %3)
+// CHECK: call [2 x i64] {{.*}}func2{{.*}}([2 x i64] %0)
struct S2 {
int a[4];
};
@@ -28,8 +28,8 @@ S2 f2() {
}
// Pass and return for type size > 16 bytes.
-// CHECK: define {{.*}} void @{{.*}}f3{{.*}}(%struct.S3* noalias sret(%struct.S3) align 4 %agg.result)
-// CHECK: call void {{.*}}func3{{.*}}(%struct.S3* sret(%struct.S3) align 4 %agg.result, %struct.S3* noundef %agg.tmp)
+// CHECK: define {{.*}} void @{{.*}}f3{{.*}}(ptr noalias sret(%struct.S3) align 4 %agg.result)
+// CHECK: call void {{.*}}func3{{.*}}(ptr sret(%struct.S3) align 4 %agg.result, ptr noundef %agg.tmp)
struct S3 {
int a[5];
};
@@ -42,8 +42,8 @@ S3 f3() {
// Pass and return aggregate (of size < 16 bytes) with non-trivial destructor.
// Passed directly but returned indirectly.
-// CHECK: define {{.*}} void {{.*}}f4{{.*}}(%struct.S4* inreg noalias sret(%struct.S4) align 4 %agg.result)
-// CHECK: call void {{.*}}func4{{.*}}(%struct.S4* inreg sret(%struct.S4) align 4 %agg.result, [2 x i64] %5)
+// CHECK: define {{.*}} void {{.*}}f4{{.*}}(ptr inreg noalias sret(%struct.S4) align 4 %agg.result)
+// CHECK: call void {{.*}}func4{{.*}}(ptr inreg sret(%struct.S4) align 4 %agg.result, [2 x i64] %0)
struct S4 {
int a[3];
~S4();
@@ -56,8 +56,8 @@ S4 f4() {
}
// Pass and return from instance method called from instance method.
-// CHECK: define {{.*}} void @{{.*}}bar@Q1{{.*}}(%class.Q1* {{[^,]*}} %this, %class.P1* inreg noalias sret(%class.P1) align 1 %agg.result)
-// CHECK: call void {{.*}}foo@P1{{.*}}(%class.P1* noundef{{[^,]*}} %ref.tmp, %class.P1* inreg sret(%class.P1) align 1 %agg.result, i8 %1)
+// CHECK: define {{.*}} void @{{.*}}bar@Q1{{.*}}(ptr {{[^,]*}} %this, ptr inreg noalias sret(%class.P1) align 1 %agg.result)
+// CHECK: call void {{.*}}foo@P1{{.*}}(ptr noundef{{[^,]*}} %ref.tmp, ptr inreg sret(%class.P1) align 1 %agg.result, i8 %0)
class P1 {
public:
@@ -76,7 +76,7 @@ P1 Q1::bar() {
// Pass and return from instance method called from free function.
// CHECK: define {{.*}} void {{.*}}bar{{.*}}()
-// CHECK: call void {{.*}}foo@P2{{.*}}(%class.P2* noundef{{[^,]*}} %ref.tmp, %class.P2* inreg sret(%class.P2) align 1 %retval, i8 %0)
+// CHECK: call void {{.*}}foo@P2{{.*}}(ptr noundef{{[^,]*}} %ref.tmp, ptr inreg sret(%class.P2) align 1 %retval, i8 %0)
class P2 {
public:
P2 foo(P2 x);
@@ -89,8 +89,8 @@ P2 bar() {
// Pass and return an object with a user-provided constructor (passed directly,
// returned indirectly)
-// CHECK: define {{.*}} void @{{.*}}f5{{.*}}(%struct.S5* inreg noalias sret(%struct.S5) align 4 %agg.result)
-// CHECK: call void {{.*}}func5{{.*}}(%struct.S5* inreg sret(%struct.S5) align 4 %agg.result, i64 {{.*}})
+// CHECK: define {{.*}} void @{{.*}}f5{{.*}}(ptr inreg noalias sret(%struct.S5) align 4 %agg.result)
+// CHECK: call void {{.*}}func5{{.*}}(ptr inreg sret(%struct.S5) align 4 %agg.result, i64 {{.*}})
struct S5 {
S5();
int x;
@@ -146,8 +146,8 @@ struct S8 {
int y;
};
-// CHECK: define {{.*}} void {{.*}}?f8{{.*}}(%struct.S8* inreg noalias sret(%struct.S8) align 4 {{.*}})
-// CHECK: call void {{.*}}func8{{.*}}(%struct.S8* inreg sret(%struct.S8) align 4 {{.*}}, i64 {{.*}})
+// CHECK: define {{.*}} void {{.*}}?f8{{.*}}(ptr inreg noalias sret(%struct.S8) align 4 {{.*}})
+// CHECK: call void {{.*}}func8{{.*}}(ptr inreg sret(%struct.S8) align 4 {{.*}}, i64 {{.*}})
S8 func8(S8 x);
S8 f8() {
S8 x;
@@ -157,8 +157,8 @@ S8 f8() {
// Pass and return an object with a non-trivial copy-assignment operator and
// a trivial copy constructor (passed directly, returned indirectly)
-// CHECK: define {{.*}} void @"?f9@@YA?AUS9@@XZ"(%struct.S9* inreg noalias sret(%struct.S9) align 4 {{.*}})
-// CHECK: call void {{.*}}func9{{.*}}(%struct.S9* inreg sret(%struct.S9) align 4 {{.*}}, i64 {{.*}})
+// CHECK: define {{.*}} void @"?f9@@YA?AUS9@@XZ"(ptr inreg noalias sret(%struct.S9) align 4 {{.*}})
+// CHECK: call void {{.*}}func9{{.*}}(ptr inreg sret(%struct.S9) align 4 {{.*}}, i64 {{.*}})
struct S9 {
S9& operator=(const S9&);
int x;
@@ -174,8 +174,8 @@ S9 f9() {
// Pass and return an object with a base class (passed directly, returned
// indirectly).
-// CHECK: define dso_local void {{.*}}f10{{.*}}(%struct.S10* inreg noalias sret(%struct.S10) align 4 {{.*}})
-// CHECK: call void {{.*}}func10{{.*}}(%struct.S10* inreg sret(%struct.S10) align 4 {{.*}}, [2 x i64] {{.*}})
+// CHECK: define dso_local void {{.*}}f10{{.*}}(ptr inreg noalias sret(%struct.S10) align 4 {{.*}})
+// CHECK: call void {{.*}}func10{{.*}}(ptr inreg sret(%struct.S10) align 4 {{.*}}, [2 x i64] {{.*}})
struct S10 : public S1 {
int x;
};
@@ -189,8 +189,8 @@ S10 f10() {
// Pass and return a non aggregate object exceeding > 128 bits (passed
// indirectly, returned indirectly)
-// CHECK: define dso_local void {{.*}}f11{{.*}}(%struct.S11* inreg noalias sret(%struct.S11) align 8 {{.*}})
-// CHECK: call void {{.*}}func11{{.*}}(%struct.S11* inreg sret(%struct.S11) align 8 {{.*}}, %struct.S11* {{.*}})
+// CHECK: define dso_local void {{.*}}f11{{.*}}(ptr inreg noalias sret(%struct.S11) align 8 {{.*}})
+// CHECK: call void {{.*}}func11{{.*}}(ptr inreg sret(%struct.S11) align 8 {{.*}}, ptr {{.*}})
struct S11 {
virtual void f();
int a[5];
diff --git a/clang/test/CodeGen/atomic-ops-libcall.c b/clang/test/CodeGen/atomic-ops-libcall.c
index 7000dbb937bf..745ccd22bf33 100644
--- a/clang/test/CodeGen/atomic-ops-libcall.c
+++ b/clang/test/CodeGen/atomic-ops-libcall.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers < %s -triple armv5e-none-linux-gnueabi -emit-llvm -O1 | FileCheck %s
+// RUN: %clang_cc1 < %s -triple armv5e-none-linux-gnueabi -emit-llvm -O1 | FileCheck %s
// FIXME: This file should not be checking -O1 output.
// Ie, it is testing many IR optimizer passes as part of front-end verification.
@@ -10,109 +10,109 @@ enum memory_order {
int *test_c11_atomic_fetch_add_int_ptr(_Atomic(int *) *p) {
// CHECK: test_c11_atomic_fetch_add_int_ptr
- // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_add_4(i8* noundef {{%[0-9]+}}, i32 noundef 12, i32 noundef 5)
+ // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_add_4(ptr noundef %p, i32 noundef 12, i32 noundef 5)
return __c11_atomic_fetch_add(p, 3, memory_order_seq_cst);
}
int *test_c11_atomic_fetch_sub_int_ptr(_Atomic(int *) *p) {
// CHECK: test_c11_atomic_fetch_sub_int_ptr
- // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(i8* noundef {{%[0-9]+}}, i32 noundef 20, i32 noundef 5)
+ // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(ptr noundef %p, i32 noundef 20, i32 noundef 5)
return __c11_atomic_fetch_sub(p, 5, memory_order_seq_cst);
}
int test_c11_atomic_fetch_add_int(_Atomic(int) *p) {
// CHECK: test_c11_atomic_fetch_add_int
- // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_add_4(i8* noundef {{%[0-9]+}}, i32 noundef 3, i32 noundef 5)
+ // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_add_4(ptr noundef %p, i32 noundef 3, i32 noundef 5)
return __c11_atomic_fetch_add(p, 3, memory_order_seq_cst);
}
int test_c11_atomic_fetch_sub_int(_Atomic(int) *p) {
// CHECK: test_c11_atomic_fetch_sub_int
- // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(i8* noundef {{%[0-9]+}}, i32 noundef 5, i32 noundef 5)
+ // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(ptr noundef %p, i32 noundef 5, i32 noundef 5)
return __c11_atomic_fetch_sub(p, 5, memory_order_seq_cst);
}
int *fp2a(int **p) {
// CHECK: @fp2a
- // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(i8* noundef {{%[0-9]+}}, i32 noundef 4, i32 noundef 0)
+ // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(ptr noundef %p, i32 noundef 4, i32 noundef 0)
// Note, the GNU builtins do not multiply by sizeof(T)!
return __atomic_fetch_sub(p, 4, memory_order_relaxed);
}
int test_atomic_fetch_add(int *p) {
// CHECK: test_atomic_fetch_add
- // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_add_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5)
+ // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_add_4(ptr noundef %p, i32 noundef 55, i32 noundef 5)
return __atomic_fetch_add(p, 55, memory_order_seq_cst);
}
int test_atomic_fetch_sub(int *p) {
// CHECK: test_atomic_fetch_sub
- // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5)
+ // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_sub_4(ptr noundef %p, i32 noundef 55, i32 noundef 5)
return __atomic_fetch_sub(p, 55, memory_order_seq_cst);
}
int test_atomic_fetch_and(int *p) {
// CHECK: test_atomic_fetch_and
- // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_and_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5)
+ // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_and_4(ptr noundef %p, i32 noundef 55, i32 noundef 5)
return __atomic_fetch_and(p, 55, memory_order_seq_cst);
}
int test_atomic_fetch_or(int *p) {
// CHECK: test_atomic_fetch_or
- // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_or_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5)
+ // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_or_4(ptr noundef %p, i32 noundef 55, i32 noundef 5)
return __atomic_fetch_or(p, 55, memory_order_seq_cst);
}
int test_atomic_fetch_xor(int *p) {
// CHECK: test_atomic_fetch_xor
- // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_xor_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5)
+ // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_xor_4(ptr noundef %p, i32 noundef 55, i32 noundef 5)
return __atomic_fetch_xor(p, 55, memory_order_seq_cst);
}
int test_atomic_fetch_nand(int *p) {
// CHECK: test_atomic_fetch_nand
- // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_nand_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5)
+ // CHECK: {{%[^ ]*}} = tail call i32 @__atomic_fetch_nand_4(ptr noundef %p, i32 noundef 55, i32 noundef 5)
return __atomic_fetch_nand(p, 55, memory_order_seq_cst);
}
int test_atomic_add_fetch(int *p) {
// CHECK: test_atomic_add_fetch
- // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_add_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5)
+ // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_add_4(ptr noundef %p, i32 noundef 55, i32 noundef 5)
// CHECK: {{%[^ ]*}} = add i32 [[CALL]], 55
return __atomic_add_fetch(p, 55, memory_order_seq_cst);
}
int test_atomic_sub_fetch(int *p) {
// CHECK: test_atomic_sub_fetch
- // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_sub_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5)
+ // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_sub_4(ptr noundef %p, i32 noundef 55, i32 noundef 5)
// CHECK: {{%[^ ]*}} = add i32 [[CALL]], -55
return __atomic_sub_fetch(p, 55, memory_order_seq_cst);
}
int test_atomic_and_fetch(int *p) {
// CHECK: test_atomic_and_fetch
- // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_and_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5)
+ // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_and_4(ptr noundef %p, i32 noundef 55, i32 noundef 5)
// CHECK: {{%[^ ]*}} = and i32 [[CALL]], 55
return __atomic_and_fetch(p, 55, memory_order_seq_cst);
}
int test_atomic_or_fetch(int *p) {
// CHECK: test_atomic_or_fetch
- // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_or_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5)
+ // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_or_4(ptr noundef %p, i32 noundef 55, i32 noundef 5)
// CHECK: {{%[^ ]*}} = or i32 [[CALL]], 55
return __atomic_or_fetch(p, 55, memory_order_seq_cst);
}
int test_atomic_xor_fetch(int *p) {
// CHECK: test_atomic_xor_fetch
- // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_xor_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5)
+ // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_xor_4(ptr noundef %p, i32 noundef 55, i32 noundef 5)
// CHECK: {{%[^ ]*}} = xor i32 [[CALL]], 55
return __atomic_xor_fetch(p, 55, memory_order_seq_cst);
}
int test_atomic_nand_fetch(int *p) {
// CHECK: test_atomic_nand_fetch
- // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_nand_4(i8* noundef {{%[0-9]+}}, i32 noundef 55, i32 noundef 5)
+ // CHECK: [[CALL:%[^ ]*]] = tail call i32 @__atomic_fetch_nand_4(ptr noundef %p, i32 noundef 55, i32 noundef 5)
// FIXME: We should not be checking optimized IR. It changes independently of clang.
// FIXME-CHECK: [[AND:%[^ ]*]] = and i32 [[CALL]], 55
// FIXME-CHECK: {{%[^ ]*}} = xor i32 [[AND]], -1
diff --git a/clang/test/CodeGen/atomic-ops.c b/clang/test/CodeGen/atomic-ops.c
index e08e3509582f..1295786524a0 100644
--- a/clang/test/CodeGen/atomic-ops.c
+++ b/clang/test/CodeGen/atomic-ops.c
@@ -1,10 +1,10 @@
-// RUN: %clang_cc1 -no-opaque-pointers %s -emit-llvm -o - -ffreestanding -ffake-address-space-map -triple=i686-apple-darwin9 | FileCheck %s
+// RUN: %clang_cc1 %s -emit-llvm -o - -ffreestanding -ffake-address-space-map -triple=i686-apple-darwin9 | FileCheck %s
// REQUIRES: x86-registered-target
// Also test serialization of atomic operations here, to avoid duplicating the
// test.
-// RUN: %clang_cc1 -no-opaque-pointers %s -emit-pch -o %t -ffreestanding -ffake-address-space-map -triple=i686-apple-darwin9
-// RUN: %clang_cc1 -no-opaque-pointers %s -include-pch %t -ffreestanding -ffake-address-space-map -triple=i686-apple-darwin9 -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 %s -emit-pch -o %t -ffreestanding -ffake-address-space-map -triple=i686-apple-darwin9
+// RUN: %clang_cc1 %s -include-pch %t -ffreestanding -ffake-address-space-map -triple=i686-apple-darwin9 -emit-llvm -o - | FileCheck %s
#ifndef ALREADY_INCLUDED
#define ALREADY_INCLUDED
@@ -14,13 +14,13 @@
int fi1(_Atomic(int) *i) {
// CHECK-LABEL: @fi1
- // CHECK: load atomic i32, i32* {{.*}} seq_cst, align 4
+ // CHECK: load atomic i32, ptr {{.*}} seq_cst, align 4
return __c11_atomic_load(i, memory_order_seq_cst);
}
int fi1a(int *i) {
// CHECK-LABEL: @fi1a
- // CHECK: load atomic i32, i32* {{.*}} seq_cst, align 4
+ // CHECK: load atomic i32, ptr {{.*}} seq_cst, align 4
int v;
__atomic_load(i, &v, memory_order_seq_cst);
return v;
@@ -28,13 +28,13 @@ int fi1a(int *i) {
int fi1b(int *i) {
// CHECK-LABEL: @fi1b
- // CHECK: load atomic i32, i32* {{.*}} seq_cst, align 4
+ // CHECK: load atomic i32, ptr {{.*}} seq_cst, align 4
return __atomic_load_n(i, memory_order_seq_cst);
}
int fi1c(atomic_int *i) {
// CHECK-LABEL: @fi1c
- // CHECK: load atomic i32, i32* {{.*}} seq_cst, align 4
+ // CHECK: load atomic i32, ptr {{.*}} seq_cst, align 4
return atomic_load(i);
}
@@ -116,7 +116,7 @@ int fi3f(int *i) {
_Bool fi4(_Atomic(int) *i) {
// CHECK-LABEL: @fi4(
- // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]] acquire acquire, align 4
+ // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg ptr [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]] acquire acquire, align 4
// CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
// CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
// CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
@@ -127,7 +127,7 @@ _Bool fi4(_Atomic(int) *i) {
_Bool fi4a(int *i) {
// CHECK-LABEL: @fi4a
- // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]] acquire acquire, align 4
+ // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg ptr [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]] acquire acquire, align 4
// CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
// CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
// CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
@@ -139,7 +139,7 @@ _Bool fi4a(int *i) {
_Bool fi4b(int *i) {
// CHECK-LABEL: @fi4b(
- // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg weak i32* [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]] acquire acquire, align 4
+ // CHECK: [[PAIR:%[.0-9A-Z_a-z]+]] = cmpxchg weak ptr [[PTR:%[.0-9A-Z_a-z]+]], i32 [[EXPECTED:%[.0-9A-Z_a-z]+]], i32 [[DESIRED:%[.0-9A-Z_a-z]+]] acquire acquire, align 4
// CHECK: [[OLD:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 0
// CHECK: [[CMP:%[.0-9A-Z_a-z]+]] = extractvalue { i32, i1 } [[PAIR]], 1
// CHECK: br i1 [[CMP]], label %[[STORE_EXPECTED:[.0-9A-Z_a-z]+]], label %[[CONTINUE:[.0-9A-Z_a-z]+]]
@@ -150,7 +150,7 @@ _Bool fi4b(int *i) {
_Bool fi4c(atomic_int *i) {
// CHECK-LABEL: @fi4c
- // CHECK: cmpxchg i32* {{.*}} seq_cst seq_cst, align 4
+ // CHECK: cmpxchg ptr {{.*}} seq_cst seq_cst, align 4
int cmp = 0;
return atomic_compare_exchange_strong(i, &cmp, 1);
}
@@ -158,14 +158,14 @@ _Bool fi4c(atomic_int *i) {
#define _AS1 __attribute__((address_space(1)))
_Bool fi4d(_Atomic(int) *i, int _AS1 *ptr2) {
// CHECK-LABEL: @fi4d(
- // CHECK: [[EXPECTED:%[.0-9A-Z_a-z]+]] = load i32, i32 addrspace(1)* %{{[0-9]+}}
- // CHECK: cmpxchg i32* %{{[0-9]+}}, i32 [[EXPECTED]], i32 %{{[0-9]+}} acquire acquire, align 4
+ // CHECK: [[EXPECTED:%[.0-9A-Z_a-z]+]] = load i32, ptr addrspace(1) %{{[0-9]+}}
+ // CHECK: cmpxchg ptr %{{[0-9]+}}, i32 [[EXPECTED]], i32 %{{[0-9]+}} acquire acquire, align 4
return __c11_atomic_compare_exchange_strong(i, ptr2, 1, memory_order_acquire, memory_order_acquire);
}
float ff1(_Atomic(float) *d) {
// CHECK-LABEL: @ff1
- // CHECK: load atomic i32, i32* {{.*}} monotonic, align 4
+ // CHECK: load atomic i32, ptr {{.*}} monotonic, align 4
return __c11_atomic_load(d, memory_order_relaxed);
}
@@ -185,24 +185,20 @@ struct S {
void implicit_store(_Atomic(struct S) *a, struct S s) {
// CHECK-LABEL: @implicit_store(
- // CHECK: store atomic i64 %{{.*}}, i64* %{{.*}} seq_cst, align 8
+ // CHECK: store atomic i64 %{{.*}}, ptr %{{.*}} seq_cst, align 8
*a = s;
}
struct S implicit_load(_Atomic(struct S) *a) {
// CHECK-LABEL: @implicit_load(
- // CHECK: load atomic i64, i64* %{{.*}} seq_cst, align 8
+ // CHECK: load atomic i64, ptr %{{.*}} seq_cst, align 8
return *a;
}
struct S fd1(struct S *a) {
// CHECK-LABEL: @fd1
// CHECK: [[RETVAL:%.*]] = alloca %struct.S, align 4
- // CHECK: [[A:%.*]] = bitcast %struct.S* {{.*}} to i64*
- // CHECK: [[CAST:%.*]] = bitcast %struct.S* [[RETVAL]] to i64*
- // CHECK: [[SRC:%.*]] = bitcast i64* [[A]] to i8*
- // CHECK: [[DEST:%.*]] = bitcast i64* [[CAST]] to i8*
- // CHECK: call void @__atomic_load(i32 noundef 8, i8* noundef [[SRC]], i8* noundef [[DEST]], i32 noundef 5)
+ // CHECK: call void @__atomic_load(i32 noundef 8, ptr noundef {{.*}}, ptr noundef [[RETVAL]], i32 noundef 5)
// CHECK: ret
struct S ret;
__atomic_load(a, &ret, memory_order_seq_cst);
@@ -211,68 +207,52 @@ struct S fd1(struct S *a) {
void fd2(struct S *a, struct S *b) {
// CHECK-LABEL: @fd2
- // CHECK: [[A_ADDR:%.*]] = alloca %struct.S*, align 4
- // CHECK-NEXT: [[B_ADDR:%.*]] = alloca %struct.S*, align 4
- // CHECK-NEXT: store %struct.S* %a, %struct.S** [[A_ADDR]], align 4
- // CHECK-NEXT: store %struct.S* %b, %struct.S** [[B_ADDR]], align 4
- // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4
- // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4
- // CHECK-NEXT: [[COERCED_A_TMP:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i64*
- // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i64*
- // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast i64* [[COERCED_A_TMP]] to i8*
- // CHECK-NEXT: [[CAST_B:%.*]] = bitcast i64* [[COERCED_B]] to i8*
- // CHECK-NEXT: call void @__atomic_store(i32 noundef 8, i8* noundef [[COERCED_A]], i8* noundef [[CAST_B]],
+ // CHECK: [[A_ADDR:%.*]] = alloca ptr, align 4
+ // CHECK-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
+ // CHECK-NEXT: store ptr %a, ptr [[A_ADDR]], align 4
+ // CHECK-NEXT: store ptr %b, ptr [[B_ADDR]], align 4
+ // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load ptr, ptr [[A_ADDR]], align 4
+ // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load ptr, ptr [[B_ADDR]], align 4
+ // CHECK-NEXT: call void @__atomic_store(i32 noundef 8, ptr noundef [[LOAD_A_PTR]], ptr noundef [[LOAD_B_PTR]],
// CHECK-NEXT: ret void
__atomic_store(a, b, memory_order_seq_cst);
}
void fd3(struct S *a, struct S *b, struct S *c) {
// CHECK-LABEL: @fd3
- // CHECK: [[A_ADDR:%.*]] = alloca %struct.S*, align 4
- // CHECK-NEXT: [[B_ADDR:%.*]] = alloca %struct.S*, align 4
- // CHECK-NEXT: [[C_ADDR:%.*]] = alloca %struct.S*, align 4
- // CHECK-NEXT: store %struct.S* %a, %struct.S** [[A_ADDR]], align 4
- // CHECK-NEXT: store %struct.S* %b, %struct.S** [[B_ADDR]], align 4
- // CHECK-NEXT: store %struct.S* %c, %struct.S** [[C_ADDR]], align 4
- // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4
- // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4
- // CHECK-NEXT: [[LOAD_C_PTR:%.*]] = load %struct.S*, %struct.S** [[C_ADDR]], align 4
- // CHECK-NEXT: [[COERCED_A_TMP:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i64*
- // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i64*
- // CHECK-NEXT: [[COERCED_C:%.*]] = bitcast %struct.S* [[LOAD_C_PTR]] to i64*
- // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast i64* [[COERCED_A_TMP]] to i8*
- // CHECK-NEXT: [[CAST_B:%.*]] = bitcast i64* [[COERCED_B]] to i8*
- // CHECK-NEXT: [[CAST_C:%.*]] = bitcast i64* [[COERCED_C]] to i8*
- // CHECK-NEXT: call void @__atomic_exchange(i32 noundef 8, i8* noundef [[COERCED_A]], i8* noundef [[CAST_B]], i8* noundef [[CAST_C]],
+ // CHECK: [[A_ADDR:%.*]] = alloca ptr, align 4
+ // CHECK-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
+ // CHECK-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
+ // CHECK-NEXT: store ptr %a, ptr [[A_ADDR]], align 4
+ // CHECK-NEXT: store ptr %b, ptr [[B_ADDR]], align 4
+ // CHECK-NEXT: store ptr %c, ptr [[C_ADDR]], align 4
+ // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load ptr, ptr [[A_ADDR]], align 4
+ // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load ptr, ptr [[B_ADDR]], align 4
+ // CHECK-NEXT: [[LOAD_C_PTR:%.*]] = load ptr, ptr [[C_ADDR]], align 4
+ // CHECK-NEXT: call void @__atomic_exchange(i32 noundef 8, ptr noundef [[LOAD_A_PTR]], ptr noundef [[LOAD_B_PTR]], ptr noundef [[LOAD_C_PTR]],
__atomic_exchange(a, b, c, memory_order_seq_cst);
}
_Bool fd4(struct S *a, struct S *b, struct S *c) {
// CHECK-LABEL: @fd4
- // CHECK: [[A_ADDR:%.*]] = alloca %struct.S*, align 4
- // CHECK-NEXT: [[B_ADDR:%.*]] = alloca %struct.S*, align 4
- // CHECK-NEXT: [[C_ADDR:%.*]] = alloca %struct.S*, align 4
- // CHECK: store %struct.S* %a, %struct.S** [[A_ADDR]], align 4
- // CHECK-NEXT: store %struct.S* %b, %struct.S** [[B_ADDR]], align 4
- // CHECK-NEXT: store %struct.S* %c, %struct.S** [[C_ADDR]], align 4
- // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load %struct.S*, %struct.S** [[A_ADDR]], align 4
- // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load %struct.S*, %struct.S** [[B_ADDR]], align 4
- // CHECK-NEXT: [[LOAD_C_PTR:%.*]] = load %struct.S*, %struct.S** [[C_ADDR]], align 4
- // CHECK-NEXT: [[COERCED_A_TMP:%.*]] = bitcast %struct.S* [[LOAD_A_PTR]] to i64*
- // CHECK-NEXT: [[COERCED_B_TMP:%.*]] = bitcast %struct.S* [[LOAD_B_PTR]] to i64*
- // CHECK-NEXT: [[COERCED_C:%.*]] = bitcast %struct.S* [[LOAD_C_PTR]] to i64*
- // CHECK-NEXT: [[COERCED_A:%.*]] = bitcast i64* [[COERCED_A_TMP]] to i8*
- // CHECK-NEXT: [[COERCED_B:%.*]] = bitcast i64* [[COERCED_B_TMP]] to i8*
- // CHECK-NEXT: [[CAST_C:%.*]] = bitcast i64* [[COERCED_C]] to i8*
- // CHECK-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i32 noundef 8, i8* noundef [[COERCED_A]], i8* noundef [[COERCED_B]], i8* noundef [[CAST_C]],
+ // CHECK: [[A_ADDR:%.*]] = alloca ptr, align 4
+ // CHECK-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
+ // CHECK-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
+ // CHECK: store ptr %a, ptr [[A_ADDR]], align 4
+ // CHECK-NEXT: store ptr %b, ptr [[B_ADDR]], align 4
+ // CHECK-NEXT: store ptr %c, ptr [[C_ADDR]], align 4
+ // CHECK-NEXT: [[LOAD_A_PTR:%.*]] = load ptr, ptr [[A_ADDR]], align 4
+ // CHECK-NEXT: [[LOAD_B_PTR:%.*]] = load ptr, ptr [[B_ADDR]], align 4
+ // CHECK-NEXT: [[LOAD_C_PTR:%.*]] = load ptr, ptr [[C_ADDR]], align 4
+ // CHECK-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i32 noundef 8, ptr noundef [[LOAD_A_PTR]], ptr noundef [[LOAD_B_PTR]], ptr noundef [[LOAD_C_PTR]],
// CHECK-NEXT: ret i1 [[CALL]]
return __atomic_compare_exchange(a, b, c, 1, 5, 5);
}
int* fp1(_Atomic(int*) *p) {
// CHECK-LABEL: @fp1
- // CHECK: load atomic i32, i32* {{.*}} seq_cst, align 4
+ // CHECK: load atomic i32, ptr {{.*}} seq_cst, align 4
return __c11_atomic_load(p, memory_order_seq_cst);
}
@@ -293,20 +273,20 @@ int *fp2a(int **p) {
_Complex float fc(_Atomic(_Complex float) *c) {
// CHECK-LABEL: @fc
- // CHECK: atomicrmw xchg i64* {{.*}} seq_cst, align 8
+ // CHECK: atomicrmw xchg ptr {{.*}} seq_cst, align 8
return __c11_atomic_exchange(c, 2, memory_order_seq_cst);
}
typedef struct X { int x; } X;
X fs(_Atomic(X) *c) {
// CHECK-LABEL: @fs
- // CHECK: atomicrmw xchg i32* {{.*}} seq_cst, align 4
+ // CHECK: atomicrmw xchg ptr {{.*}} seq_cst, align 4
return __c11_atomic_exchange(c, (X){2}, memory_order_seq_cst);
}
X fsa(X *c, X *d) {
// CHECK-LABEL: @fsa
- // CHECK: atomicrmw xchg i32* {{.*}} seq_cst, align 4
+ // CHECK: atomicrmw xchg ptr {{.*}} seq_cst, align 4
X ret;
__atomic_exchange(c, d, &ret, memory_order_seq_cst);
return ret;
@@ -314,20 +294,20 @@ X fsa(X *c, X *d) {
_Bool fsb(_Bool *c) {
// CHECK-LABEL: @fsb
- // CHECK: atomicrmw xchg i8* {{.*}} seq_cst, align 1
+ // CHECK: atomicrmw xchg ptr {{.*}} seq_cst, align 1
return __atomic_exchange_n(c, 1, memory_order_seq_cst);
}
char flag1;
volatile char flag2;
void test_and_set(void) {
- // CHECK: atomicrmw xchg i8* @flag1, i8 1 seq_cst, align 1
+ // CHECK: atomicrmw xchg ptr @flag1, i8 1 seq_cst, align 1
__atomic_test_and_set(&flag1, memory_order_seq_cst);
- // CHECK: atomicrmw volatile xchg i8* @flag2, i8 1 acquire, align 1
+ // CHECK: atomicrmw volatile xchg ptr @flag2, i8 1 acquire, align 1
__atomic_test_and_set(&flag2, memory_order_acquire);
- // CHECK: store atomic volatile i8 0, i8* @flag2 release, align 1
+ // CHECK: store atomic volatile i8 0, ptr @flag2 release, align 1
__atomic_clear(&flag2, memory_order_release);
- // CHECK: store atomic i8 0, i8* @flag1 seq_cst, align 1
+ // CHECK: store atomic i8 0, ptr @flag1 seq_cst, align 1
__atomic_clear(&flag1, memory_order_seq_cst);
}
@@ -343,13 +323,13 @@ struct Incomplete;
int lock_free(struct Incomplete *incomplete) {
// CHECK-LABEL: @lock_free
- // CHECK: call zeroext i1 @__atomic_is_lock_free(i32 noundef 3, i8* noundef null)
+ // CHECK: call zeroext i1 @__atomic_is_lock_free(i32 noundef 3, ptr noundef null)
__c11_atomic_is_lock_free(3);
- // CHECK: call zeroext i1 @__atomic_is_lock_free(i32 noundef 16, i8* noundef {{.*}}@sixteen{{.*}})
+ // CHECK: call zeroext i1 @__atomic_is_lock_free(i32 noundef 16, ptr noundef {{.*}}@sixteen{{.*}})
__atomic_is_lock_free(16, &sixteen);
- // CHECK: call zeroext i1 @__atomic_is_lock_free(i32 noundef 17, i8* noundef {{.*}}@seventeen{{.*}})
+ // CHECK: call zeroext i1 @__atomic_is_lock_free(i32 noundef 17, ptr noundef {{.*}}@seventeen{{.*}})
__atomic_is_lock_free(17, &seventeen);
// CHECK: call zeroext i1 @__atomic_is_lock_free(i32 noundef 4, {{.*}})
@@ -393,30 +373,30 @@ void structAtomicStore(void) {
struct foo f = {0};
struct bar b = {0};
__atomic_store(&smallThing, &b, 5);
- // CHECK: call void @__atomic_store(i32 noundef 3, i8* noundef {{.*}} @smallThing
+ // CHECK: call void @__atomic_store(i32 noundef 3, ptr noundef @smallThing
__atomic_store(&bigThing, &f, 5);
- // CHECK: call void @__atomic_store(i32 noundef 512, i8* noundef {{.*}} @bigThing
+ // CHECK: call void @__atomic_store(i32 noundef 512, ptr noundef @bigThing
}
void structAtomicLoad(void) {
// CHECK-LABEL: @structAtomicLoad
struct bar b;
__atomic_load(&smallThing, &b, 5);
- // CHECK: call void @__atomic_load(i32 noundef 3, i8* noundef {{.*}} @smallThing
+ // CHECK: call void @__atomic_load(i32 noundef 3, ptr noundef @smallThing
struct foo f = {0};
__atomic_load(&bigThing, &f, 5);
- // CHECK: call void @__atomic_load(i32 noundef 512, i8* noundef {{.*}} @bigThing
+ // CHECK: call void @__atomic_load(i32 noundef 512, ptr noundef @bigThing
}
struct foo structAtomicExchange(void) {
// CHECK-LABEL: @structAtomicExchange
struct foo f = {0};
struct foo old;
__atomic_exchange(&f, &bigThing, &old, 5);
- // CHECK: call void @__atomic_exchange(i32 noundef 512, {{.*}}, i8* noundef bitcast ({{.*}} @bigThing to i8*),
+ // CHECK: call void @__atomic_exchange(i32 noundef 512, {{.*}}, ptr noundef @bigThing,
return __c11_atomic_exchange(&bigAtomic, f, 5);
- // CHECK: call void @__atomic_exchange(i32 noundef 512, i8* noundef bitcast ({{.*}} @bigAtomic to i8*),
+ // CHECK: call void @__atomic_exchange(i32 noundef 512, ptr noundef @bigAtomic,
}
int structAtomicCmpExchange(void) {
// CHECK-LABEL: @structAtomicCmpExchange
@@ -424,8 +404,8 @@ int structAtomicCmpExchange(void) {
_Bool x = __atomic_compare_exchange(&smallThing, &thing1, &thing2, 1, 5, 5);
// CHECK: %[[call1:.*]] = call zeroext i1 @__atomic_compare_exchange(i32 noundef 3, {{.*}} @smallThing{{.*}} @thing1{{.*}} @thing2
// CHECK: %[[zext1:.*]] = zext i1 %[[call1]] to i8
- // CHECK: store i8 %[[zext1]], i8* %[[x_mem]], align 1
- // CHECK: %[[x:.*]] = load i8, i8* %[[x_mem]]
+ // CHECK: store i8 %[[zext1]], ptr %[[x_mem]], align 1
+ // CHECK: %[[x:.*]] = load i8, ptr %[[x_mem]]
// CHECK: %[[x_bool:.*]] = trunc i8 %[[x]] to i1
// CHECK: %[[conv1:.*]] = zext i1 %[[x_bool]] to i32
@@ -433,7 +413,7 @@ int structAtomicCmpExchange(void) {
struct foo g = {0};
g.big[12] = 12;
return x & __c11_atomic_compare_exchange_strong(&bigAtomic, &f, g, 5, 5);
- // CHECK: %[[call2:.*]] = call zeroext i1 @__atomic_compare_exchange(i32 noundef 512, i8* noundef bitcast ({{.*}} @bigAtomic to i8*),
+ // CHECK: %[[call2:.*]] = call zeroext i1 @__atomic_compare_exchange(i32 noundef 512, ptr noundef @bigAtomic,
// CHECK: %[[conv2:.*]] = zext i1 %[[call2]] to i32
// CHECK: %[[and:.*]] = and i32 %[[conv1]], %[[conv2]]
// CHECK: ret i32 %[[and]]
@@ -463,19 +443,19 @@ void atomic_init_foo(void)
// CHECK-LABEL: @failureOrder
void failureOrder(_Atomic(int) *ptr, int *ptr2) {
__c11_atomic_compare_exchange_strong(ptr, ptr2, 43, memory_order_acquire, memory_order_relaxed);
- // CHECK: cmpxchg i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} acquire monotonic, align 4
+ // CHECK: cmpxchg ptr {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} acquire monotonic, align 4
__c11_atomic_compare_exchange_weak(ptr, ptr2, 43, memory_order_seq_cst, memory_order_acquire);
- // CHECK: cmpxchg weak i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} seq_cst acquire, align 4
+ // CHECK: cmpxchg weak ptr {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} seq_cst acquire, align 4
// Unknown ordering: conservatively pick strongest valid option (for now!).
__atomic_compare_exchange(ptr2, ptr2, ptr2, 0, memory_order_acq_rel, *ptr2);
- // CHECK: cmpxchg i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} acq_rel acquire, align 4
+ // CHECK: cmpxchg ptr {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} acq_rel acquire, align 4
// Undefined behaviour: don't really care what that last ordering is so leave
// it out:
__atomic_compare_exchange_n(ptr2, ptr2, 43, 1, memory_order_seq_cst, 42);
- // CHECK: cmpxchg weak i32* {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} seq_cst {{.*}}, align 4
+ // CHECK: cmpxchg weak ptr {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z._]+}}, i32 {{%[0-9A-Za-z_.]+}} seq_cst {{.*}}, align 4
}
// CHECK-LABEL: @generalFailureOrder
@@ -656,44 +636,44 @@ int PR21643(void) {
__ATOMIC_RELAXED);
// CHECK: %[[atomictmp:.*]] = alloca i32, align 4
// CHECK: %[[atomicdst:.*]] = alloca i32, align 4
- // CHECK: store i32 1, i32* %[[atomictmp]]
- // CHECK: %[[one:.*]] = load i32, i32* %[[atomictmp]], align 4
- // CHECK: %[[old:.*]] = atomicrmw or i32 addrspace(257)* inttoptr (i32 776 to i32 addrspace(257)*), i32 %[[one]] monotonic, align 4
+ // CHECK: store i32 1, ptr %[[atomictmp]]
+ // CHECK: %[[one:.*]] = load i32, ptr %[[atomictmp]], align 4
+ // CHECK: %[[old:.*]] = atomicrmw or ptr addrspace(257) inttoptr (i32 776 to ptr addrspace(257)), i32 %[[one]] monotonic, align 4
// CHECK: %[[new:.*]] = or i32 %[[old]], %[[one]]
- // CHECK: store i32 %[[new]], i32* %[[atomicdst]], align 4
- // CHECK: %[[ret:.*]] = load i32, i32* %[[atomicdst]], align 4
+ // CHECK: store i32 %[[new]], ptr %[[atomicdst]], align 4
+ // CHECK: %[[ret:.*]] = load i32, ptr %[[atomicdst]], align 4
// CHECK: ret i32 %[[ret]]
}
int PR17306_1(volatile _Atomic(int) *i) {
// CHECK-LABEL: @PR17306_1
- // CHECK: %[[i_addr:.*]] = alloca i32
+ // CHECK: %[[i_addr:.*]] = alloca ptr
// CHECK-NEXT: %[[atomicdst:.*]] = alloca i32
- // CHECK-NEXT: store i32* %i, i32** %[[i_addr]]
- // CHECK-NEXT: %[[addr:.*]] = load i32*, i32** %[[i_addr]]
- // CHECK-NEXT: %[[res:.*]] = load atomic volatile i32, i32* %[[addr]] seq_cst, align 4
- // CHECK-NEXT: store i32 %[[res]], i32* %[[atomicdst]]
- // CHECK-NEXT: %[[retval:.*]] = load i32, i32* %[[atomicdst]]
+ // CHECK-NEXT: store ptr %i, ptr %[[i_addr]]
+ // CHECK-NEXT: %[[addr:.*]] = load ptr, ptr %[[i_addr]]
+ // CHECK-NEXT: %[[res:.*]] = load atomic volatile i32, ptr %[[addr]] seq_cst, align 4
+ // CHECK-NEXT: store i32 %[[res]], ptr %[[atomicdst]]
+ // CHECK-NEXT: %[[retval:.*]] = load i32, ptr %[[atomicdst]]
// CHECK-NEXT: ret i32 %[[retval]]
return __c11_atomic_load(i, memory_order_seq_cst);
}
int PR17306_2(volatile int *i, int value) {
// CHECK-LABEL: @PR17306_2
- // CHECK: %[[i_addr:.*]] = alloca i32*
+ // CHECK: %[[i_addr:.*]] = alloca ptr
// CHECK-NEXT: %[[value_addr:.*]] = alloca i32
// CHECK-NEXT: %[[atomictmp:.*]] = alloca i32
// CHECK-NEXT: %[[atomicdst:.*]] = alloca i32
- // CHECK-NEXT: store i32* %i, i32** %[[i_addr]]
- // CHECK-NEXT: store i32 %value, i32* %[[value_addr]]
- // CHECK-NEXT: %[[i_lval:.*]] = load i32*, i32** %[[i_addr]]
- // CHECK-NEXT: %[[value:.*]] = load i32, i32* %[[value_addr]]
- // CHECK-NEXT: store i32 %[[value]], i32* %[[atomictmp]]
- // CHECK-NEXT: %[[value_lval:.*]] = load i32, i32* %[[atomictmp]]
- // CHECK-NEXT: %[[old_val:.*]] = atomicrmw volatile add i32* %[[i_lval]], i32 %[[value_lval]] seq_cst, align 4
+ // CHECK-NEXT: store ptr %i, ptr %[[i_addr]]
+ // CHECK-NEXT: store i32 %value, ptr %[[value_addr]]
+ // CHECK-NEXT: %[[i_lval:.*]] = load ptr, ptr %[[i_addr]]
+ // CHECK-NEXT: %[[value:.*]] = load i32, ptr %[[value_addr]]
+ // CHECK-NEXT: store i32 %[[value]], ptr %[[atomictmp]]
+ // CHECK-NEXT: %[[value_lval:.*]] = load i32, ptr %[[atomictmp]]
+ // CHECK-NEXT: %[[old_val:.*]] = atomicrmw volatile add ptr %[[i_lval]], i32 %[[value_lval]] seq_cst, align 4
// CHECK-NEXT: %[[new_val:.*]] = add i32 %[[old_val]], %[[value_lval]]
- // CHECK-NEXT: store i32 %[[new_val]], i32* %[[atomicdst]]
- // CHECK-NEXT: %[[retval:.*]] = load i32, i32* %[[atomicdst]]
+ // CHECK-NEXT: store i32 %[[new_val]], ptr %[[atomicdst]]
+ // CHECK-NEXT: %[[retval:.*]] = load i32, ptr %[[atomicdst]]
// CHECK-NEXT: ret i32 %[[retval]]
return __atomic_add_fetch(i, value, memory_order_seq_cst);
}
@@ -717,37 +697,37 @@ void test_underaligned(void) {
__atomic_load(&aligned_a, &aligned_b, memory_order_seq_cst);
// CHECK: store atomic i64 {{.*}}, align 16
__atomic_store(&aligned_a, &aligned_b, memory_order_seq_cst);
- // CHECK: atomicrmw xchg i64* {{.*}}, align 8
+ // CHECK: atomicrmw xchg ptr {{.*}}, align 8
__atomic_exchange(&aligned_a, &aligned_b, &aligned_c, memory_order_seq_cst);
- // CHECK: cmpxchg weak i64* {{.*}}, align 8
+ // CHECK: cmpxchg weak ptr {{.*}}, align 8
__atomic_compare_exchange(&aligned_a, &aligned_b, &aligned_c, 1, memory_order_seq_cst, memory_order_seq_cst);
}
void test_c11_minmax(_Atomic(int) * si, _Atomic(unsigned) * ui, _Atomic(short) * ss, _Atomic(unsigned char) * uc, _Atomic(long long) * sll) {
// CHECK-LABEL: @test_c11_minmax
- // CHECK: atomicrmw max i32* {{.*}} acquire, align 4
+ // CHECK: atomicrmw max ptr {{.*}} acquire, align 4
*si = __c11_atomic_fetch_max(si, 42, memory_order_acquire);
- // CHECK: atomicrmw min i32* {{.*}} acquire, align 4
+ // CHECK: atomicrmw min ptr {{.*}} acquire, align 4
*si = __c11_atomic_fetch_min(si, 42, memory_order_acquire);
- // CHECK: atomicrmw umax i32* {{.*}} acquire, align 4
+ // CHECK: atomicrmw umax ptr {{.*}} acquire, align 4
*ui = __c11_atomic_fetch_max(ui, 42, memory_order_acquire);
- // CHECK: atomicrmw umin i32* {{.*}} acquire, align 4
+ // CHECK: atomicrmw umin ptr {{.*}} acquire, align 4
*ui = __c11_atomic_fetch_min(ui, 42, memory_order_acquire);
- // CHECK: atomicrmw max i16* {{.*}} acquire, align 2
+ // CHECK: atomicrmw max ptr {{.*}} acquire, align 2
*ss = __c11_atomic_fetch_max(ss, 42, memory_order_acquire);
- // CHECK: atomicrmw min i16* {{.*}} acquire, align 2
+ // CHECK: atomicrmw min ptr {{.*}} acquire, align 2
*ss = __c11_atomic_fetch_min(ss, 42, memory_order_acquire);
- // CHECK: atomicrmw umax i8* {{.*}} acquire, align 1
+ // CHECK: atomicrmw umax ptr {{.*}} acquire, align 1
*uc = __c11_atomic_fetch_max(uc, 42, memory_order_acquire);
- // CHECK: atomicrmw umin i8* {{.*}} acquire, align 1
+ // CHECK: atomicrmw umin ptr {{.*}} acquire, align 1
*uc = __c11_atomic_fetch_min(uc, 42, memory_order_acquire);
- // CHECK: atomicrmw max i64* {{.*}} acquire, align 8
+ // CHECK: atomicrmw max ptr {{.*}} acquire, align 8
*sll = __c11_atomic_fetch_max(sll, 42, memory_order_acquire);
- // CHECK: atomicrmw min i64* {{.*}} acquire, align 8
+ // CHECK: atomicrmw min ptr {{.*}} acquire, align 8
*sll = __c11_atomic_fetch_min(sll, 42, memory_order_acquire);
}
@@ -756,46 +736,46 @@ void test_minmax_postop(int *si, unsigned *ui, unsigned short *us, signed char *
int val = 42;
// CHECK-LABEL: @test_minmax_postop
- // CHECK: [[OLD:%.*]] = atomicrmw max i32* [[PTR:%.*]], i32 [[RHS:%.*]] release, align 4
+ // CHECK: [[OLD:%.*]] = atomicrmw max ptr [[PTR:%.*]], i32 [[RHS:%.*]] release, align 4
// CHECK: [[TST:%.*]] = icmp sgt i32 [[OLD]], [[RHS]]
// CHECK: [[NEW:%.*]] = select i1 [[TST]], i32 [[OLD]], i32 [[RHS]]
- // CHECK: store i32 [[NEW]], i32*
+ // CHECK: store i32 [[NEW]], ptr
*si = __atomic_max_fetch(si, 42, memory_order_release);
- // CHECK: [[OLD:%.*]] = atomicrmw min i32* [[PTR:%.*]], i32 [[RHS:%.*]] release, align 4
+ // CHECK: [[OLD:%.*]] = atomicrmw min ptr [[PTR:%.*]], i32 [[RHS:%.*]] release, align 4
// CHECK: [[TST:%.*]] = icmp slt i32 [[OLD]], [[RHS]]
// CHECK: [[NEW:%.*]] = select i1 [[TST]], i32 [[OLD]], i32 [[RHS]]
- // CHECK: store i32 [[NEW]], i32*
+ // CHECK: store i32 [[NEW]], ptr
*si = __atomic_min_fetch(si, 42, memory_order_release);
- // CHECK: [[OLD:%.*]] = atomicrmw umax i32* [[PTR:%.*]], i32 [[RHS:%.*]] release, align 4
+ // CHECK: [[OLD:%.*]] = atomicrmw umax ptr [[PTR:%.*]], i32 [[RHS:%.*]] release, align 4
// CHECK: [[TST:%.*]] = icmp ugt i32 [[OLD]], [[RHS]]
// CHECK: [[NEW:%.*]] = select i1 [[TST]], i32 [[OLD]], i32 [[RHS]]
- // CHECK: store i32 [[NEW]], i32*
+ // CHECK: store i32 [[NEW]], ptr
*ui = __atomic_max_fetch(ui, 42, memory_order_release);
- // CHECK: [[OLD:%.*]] = atomicrmw umin i32* [[PTR:%.*]], i32 [[RHS:%.*]] release, align 4
+ // CHECK: [[OLD:%.*]] = atomicrmw umin ptr [[PTR:%.*]], i32 [[RHS:%.*]] release, align 4
// CHECK: [[TST:%.*]] = icmp ult i32 [[OLD]], [[RHS]]
// CHECK: [[NEW:%.*]] = select i1 [[TST]], i32 [[OLD]], i32 [[RHS]]
- // CHECK: store i32 [[NEW]], i32*
+ // CHECK: store i32 [[NEW]], ptr
*ui = __atomic_min_fetch(ui, 42, memory_order_release);
- // CHECK: [[OLD:%.*]] = atomicrmw umin i16* [[PTR:%.*]], i16 [[RHS:%.*]] release, align 2
+ // CHECK: [[OLD:%.*]] = atomicrmw umin ptr [[PTR:%.*]], i16 [[RHS:%.*]] release, align 2
// CHECK: [[TST:%.*]] = icmp ult i16 [[OLD]], [[RHS]]
// CHECK: [[NEW:%.*]] = select i1 [[TST]], i16 [[OLD]], i16 [[RHS]]
- // CHECK: store i16 [[NEW]], i16*
+ // CHECK: store i16 [[NEW]], ptr
*us = __atomic_min_fetch(us, 42, memory_order_release);
- // CHECK: [[OLD:%.*]] = atomicrmw min i8* [[PTR:%.*]], i8 [[RHS:%.*]] release, align 1
+ // CHECK: [[OLD:%.*]] = atomicrmw min ptr [[PTR:%.*]], i8 [[RHS:%.*]] release, align 1
// CHECK: [[TST:%.*]] = icmp slt i8 [[OLD]], [[RHS]]
// CHECK: [[NEW:%.*]] = select i1 [[TST]], i8 [[OLD]], i8 [[RHS]]
- // CHECK: store i8 [[NEW]], i8*
+ // CHECK: store i8 [[NEW]], ptr
*sc = __atomic_min_fetch(sc, 42, memory_order_release);
- // CHECK: [[OLD:%.*]] = call i64 @__atomic_fetch_umin_8(i8* noundef {{%.*}}, i64 noundef [[RHS:%.*]],
+ // CHECK: [[OLD:%.*]] = call i64 @__atomic_fetch_umin_8(ptr noundef {{%.*}}, i64 noundef [[RHS:%.*]],
// CHECK: [[TST:%.*]] = icmp ult i64 [[OLD]], [[RHS]]
// CHECK: [[NEW:%.*]] = select i1 [[TST]], i64 [[OLD]], i64 [[RHS]]
- // CHECK: store i64 [[NEW]], i64*
+ // CHECK: store i64 [[NEW]], ptr
*ull = __atomic_min_fetch(ull, 42, memory_order_release);
}
diff --git a/clang/test/CodeGen/attr-nomerge.cpp b/clang/test/CodeGen/attr-nomerge.cpp
index b8f678896019..1be84d76aa45 100644
--- a/clang/test/CodeGen/attr-nomerge.cpp
+++ b/clang/test/CodeGen/attr-nomerge.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -S -emit-llvm %s -triple x86_64-unknown-linux-gnu -o - | FileCheck %s
+// RUN: %clang_cc1 -S -emit-llvm %s -triple x86_64-unknown-linux-gnu -o - | FileCheck %s
class A {
public:
@@ -74,10 +74,14 @@ void something_else_again() {
// CHECK: call noundef zeroext i1 @_Z3barv() #[[ATTR0]]
// CHECK: call void asm sideeffect "nop"{{.*}} #[[ATTR1:[0-9]+]]
// CHECK: call noundef zeroext i1 @_Z3barv(){{$}}
-// CHECK: %[[AG:.*]] = load void (%class.A*)*, void (%class.A*)**
-// CHECK-NEXT: call void %[[AG]](%class.A* {{.*}}) #[[ATTR0]]
-// CHECK: %[[BG:.*]] = load void (%class.B*)*, void (%class.B*)**
-// CHECK-NEXT: call void %[[BG]](%class.B* noundef{{.*}}
+// CHECK: load ptr, ptr
+// CHECK: load ptr, ptr
+// CHECK: %[[AG:.*]] = load ptr, ptr
+// CHECK-NEXT: call void %[[AG]](ptr {{.*}}) #[[ATTR0]]
+// CHECK: load ptr, ptr
+// CHECK: load ptr, ptr
+// CHECK: %[[BG:.*]] = load ptr, ptr
+// CHECK-NEXT: call void %[[BG]](ptr noundef{{.*}}
// CHECK: call void @_ZN1AC1Ev({{.*}}) #[[ATTR0]]
// CHECK: call void @_ZN1A1fEv({{.*}}) #[[ATTR0]]
// CHECK: call void @_ZN1A1gEv({{.*}}) #[[ATTR0]]
@@ -85,9 +89,11 @@ void something_else_again() {
// CHECK: call void @_ZN1BC1Ev({{.*}}){{$}}
// CHECK: call void @_ZN1B1gEv({{.*}}){{$}}
// CHECK: call void @_ZN1BC1Ev({{.*}}){{$}}
-// CHECK: %[[AG:.*]] = load void (%class.A*)*, void (%class.A*)**
-// CHECK-NEXT: call void %[[AG]](%class.A* {{.*}}) #[[ATTR1]]
-// CHECK: call void @_ZN1AD1Ev(%class.A* {{.*}}) #[[ATTR1]]
+// CHECK: load ptr, ptr
+// CHECK: load ptr, ptr
+// CHECK: %[[AG:.*]] = load ptr, ptr
+// CHECK-NEXT: call void %[[AG]](ptr {{.*}}) #[[ATTR1]]
+// CHECK: call void @_ZN1AD1Ev(ptr {{.*}}) #[[ATTR1]]
// CHECK-DAG: attributes #[[ATTR0]] = {{{.*}}nomerge{{.*}}}
// CHECK-DAG: attributes #[[ATTR1]] = {{{.*}}nomerge{{.*}}}
diff --git a/clang/test/CodeGen/attr-target-mv-va-args.c b/clang/test/CodeGen/attr-target-mv-va-args.c
index c51eaffcdde7..e75796d7ee03 100644
--- a/clang/test/CodeGen/attr-target-mv-va-args.c
+++ b/clang/test/CodeGen/attr-target-mv-va-args.c
@@ -1,6 +1,6 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=LINUX
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-windows-pc -emit-llvm %s -o - | FileCheck %s --check-prefixes=NO-IFUNC,WINDOWS
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64-fuchsia -emit-llvm %s -o - | FileCheck %s --check-prefixes=NO-IFUNC,FUCHSIA
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=LINUX
+// RUN: %clang_cc1 -triple x86_64-windows-pc -emit-llvm %s -o - | FileCheck %s --check-prefixes=NO-IFUNC,WINDOWS
+// RUN: %clang_cc1 -triple x86_64-fuchsia -emit-llvm %s -o - | FileCheck %s --check-prefixes=NO-IFUNC,FUCHSIA
int __attribute__((target("sse4.2"))) foo(int i, ...) { return 0; }
int __attribute__((target("arch=sandybridge"))) foo(int i, ...);
int __attribute__((target("arch=ivybridge"))) foo(int i, ...) {return 1;}
@@ -10,7 +10,7 @@ int bar(void) {
return foo(1, 'a', 1.1) + foo(2, 2.2, "asdf");
}
-// LINUX: @foo.ifunc = weak_odr ifunc i32 (i32, ...), i32 (i32, ...)* ()* @foo.resolver
+// LINUX: @foo.ifunc = weak_odr ifunc i32 (i32, ...), ptr @foo.resolver
// LINUX: define{{.*}} i32 @foo.sse4.2(i32 noundef %i, ...)
// LINUX: ret i32 0
// LINUX: define{{.*}} i32 @foo.arch_ivybridge(i32 noundef %i, ...)
@@ -19,13 +19,13 @@ int bar(void) {
// LINUX: ret i32 2
// LINUX: define{{.*}} i32 @bar()
// LINUX: call i32 (i32, ...) @foo.ifunc(i32 noundef 1, i32 noundef 97, double
-// LINUX: call i32 (i32, ...) @foo.ifunc(i32 noundef 2, double noundef 2.2{{[0-9Ee+]+}}, i8* noundef getelementptr inbounds
+// LINUX: call i32 (i32, ...) @foo.ifunc(i32 noundef 2, double noundef 2.2{{[0-9Ee+]+}}, ptr noundef
-// LINUX: define weak_odr i32 (i32, ...)* @foo.resolver() comdat
-// LINUX: ret i32 (i32, ...)* @foo.arch_sandybridge
-// LINUX: ret i32 (i32, ...)* @foo.arch_ivybridge
-// LINUX: ret i32 (i32, ...)* @foo.sse4.2
-// LINUX: ret i32 (i32, ...)* @foo
+// LINUX: define weak_odr ptr @foo.resolver() comdat
+// LINUX: ret ptr @foo.arch_sandybridge
+// LINUX: ret ptr @foo.arch_ivybridge
+// LINUX: ret ptr @foo.sse4.2
+// LINUX: ret ptr @foo
// LINUX: declare i32 @foo.arch_sandybridge(i32 noundef, ...)
// NO-IFUNC: define dso_local i32 @foo.sse4.2(i32 noundef %i, ...)
@@ -36,7 +36,7 @@ int bar(void) {
// NO-IFUNC: ret i32 2
// NO-IFUNC: define dso_local i32 @bar()
// NO-IFUNC: call i32 (i32, ...) @foo.resolver(i32 noundef 1, i32 noundef 97, double
-// NO-IFUNC: call i32 (i32, ...) @foo.resolver(i32 noundef 2, double noundef 2.2{{[0-9Ee+]+}}, i8* noundef getelementptr inbounds
+// NO-IFUNC: call i32 (i32, ...) @foo.resolver(i32 noundef 2, double noundef 2.2{{[0-9Ee+]+}}, ptr noundef
// WINDOWS: define weak_odr dso_local i32 @foo.resolver(i32 %0, ...) comdat
// FUCHSIA: define weak_odr i32 @foo.resolver(i32 %0, ...) comdat
diff --git a/clang/test/CodeGen/big-atomic-ops.c b/clang/test/CodeGen/big-atomic-ops.c
index 15d0ef3279a4..7ef772027ef8 100644
--- a/clang/test/CodeGen/big-atomic-ops.c
+++ b/clang/test/CodeGen/big-atomic-ops.c
@@ -1,9 +1,9 @@
-// RUN: %clang_cc1 -no-opaque-pointers %s -emit-llvm -o - -triple=x86_64-apple-macosx10.9.0 | FileCheck %s
+// RUN: %clang_cc1 %s -emit-llvm -o - -triple=x86_64-apple-macosx10.9.0 | FileCheck %s
// REQUIRES: x86-registered-target
// Also test serialization of atomic operations here, to avoid duplicating the
// test.
-// RUN: %clang_cc1 -no-opaque-pointers %s -emit-pch -o %t -triple=x86_64-apple-macosx10.9.0
-// RUN: %clang_cc1 -no-opaque-pointers %s -include-pch %t -triple=x86_64-apple-macosx10.9.0 -emit-llvm -o - | FileCheck %s
+// RUN: %clang_cc1 %s -emit-pch -o %t -triple=x86_64-apple-macosx10.9.0
+// RUN: %clang_cc1 %s -include-pch %t -triple=x86_64-apple-macosx10.9.0 -emit-llvm -o - | FileCheck %s
#ifndef ALREADY_INCLUDED
#define ALREADY_INCLUDED
@@ -16,13 +16,13 @@ typedef enum memory_order {
int fi1(_Atomic(int) *i) {
// CHECK: @fi1
- // CHECK: load atomic i32, i32* {{.*}} seq_cst, align 4
+ // CHECK: load atomic i32, ptr {{.*}} seq_cst, align 4
return __c11_atomic_load(i, memory_order_seq_cst);
}
int fi1a(int *i) {
// CHECK: @fi1a
- // CHECK: load atomic i32, i32* {{.*}} seq_cst, align 4
+ // CHECK: load atomic i32, ptr {{.*}} seq_cst, align 4
int v;
__atomic_load(i, &v, memory_order_seq_cst);
return v;
@@ -30,7 +30,7 @@ int fi1a(int *i) {
int fi1b(int *i) {
// CHECK: @fi1b
- // CHECK: load atomic i32, i32* {{.*}} seq_cst, align 4
+ // CHECK: load atomic i32, ptr {{.*}} seq_cst, align 4
return __atomic_load_n(i, memory_order_seq_cst);
}
@@ -91,14 +91,14 @@ int fi3d(int *i) {
_Bool fi4(_Atomic(int) *i) {
// CHECK: @fi4
- // CHECK: cmpxchg i32* {{.*}} acquire acquire, align 4
+ // CHECK: cmpxchg ptr {{.*}} acquire acquire, align 4
int cmp = 0;
return __c11_atomic_compare_exchange_strong(i, &cmp, 1, memory_order_acquire, memory_order_acquire);
}
_Bool fi4a(int *i) {
// CHECK: @fi4
- // CHECK: cmpxchg i32* {{.*}} acquire acquire, align 4
+ // CHECK: cmpxchg ptr {{.*}} acquire acquire, align 4
int cmp = 0;
int desired = 1;
return __atomic_compare_exchange(i, &cmp, &desired, 0, memory_order_acquire, memory_order_acquire);
@@ -106,14 +106,14 @@ _Bool fi4a(int *i) {
_Bool fi4b(int *i) {
// CHECK: @fi4
- // CHECK: cmpxchg weak i32* {{.*}} acquire acquire, align 4
+ // CHECK: cmpxchg weak ptr {{.*}} acquire acquire, align 4
int cmp = 0;
return __atomic_compare_exchange_n(i, &cmp, 1, 1, memory_order_acquire, memory_order_acquire);
}
float ff1(_Atomic(float) *d) {
// CHECK: @ff1
- // CHECK: load atomic i32, i32* {{.*}} monotonic, align 4
+ // CHECK: load atomic i32, ptr {{.*}} monotonic, align 4
return __c11_atomic_load(d, memory_order_relaxed);
}
@@ -129,7 +129,7 @@ float ff3(_Atomic(float) *d) {
int* fp1(_Atomic(int*) *p) {
// CHECK: @fp1
- // CHECK: load atomic i64, i64* {{.*}} seq_cst, align 8
+ // CHECK: load atomic i64, ptr {{.*}} seq_cst, align 8
return __c11_atomic_load(p, memory_order_seq_cst);
}
@@ -150,20 +150,20 @@ int *fp2a(int **p) {
_Complex float fc(_Atomic(_Complex float) *c) {
// CHECK: @fc
- // CHECK: atomicrmw xchg i64* {{.*}} seq_cst, align 8
+ // CHECK: atomicrmw xchg ptr {{.*}} seq_cst, align 8
return __c11_atomic_exchange(c, 2, memory_order_seq_cst);
}
typedef struct X { int x; } X;
X fs(_Atomic(X) *c) {
// CHECK: @fs
- // CHECK: atomicrmw xchg i32* {{.*}} seq_cst, align 4
+ // CHECK: atomicrmw xchg ptr {{.*}} seq_cst, align 4
return __c11_atomic_exchange(c, (X){2}, memory_order_seq_cst);
}
X fsa(X *c, X *d) {
// CHECK: @fsa
- // CHECK: atomicrmw xchg i32* {{.*}} seq_cst, align 4
+ // CHECK: atomicrmw xchg ptr {{.*}} seq_cst, align 4
X ret;
__atomic_exchange(c, d, &ret, memory_order_seq_cst);
return ret;
@@ -171,20 +171,20 @@ X fsa(X *c, X *d) {
_Bool fsb(_Bool *c) {
// CHECK: @fsb
- // CHECK: atomicrmw xchg i8* {{.*}} seq_cst, align 1
+ // CHECK: atomicrmw xchg ptr {{.*}} seq_cst, align 1
return __atomic_exchange_n(c, 1, memory_order_seq_cst);
}
char flag1;
volatile char flag2;
void test_and_set(void) {
- // CHECK: atomicrmw xchg i8* @flag1, i8 1 seq_cst, align 1
+ // CHECK: atomicrmw xchg ptr @flag1, i8 1 seq_cst, align 1
__atomic_test_and_set(&flag1, memory_order_seq_cst);
- // CHECK: atomicrmw volatile xchg i8* @flag2, i8 1 acquire, align 1
+ // CHECK: atomicrmw volatile xchg ptr @flag2, i8 1 acquire, align 1
__atomic_test_and_set(&flag2, memory_order_acquire);
- // CHECK: store atomic volatile i8 0, i8* @flag2 release, align 1
+ // CHECK: store atomic volatile i8 0, ptr @flag2 release, align 1
__atomic_clear(&flag2, memory_order_release);
- // CHECK: store atomic i8 0, i8* @flag1 seq_cst, align 1
+ // CHECK: store atomic i8 0, ptr @flag1 seq_cst, align 1
__atomic_clear(&flag1, memory_order_seq_cst);
}
@@ -198,13 +198,13 @@ struct Seventeen {
int lock_free(struct Incomplete *incomplete) {
// CHECK: @lock_free
- // CHECK: call zeroext i1 @__atomic_is_lock_free(i64 noundef 3, i8* noundef null)
+ // CHECK: call zeroext i1 @__atomic_is_lock_free(i64 noundef 3, ptr noundef null)
__c11_atomic_is_lock_free(3);
- // CHECK: call zeroext i1 @__atomic_is_lock_free(i64 noundef 16, i8* noundef {{.*}}@sixteen{{.*}})
+ // CHECK: call zeroext i1 @__atomic_is_lock_free(i64 noundef 16, ptr noundef {{.*}}@sixteen{{.*}})
__atomic_is_lock_free(16, &sixteen);
- // CHECK: call zeroext i1 @__atomic_is_lock_free(i64 noundef 17, i8* noundef {{.*}}@seventeen{{.*}})
+ // CHECK: call zeroext i1 @__atomic_is_lock_free(i64 noundef 17, ptr noundef {{.*}}@seventeen{{.*}})
__atomic_is_lock_free(17, &seventeen);
// CHECK: call zeroext i1 @__atomic_is_lock_free(i64 noundef 4, {{.*}})
@@ -247,36 +247,36 @@ void structAtomicStore(void) {
// CHECK: @structAtomicStore
struct foo f = {0};
__c11_atomic_store(&bigAtomic, f, 5);
- // CHECK: call void @__atomic_store(i64 noundef 512, i8* noundef bitcast ({{.*}} @bigAtomic to i8*),
+ // CHECK: call void @__atomic_store(i64 noundef 512, ptr noundef @bigAtomic,
struct bar b = {0};
__atomic_store(&smallThing, &b, 5);
- // CHECK: call void @__atomic_store(i64 noundef 3, i8* noundef {{.*}} @smallThing
+ // CHECK: call void @__atomic_store(i64 noundef 3, ptr noundef @smallThing
__atomic_store(&bigThing, &f, 5);
- // CHECK: call void @__atomic_store(i64 noundef 512, i8* noundef {{.*}} @bigThing
+ // CHECK: call void @__atomic_store(i64 noundef 512, ptr noundef @bigThing
}
void structAtomicLoad(void) {
// CHECK: @structAtomicLoad
struct foo f = __c11_atomic_load(&bigAtomic, 5);
- // CHECK: call void @__atomic_load(i64 noundef 512, i8* noundef bitcast ({{.*}} @bigAtomic to i8*),
+ // CHECK: call void @__atomic_load(i64 noundef 512, ptr noundef @bigAtomic,
struct bar b;
__atomic_load(&smallThing, &b, 5);
- // CHECK: call void @__atomic_load(i64 noundef 3, i8* noundef {{.*}} @smallThing
+ // CHECK: call void @__atomic_load(i64 noundef 3, ptr noundef @smallThing
__atomic_load(&bigThing, &f, 5);
- // CHECK: call void @__atomic_load(i64 noundef 512, i8* noundef {{.*}} @bigThing
+ // CHECK: call void @__atomic_load(i64 noundef 512, ptr noundef @bigThing
}
struct foo structAtomicExchange(void) {
// CHECK: @structAtomicExchange
struct foo f = {0};
struct foo old;
__atomic_exchange(&f, &bigThing, &old, 5);
- // CHECK: call void @__atomic_exchange(i64 noundef 512, {{.*}}, i8* noundef bitcast ({{.*}} @bigThing to i8*),
+ // CHECK: call void @__atomic_exchange(i64 noundef 512, {{.*}}, ptr noundef @bigThing,
return __c11_atomic_exchange(&bigAtomic, f, 5);
- // CHECK: call void @__atomic_exchange(i64 noundef 512, i8* noundef bitcast ({{.*}} @bigAtomic to i8*),
+ // CHECK: call void @__atomic_exchange(i64 noundef 512, ptr noundef @bigAtomic,
}
int structAtomicCmpExchange(void) {
// CHECK: @structAtomicCmpExchange
@@ -287,7 +287,7 @@ int structAtomicCmpExchange(void) {
struct foo g = {0};
g.big[12] = 12;
return x & __c11_atomic_compare_exchange_strong(&bigAtomic, &f, g, 5, 5);
- // CHECK: call zeroext i1 @__atomic_compare_exchange(i64 noundef 512, i8* noundef bitcast ({{.*}} @bigAtomic to i8*),
+ // CHECK: call zeroext i1 @__atomic_compare_exchange(i64 noundef 512, ptr noundef @bigAtomic,
}
// Check that no atomic operations are used in any initialisation of _Atomic
diff --git a/clang/test/CodeGen/blocks.c b/clang/test/CodeGen/blocks.c
index 9f9d7fe55ef7..f6a36c8b7bef 100644
--- a/clang/test/CodeGen/blocks.c
+++ b/clang/test/CodeGen/blocks.c
@@ -1,9 +1,7 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple i386-unknown-unknown %s -emit-llvm -Wno-strict-prototypes -o - -fblocks | FileCheck %s
+// RUN: %clang_cc1 -triple i386-unknown-unknown %s -emit-llvm -Wno-strict-prototypes -o - -fblocks | FileCheck %s
-// CHECK: %[[STRUCT_BLOCK_DESCRIPTOR:.*]] = type { i32, i32 }
-
-// CHECK: @{{.*}} = internal constant { i32, i32, i8*, i8*, i8*, i8* } { i32 0, i32 24, i8* bitcast (void (i8*, i8*)* @__copy_helper_block_4_20r to i8*), i8* bitcast (void (i8*)* @__destroy_helper_block_4_20r to i8*), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @{{.*}}, i32 0, i32 0), i8* null }, align 4
-// CHECK: @[[BLOCK_DESCRIPTOR_TMP21:.*]] = internal constant { i32, i32, i8*, i8*, i8*, i8* } { i32 0, i32 24, i8* bitcast (void (i8*, i8*)* @__copy_helper_block_4_20r to i8*), i8* bitcast (void (i8*)* @__destroy_helper_block_4_20r to i8*), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @{{.*}}, i32 0, i32 0), i8* null }, align 4
+// CHECK: @{{.*}} = internal constant { i32, i32, ptr, ptr, ptr, ptr } { i32 0, i32 24, ptr @__copy_helper_block_4_20r, ptr @__destroy_helper_block_4_20r, ptr @{{.*}}, ptr null }, align 4
+// CHECK: @[[BLOCK_DESCRIPTOR_TMP21:.*]] = internal constant { i32, i32, ptr, ptr, ptr, ptr } { i32 0, i32 24, ptr @__copy_helper_block_4_20r, ptr @__destroy_helper_block_4_20r, ptr @{{.*}}, ptr null }, align 4
void (^f)(void) = ^{};
@@ -18,7 +16,7 @@ struct s0 {
int a[64];
};
-// CHECK: define internal void @__f2_block_invoke(%struct.s0* noalias sret(%struct.s0) align 4 {{%.*}}, i8* noundef {{%.*}}, %struct.s0* noundef byval(%struct.s0) align 4 {{.*}})
+// CHECK: define internal void @__f2_block_invoke(ptr noalias sret(%struct.s0) align 4 {{%.*}}, ptr noundef {{%.*}}, ptr noundef byval(%struct.s0) align 4 {{.*}})
struct s0 f2(struct s0 a0) {
return ^(struct s0 a1){ return a1; }(a0);
}
@@ -33,30 +31,26 @@ void (^test1)(void) = ^(void) {
^ { i = 1; }();
};
-// CHECK-LABEL: define linkonce_odr hidden void @__copy_helper_block_4_20r(i8* noundef %0, i8* noundef %1) unnamed_addr
-// CHECK: %[[_ADDR:.*]] = alloca i8*, align 4
-// CHECK-NEXT: %[[_ADDR1:.*]] = alloca i8*, align 4
-// CHECK-NEXT: store i8* %0, i8** %[[_ADDR]], align 4
-// CHECK-NEXT: store i8* %1, i8** %[[_ADDR1]], align 4
-// CHECK-NEXT: %[[V2:.*]] = load i8*, i8** %[[_ADDR1]], align 4
-// CHECK-NEXT: %[[BLOCK_SOURCE:.*]] = bitcast i8* %[[V2]] to <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>*
-// CHECK-NEXT: %[[V3:.*]] = load i8*, i8** %[[_ADDR]], align 4
-// CHECK-NEXT: %[[BLOCK_DEST:.*]] = bitcast i8* %[[V3]] to <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>*
-// CHECK-NEXT: %[[V4:.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>, <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>* %[[BLOCK_SOURCE]], i32 0, i32 5
-// CHECK-NEXT: %[[V5:.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>, <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>* %[[BLOCK_DEST]], i32 0, i32 5
-// CHECK-NEXT: %[[BLOCKCOPY_SRC:.*]] = load i8*, i8** %[[V4]], align 4
-// CHECK-NEXT: %[[V6:.*]] = bitcast i8** %[[V5]] to i8*
-// CHECK-NEXT: call void @_Block_object_assign(i8* %[[V6]], i8* %[[BLOCKCOPY_SRC]], i32 8)
+// CHECK-LABEL: define linkonce_odr hidden void @__copy_helper_block_4_20r(ptr noundef %0, ptr noundef %1) unnamed_addr
+// CHECK: %[[_ADDR:.*]] = alloca ptr, align 4
+// CHECK-NEXT: %[[_ADDR1:.*]] = alloca ptr, align 4
+// CHECK-NEXT: store ptr %0, ptr %[[_ADDR]], align 4
+// CHECK-NEXT: store ptr %1, ptr %[[_ADDR1]], align 4
+// CHECK-NEXT: %[[V2:.*]] = load ptr, ptr %[[_ADDR1]], align 4
+// CHECK-NEXT: %[[V3:.*]] = load ptr, ptr %[[_ADDR]], align 4
+// CHECK-NEXT: %[[V4:.*]] = getelementptr inbounds <{ ptr, i32, i32, ptr, ptr, ptr }>, ptr %[[V2]], i32 0, i32 5
+// CHECK-NEXT: %[[V5:.*]] = getelementptr inbounds <{ ptr, i32, i32, ptr, ptr, ptr }>, ptr %[[V3]], i32 0, i32 5
+// CHECK-NEXT: %[[BLOCKCOPY_SRC:.*]] = load ptr, ptr %[[V4]], align 4
+// CHECK-NEXT: call void @_Block_object_assign(ptr %[[V5]], ptr %[[BLOCKCOPY_SRC]], i32 8)
// CHECK-NEXT: ret void
-// CHECK-LABEL: define linkonce_odr hidden void @__destroy_helper_block_4_20r(i8* noundef %0) unnamed_addr
-// CHECK: %[[_ADDR:.*]] = alloca i8*, align 4
-// CHECK-NEXT: store i8* %0, i8** %[[_ADDR]], align 4
-// CHECK-NEXT: %[[V1:.*]] = load i8*, i8** %[[_ADDR]], align 4
-// CHECK-NEXT: %[[BLOCK:.*]] = bitcast i8* %[[V1]] to <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>*
-// CHECK-NEXT: %[[V2:.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>, <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>* %[[BLOCK]], i32 0, i32 5
-// CHECK-NEXT: %[[V3:.*]] = load i8*, i8** %[[V2]], align 4
-// CHECK-NEXT: call void @_Block_object_dispose(i8* %[[V3]], i32 8)
+// CHECK-LABEL: define linkonce_odr hidden void @__destroy_helper_block_4_20r(ptr noundef %0) unnamed_addr
+// CHECK: %[[_ADDR:.*]] = alloca ptr, align 4
+// CHECK-NEXT: store ptr %0, ptr %[[_ADDR]], align 4
+// CHECK-NEXT: %[[V1:.*]] = load ptr, ptr %[[_ADDR]], align 4
+// CHECK-NEXT: %[[V2:.*]] = getelementptr inbounds <{ ptr, i32, i32, ptr, ptr, ptr }>, ptr %[[V1]], i32 0, i32 5
+// CHECK-NEXT: %[[V3:.*]] = load ptr, ptr %[[V2]], align 4
+// CHECK-NEXT: call void @_Block_object_dispose(ptr %[[V3]], i32 8)
// CHECK-NEXT: ret void
typedef double ftype(double);
@@ -81,7 +75,7 @@ void f4_helper(long long (^)(void));
void f4(void) {
_Bool b = 0;
long long ll = 0;
- // CHECK: alloca <{ i8*, i32, i32, i8*, {{%.*}}*, i8, [3 x i8], i64 }>, align 8
+ // CHECK: alloca <{ ptr, i32, i32, ptr, ptr, i8, [3 x i8], i64 }>, align 8
f4_helper(^{ if (b) return ll; return 0LL; });
}
@@ -95,7 +89,7 @@ void f5_helper(void (^)(struct F5 *));
// CHECK-LABEL: define{{.*}} void @f5()
void f5(void) {
struct F5 value;
- // CHECK: alloca <{ i8*, i32, i32, i8*, {{%.*}}*, [12 x i8], [[F5:%.*]] }>, align 16
+ // CHECK: alloca <{ ptr, i32, i32, ptr, ptr, [12 x i8], [[F5:%.*]] }>, align 16
f5_helper(^(struct F5 *slot) { *slot = value; });
}
@@ -104,11 +98,10 @@ void (^b)() = ^{};
int main(void) {
(b?: ^{})();
}
-// CHECK: [[ZERO:%.*]] = load void (...)*, void (...)** @b
-// CHECK-NEXT: [[TB:%.*]] = icmp ne void (...)* [[ZERO]], null
+// CHECK: [[ZERO:%.*]] = load ptr, ptr @b
+// CHECK-NEXT: [[TB:%.*]] = icmp ne ptr [[ZERO]], null
// CHECK-NEXT: br i1 [[TB]], label [[CT:%.*]], label [[CF:%.*]]
-// CHECK: [[ONE:%.*]] = bitcast void (...)* [[ZERO]] to void ()*
-// CHECK-NEXT: br label [[CE:%.*]]
+// CHECK: br label [[CE:%.*]]
// Ensure that we don't emit helper code in copy/dispose routines for variables
// that are const-captured.
@@ -118,7 +111,7 @@ void testConstCaptureInCopyAndDestroyHelpers(void) {
(^ { i = x; })();
}
// CHECK-LABEL: define{{.*}} void @testConstCaptureInCopyAndDestroyHelpers(
-// CHECK: %[[BLOCK_DESCRIPTOR:.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>, <{ i8*, i32, i32, i8*, %[[STRUCT_BLOCK_DESCRIPTOR]]*, i8* }>* %{{.*}}, i32 0, i32 4
-// CHECK: store %[[STRUCT_BLOCK_DESCRIPTOR]]* bitcast ({ i32, i32, i8*, i8*, i8*, i8* }* @[[BLOCK_DESCRIPTOR_TMP21]] to %[[STRUCT_BLOCK_DESCRIPTOR]]*), %[[STRUCT_BLOCK_DESCRIPTOR]]** %[[BLOCK_DESCRIPTOR]], align 4
+// CHECK: %[[BLOCK_DESCRIPTOR:.*]] = getelementptr inbounds <{ ptr, i32, i32, ptr, ptr, ptr }>, ptr %{{.*}}, i32 0, i32 4
+// CHECK: store ptr @[[BLOCK_DESCRIPTOR_TMP21]], ptr %[[BLOCK_DESCRIPTOR]], align 4
// CHECK-LABEL: define internal void @__testConstCaptureInCopyAndDestroyHelpers_block_invoke
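The copy/destroy helper checks above come from a block that captures a __block variable; with opaque pointers the helpers index the incoming block literal ptr directly instead of first bitcasting an i8* argument to the concrete block-literal struct type. A rough sketch of code that produces such helpers (hypothetical names, compile with -fblocks):

/* blocks-sketch.c -- illustrative only. A __block capture forces copy and
   dispose helpers that call _Block_object_assign/_Block_object_dispose on
   the captured slot (flag 8, BLOCK_FIELD_IS_BYREF).                       */
void use(void (^b)(void));

void demo(void) {
  __block int shared = 0;
  /* The generated __copy_helper_block_* loads both block pointers as ptr
     and GEPs straight to the captured field; no bitcast chain remains.   */
  use(^{ shared = 1; });
}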
diff --git a/clang/test/CodeGen/bpf-attr-preserve-access-index-1.c b/clang/test/CodeGen/bpf-attr-preserve-access-index-1.c
index cc775a20feaf..1b8af6c1658b 100644
--- a/clang/test/CodeGen/bpf-attr-preserve-access-index-1.c
+++ b/clang/test/CodeGen/bpf-attr-preserve-access-index-1.c
@@ -1,5 +1,5 @@
// REQUIRES: bpf-registered-target
-// RUN: %clang_cc1 -no-opaque-pointers -triple bpf -emit-llvm -debug-info-kind=limited -disable-llvm-passes %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple bpf -emit-llvm -debug-info-kind=limited -disable-llvm-passes %s -o - | FileCheck %s
#define __reloc__ __attribute__((preserve_access_index))
@@ -17,7 +17,7 @@ int test(__s1 *arg) {
return arg->a + arg[1].b;
}
-// CHECK: call i32* @llvm.preserve.struct.access.index.p0i32.p0s_struct.s1s(%struct.s1* elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 0, i32 0)
-// CHECK: call %struct.s1* @llvm.preserve.array.access.index.p0s_struct.s1s.p0s_struct.s1s(%struct.s1* elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 0, i32 1)
-// CHECK: call %union.anon* @llvm.preserve.struct.access.index.p0s_union.anons.p0s_struct.s1s(%struct.s1* elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 1, i32 1)
-// CHECK: call %union.anon* @llvm.preserve.union.access.index.p0s_union.anons.p0s_union.anons(%union.anon* %{{[0-9a-z]+}}, i32 0)
+// CHECK: call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 0, i32 0)
+// CHECK: call ptr @llvm.preserve.array.access.index.p0.p0(ptr elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 0, i32 1)
+// CHECK: call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 1, i32 1)
+// CHECK: call ptr @llvm.preserve.union.access.index.p0.p0(ptr %{{[0-9a-z]+}}, i32 0)
diff --git a/clang/test/CodeGen/bpf-attr-preserve-access-index-2.c b/clang/test/CodeGen/bpf-attr-preserve-access-index-2.c
index 0d97ece8ec58..f75474ab9ff0 100644
--- a/clang/test/CodeGen/bpf-attr-preserve-access-index-2.c
+++ b/clang/test/CodeGen/bpf-attr-preserve-access-index-2.c
@@ -1,5 +1,5 @@
// REQUIRES: bpf-registered-target
-// RUN: %clang_cc1 -no-opaque-pointers -triple bpf -emit-llvm -debug-info-kind=limited -disable-llvm-passes %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple bpf -emit-llvm -debug-info-kind=limited -disable-llvm-passes %s -o - | FileCheck %s
#define __reloc__ __attribute__((preserve_access_index))
@@ -17,8 +17,8 @@ int test(__s1 *arg) {
return arg->a[2] + arg->c[2];
}
-// CHECK: call [3 x i32]* @llvm.preserve.struct.access.index.p0a3i32.p0s_struct.s1s(%struct.s1* elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 0, i32 0)
-// CHECK: call i32* @llvm.preserve.array.access.index.p0i32.p0a3i32([3 x i32]* elementtype([3 x i32]) %{{[0-9a-z]+}}, i32 1, i32 2)
-// CHECK: call %union.anon* @llvm.preserve.struct.access.index.p0s_union.anons.p0s_struct.s1s(%struct.s1* elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 1, i32 1)
-// CHECK: call %union.anon* @llvm.preserve.union.access.index.p0s_union.anons.p0s_union.anons(%union.anon* %{{[0-9a-z]+}}, i32 1)
-// CHECK: call i32* @llvm.preserve.array.access.index.p0i32.p0a4i32([4 x i32]* elementtype([4 x i32]) %{{[0-9a-z]+}}, i32 1, i32 2)
+// CHECK: call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 0, i32 0)
+// CHECK: call ptr @llvm.preserve.array.access.index.p0.p0(ptr elementtype([3 x i32]) %{{[0-9a-z]+}}, i32 1, i32 2)
+// CHECK: call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 1, i32 1)
+// CHECK: call ptr @llvm.preserve.union.access.index.p0.p0(ptr %{{[0-9a-z]+}}, i32 1)
+// CHECK: call ptr @llvm.preserve.array.access.index.p0.p0(ptr elementtype([4 x i32]) %{{[0-9a-z]+}}, i32 1, i32 2)
diff --git a/clang/test/CodeGen/builtin-preserve-access-index-array.c b/clang/test/CodeGen/builtin-preserve-access-index-array.c
index 6f0bda53277d..3ac5409f924f 100644
--- a/clang/test/CodeGen/builtin-preserve-access-index-array.c
+++ b/clang/test/CodeGen/builtin-preserve-access-index-array.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64 -emit-llvm -debug-info-kind=limited %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64 -emit-llvm -debug-info-kind=limited %s -o - | FileCheck %s
#define _(x) (__builtin_preserve_access_index(x))
@@ -10,9 +10,9 @@ struct s1 {
const void *unit1(struct s1 *arg) {
return _(&arg->b[2]);
}
-// CHECK: define dso_local i8* @unit1
-// CHECK: call [4 x i32]* @llvm.preserve.struct.access.index.p0a4i32.p0s_struct.s1s(%struct.s1* elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 1, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S1:[0-9]+]]
-// CHECK: call i32* @llvm.preserve.array.access.index.p0i32.p0a4i32([4 x i32]* elementtype([4 x i32]) %{{[0-9a-z]+}}, i32 1, i32 2), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[ARRAY:[0-9]+]]
+// CHECK: define dso_local ptr @unit1
+// CHECK: call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 1, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S1:[0-9]+]]
+// CHECK: call ptr @llvm.preserve.array.access.index.p0.p0(ptr elementtype([4 x i32]) %{{[0-9a-z]+}}, i32 1, i32 2), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[ARRAY:[0-9]+]]
//
// CHECK: ![[ARRAY]] = !DICompositeType(tag: DW_TAG_array_type
// CHECK: ![[STRUCT_S1]] = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "s1"
diff --git a/clang/test/CodeGen/builtin-preserve-access-index-nonptr.c b/clang/test/CodeGen/builtin-preserve-access-index-nonptr.c
index ae203899a00c..319498c09ef5 100644
--- a/clang/test/CodeGen/builtin-preserve-access-index-nonptr.c
+++ b/clang/test/CodeGen/builtin-preserve-access-index-nonptr.c
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64 -emit-llvm -debug-info-kind=limited %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64 -emit-llvm -debug-info-kind=limited %s -o - | FileCheck %s
#define _(x) (__builtin_preserve_access_index(x))
@@ -11,8 +11,8 @@ int unit1(struct s1 *arg) {
return _(arg->b[2]);
}
// CHECK: define dso_local i32 @unit1
-// CHECK: call [4 x i32]* @llvm.preserve.struct.access.index.p0a4i32.p0s_struct.s1s(%struct.s1* elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 1, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S1:[0-9]+]]
-// CHECK: call i32* @llvm.preserve.array.access.index.p0i32.p0a4i32([4 x i32]* elementtype([4 x i32]) %{{[0-9a-z]+}}, i32 1, i32 2), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[ARRAY:[0-9]+]]
+// CHECK: call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 1, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S1:[0-9]+]]
+// CHECK: call ptr @llvm.preserve.array.access.index.p0.p0(ptr elementtype([4 x i32]) %{{[0-9a-z]+}}, i32 1, i32 2), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[ARRAY:[0-9]+]]
//
// CHECK: ![[ARRAY]] = !DICompositeType(tag: DW_TAG_array_type
// CHECK: ![[STRUCT_S1]] = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "s1"
diff --git a/clang/test/CodeGen/builtin-preserve-access-index-typedef.c b/clang/test/CodeGen/builtin-preserve-access-index-typedef.c
index 9791626c8fde..a3c21e9bd651 100644
--- a/clang/test/CodeGen/builtin-preserve-access-index-typedef.c
+++ b/clang/test/CodeGen/builtin-preserve-access-index-typedef.c
@@ -1,5 +1,5 @@
// REQUIRES: bpf-registered-target
-// RUN: %clang_cc1 -no-opaque-pointers -triple bpf -emit-llvm -debug-info-kind=limited -disable-llvm-passes %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple bpf -emit-llvm -debug-info-kind=limited -disable-llvm-passes %s -o - | FileCheck %s
#pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record)
typedef struct {
@@ -15,9 +15,9 @@ int test2(const __u *arg) { return arg->b; }
// CHECK: define dso_local i32 @test1
-// CHECK: call i32* @llvm.preserve.struct.access.index.p0i32.p0s_struct.__ts(%struct.__t* elementtype(%struct.__t) %{{[0-9a-z]+}}, i32 0, i32 0), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[TYPEDEF_STRUCT:[0-9]+]]
+// CHECK: call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.__t) %{{[0-9a-z]+}}, i32 0, i32 0), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[TYPEDEF_STRUCT:[0-9]+]]
// CHECK: define dso_local i32 @test2
-// CHECK: call %union.__u* @llvm.preserve.union.access.index.p0s_union.__us.p0s_union.__us(%union.__u* %{{[0-9a-z]+}}, i32 0), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[CONST_TYPEDEF:[0-9]+]]
+// CHECK: call ptr @llvm.preserve.union.access.index.p0.p0(ptr %{{[0-9a-z]+}}, i32 0), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[CONST_TYPEDEF:[0-9]+]]
//
// CHECK: ![[TYPEDEF_STRUCT]] = !DIDerivedType(tag: DW_TAG_typedef, name: "__t"
// CHECK: ![[CONST_TYPEDEF]] = !DIDerivedType(tag: DW_TAG_const_type, baseType: ![[TYPEDEF_UNION:[0-9]+]]
diff --git a/clang/test/CodeGen/builtin-preserve-access-index.c b/clang/test/CodeGen/builtin-preserve-access-index.c
index 68faa58114cb..d1829b171de8 100644
--- a/clang/test/CodeGen/builtin-preserve-access-index.c
+++ b/clang/test/CodeGen/builtin-preserve-access-index.c
@@ -1,11 +1,11 @@
-// RUN: %clang_cc1 -no-opaque-pointers -triple x86_64 -emit-llvm -debug-info-kind=limited %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64 -emit-llvm -debug-info-kind=limited %s -o - | FileCheck %s
#define _(x) (__builtin_preserve_access_index(x))
const void *unit1(const void *arg) {
return _(arg);
}
-// CHECK: define dso_local i8* @unit1
+// CHECK: define dso_local ptr @unit1
// CHECK-NOT: llvm.preserve.array.access.index
// CHECK-NOT: llvm.preserve.struct.access.index
// CHECK-NOT: llvm.preserve.union.access.index
@@ -13,7 +13,7 @@ const void *unit1(const void *arg) {
const void *unit2(void) {
return _((const void *)0xffffffffFFFF0000ULL);
}
-// CHECK: define dso_local i8* @unit2
+// CHECK: define dso_local ptr @unit2
// CHECK-NOT: llvm.preserve.array.access.index
// CHECK-NOT: llvm.preserve.struct.access.index
// CHECK-NOT: llvm.preserve.union.access.index
@@ -21,7 +21,7 @@ const void *unit2(void) {
const void *unit3(const int *arg) {
return _(arg + 1);
}
-// CHECK: define dso_local i8* @unit3
+// CHECK: define dso_local ptr @unit3
// CHECK-NOT: llvm.preserve.array.access.index
// CHECK-NOT: llvm.preserve.struct.access.index
// CHECK-NOT: llvm.preserve.union.access.index
@@ -29,18 +29,18 @@ const void *unit3(const int *arg) {
const void *unit4(const int *arg) {
return _(&arg[1]);
}
-// CHECK: define dso_local i8* @unit4
+// CHECK: define dso_local ptr @unit4
// CHECK-NOT: getelementptr
-// CHECK: call i32* @llvm.preserve.array.access.index.p0i32.p0i32(i32* elementtype(i32) %{{[0-9a-z]+}}, i32 0, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[POINTER:[0-9]+]]
+// CHECK: call ptr @llvm.preserve.array.access.index.p0.p0(ptr elementtype(i32) %{{[0-9a-z]+}}, i32 0, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[POINTER:[0-9]+]]
const void *unit5(const int *arg[5]) {
return _(&arg[1][2]);
}
-// CHECK: define dso_local i8* @unit5
+// CHECK: define dso_local ptr @unit5
// CHECK-NOT: getelementptr
-// CHECK: call i32** @llvm.preserve.array.access.index.p0p0i32.p0p0i32(i32** elementtype(i32*) %{{[0-9a-z]+}}, i32 0, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index !{{[0-9]+}}
+// CHECK: call ptr @llvm.preserve.array.access.index.p0.p0(ptr elementtype(ptr) %{{[0-9a-z]+}}, i32 0, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index !{{[0-9]+}}
// CHECK-NOT: getelementptr
-// CHECK: call i32* @llvm.preserve.array.access.index.p0i32.p0i32(i32* elementtype(i32) %{{[0-9a-z]+}}, i32 0, i32 2), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[POINTER:[0-9]+]]
+// CHECK: call ptr @llvm.preserve.array.access.index.p0.p0(ptr elementtype(i32) %{{[0-9a-z]+}}, i32 0, i32 2), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[POINTER:[0-9]+]]
struct s1 {
char a;
@@ -63,30 +63,30 @@ struct s3 {
const void *unit6(struct s1 *arg) {
return _(&arg->a);
}
-// CHECK: define dso_local i8* @unit6
+// CHECK: define dso_local ptr @unit6
// CHECK-NOT: getelementptr
-// CHECK: call i8* @llvm.preserve.struct.access.index.p0i8.p0s_struct.s1s(%struct.s1* elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 0, i32 0), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S1:[0-9]+]]
+// CHECK: call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 0, i32 0), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S1:[0-9]+]]
const void *unit7(struct s1 *arg) {
return _(&arg->b);
}
-// CHECK: define dso_local i8* @unit7
+// CHECK: define dso_local ptr @unit7
// CHECK-NOT: getelementptr
-// CHECK: call i32* @llvm.preserve.struct.access.index.p0i32.p0s_struct.s1s(%struct.s1* elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 1, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S1]]
+// CHECK: call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 1, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S1]]
const void *unit8(struct s2 *arg) {
return _(&arg->b);
}
-// CHECK: define dso_local i8* @unit8
+// CHECK: define dso_local ptr @unit8
// CHECK-NOT: getelementptr
-// CHECK: call i32* @llvm.preserve.struct.access.index.p0i32.p0s_struct.s2s(%struct.s2* elementtype(%struct.s2) %{{[0-9a-z]+}}, i32 1, i32 2), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S2:[0-9]+]]
+// CHECK: call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.s2) %{{[0-9a-z]+}}, i32 1, i32 2), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S2:[0-9]+]]
const void *unit9(struct s3 *arg) {
return _(&arg->b);
}
-// CHECK: define dso_local i8* @unit9
+// CHECK: define dso_local ptr @unit9
// CHECK-NOT: getelementptr
-// CHECK: call i32* @llvm.preserve.struct.access.index.p0i32.p0s_struct.s3s(%struct.s3* elementtype(%struct.s3) %{{[0-9a-z]+}}, i32 1, i32 2), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S3:[0-9]+]]
+// CHECK: call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.s3) %{{[0-9a-z]+}}, i32 1, i32 2), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S3:[0-9]+]]
union u1 {
char a;
@@ -102,23 +102,23 @@ union u2 {
const void *unit10(union u1 *arg) {
return _(&arg->a);
}
-// CHECK: define dso_local i8* @unit10
+// CHECK: define dso_local ptr @unit10
// CHECK-NOT: getelementptr
-// CHECK: call %union.u1* @llvm.preserve.union.access.index.p0s_union.u1s.p0s_union.u1s(%union.u1* %{{[0-9a-z]+}}, i32 0), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[UNION_U1:[0-9]+]]
+// CHECK: call ptr @llvm.preserve.union.access.index.p0.p0(ptr %{{[0-9a-z]+}}, i32 0), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[UNION_U1:[0-9]+]]
const void *unit11(union u1 *arg) {
return _(&arg->b);
}
-// CHECK: define dso_local i8* @unit11
+// CHECK: define dso_local ptr @unit11
// CHECK-NOT: getelementptr
-// CHECK: call %union.u1* @llvm.preserve.union.access.index.p0s_union.u1s.p0s_union.u1s(%union.u1* %{{[0-9a-z]+}}, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[UNION_U1]]
+// CHECK: call ptr @llvm.preserve.union.access.index.p0.p0(ptr %{{[0-9a-z]+}}, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[UNION_U1]]
const void *unit12(union u2 *arg) {
return _(&arg->b);
}
-// CHECK: define dso_local i8* @unit12
+// CHECK: define dso_local ptr @unit12
// CHECK-NOT: getelementptr
-// CHECK: call %union.u2* @llvm.preserve.union.access.index.p0s_union.u2s.p0s_union.u2s(%union.u2* %{{[0-9a-z]+}}, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[UNION_U2:[0-9]+]]
+// CHECK: call ptr @llvm.preserve.union.access.index.p0.p0(ptr %{{[0-9a-z]+}}, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[UNION_U2:[0-9]+]]
struct s4 {
char d;
@@ -138,33 +138,33 @@ union u3 {
const void *unit13(struct s4 *arg) {
return _(&arg->c.b[2]);
}
-// CHECK: define dso_local i8* @unit13
-// CHECK: call %union.u* @llvm.preserve.struct.access.index.p0s_union.us.p0s_struct.s4s(%struct.s4* elementtype(%struct.s4) %{{[0-9a-z]+}}, i32 1, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S4:[0-9]+]]
-// CHECK: call %union.u* @llvm.preserve.union.access.index.p0s_union.us.p0s_union.us(%union.u* %{{[0-9a-z]+}}, i32 0), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[UNION_I_U:[0-9]+]]
-// CHECK: call i32* @llvm.preserve.array.access.index.p0i32.p0a4i32([4 x i32]* elementtype([4 x i32]) %{{[0-9a-z]+}}, i32 1, i32 2), !dbg !{{[0-9]+}}, !llvm.preserve.access.index !{{[0-9]+}}
+// CHECK: define dso_local ptr @unit13
+// CHECK: call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.s4) %{{[0-9a-z]+}}, i32 1, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S4:[0-9]+]]
+// CHECK: call ptr @llvm.preserve.union.access.index.p0.p0(ptr %{{[0-9a-z]+}}, i32 0), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[UNION_I_U:[0-9]+]]
+// CHECK: call ptr @llvm.preserve.array.access.index.p0.p0(ptr elementtype([4 x i32]) %{{[0-9a-z]+}}, i32 1, i32 2), !dbg !{{[0-9]+}}, !llvm.preserve.access.index !{{[0-9]+}}
const void *unit14(union u3 *arg) {
return _(&arg->c.b[2]);
}
-// CHECK: define dso_local i8* @unit14
-// CHECK: call %union.u3* @llvm.preserve.union.access.index.p0s_union.u3s.p0s_union.u3s(%union.u3* %{{[0-9a-z]+}}, i32 0), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[UNION_U3:[0-9]+]]
-// CHECK: call [4 x i32]* @llvm.preserve.struct.access.index.p0a4i32.p0s_struct.ss(%struct.s* elementtype(%struct.s) %{{[0-9a-z]+}}, i32 0, i32 0), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_I_S:[0-9]+]]
-// CHECK: call i32* @llvm.preserve.array.access.index.p0i32.p0a4i32([4 x i32]* elementtype([4 x i32]) %{{[0-9a-z]+}}, i32 1, i32 2), !dbg !{{[0-9]+}}, !llvm.preserve.access.index !{{[0-9]+}}
+// CHECK: define dso_local ptr @unit14
+// CHECK: call ptr @llvm.preserve.union.access.index.p0.p0(ptr %{{[0-9a-z]+}}, i32 0), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[UNION_U3:[0-9]+]]
+// CHECK: call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.s) %{{[0-9a-z]+}}, i32 0, i32 0), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_I_S:[0-9]+]]
+// CHECK: call ptr @llvm.preserve.array.access.index.p0.p0(ptr elementtype([4 x i32]) %{{[0-9a-z]+}}, i32 1, i32 2), !dbg !{{[0-9]+}}, !llvm.preserve.access.index !{{[0-9]+}}
const void *unit15(struct s4 *arg) {
return _(&arg[2].c.a);
}
-// CHECK: define dso_local i8* @unit15
-// CHECK: call %struct.s4* @llvm.preserve.array.access.index.p0s_struct.s4s.p0s_struct.s4s(%struct.s4* elementtype(%struct.s4) %{{[0-9a-z]+}}, i32 0, i32 2), !dbg !{{[0-9]+}}, !llvm.preserve.access.index !{{[0-9]+}}
-// CHECK: call %union.u* @llvm.preserve.struct.access.index.p0s_union.us.p0s_struct.s4s(%struct.s4* elementtype(%struct.s4) %{{[0-9a-z]+}}, i32 1, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S4]]
-// CHECK: call %union.u* @llvm.preserve.union.access.index.p0s_union.us.p0s_union.us(%union.u* %{{[0-9a-z]+}}, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[UNION_I_U]]
+// CHECK: define dso_local ptr @unit15
+// CHECK: call ptr @llvm.preserve.array.access.index.p0.p0(ptr elementtype(%struct.s4) %{{[0-9a-z]+}}, i32 0, i32 2), !dbg !{{[0-9]+}}, !llvm.preserve.access.index !{{[0-9]+}}
+// CHECK: call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.s4) %{{[0-9a-z]+}}, i32 1, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S4]]
+// CHECK: call ptr @llvm.preserve.union.access.index.p0.p0(ptr %{{[0-9a-z]+}}, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[UNION_I_U]]
const void *unit16(union u3 *arg) {
return _(&arg[2].a);
}
-// CHECK: define dso_local i8* @unit16
-// CHECK: call %union.u3* @llvm.preserve.array.access.index.p0s_union.u3s.p0s_union.u3s(%union.u3* elementtype(%union.u3) %{{[0-9a-z]+}}, i32 0, i32 2), !dbg !{{[0-9]+}}, !llvm.preserve.access.index !{{[0-9]+}}
-// CHECK: call %union.u3* @llvm.preserve.union.access.index.p0s_union.u3s.p0s_union.u3s(%union.u3* %{{[0-9a-z]+}}, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[UNION_U3]]
+// CHECK: define dso_local ptr @unit16
+// CHECK: call ptr @llvm.preserve.array.access.index.p0.p0(ptr elementtype(%union.u3) %{{[0-9a-z]+}}, i32 0, i32 2), !dbg !{{[0-9]+}}, !llvm.preserve.access.index !{{[0-9]+}}
+// CHECK: call ptr @llvm.preserve.union.access.index.p0.p0(ptr %{{[0-9a-z]+}}, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[UNION_U3]]
// CHECK: ![[POINTER]] = !DIDerivedType(tag: DW_TAG_pointer_type
// CHECK: ![[STRUCT_S4]] = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "s4"
diff --git a/clang/test/CodeGen/builtins-bpf-preserve-field-info-1.c b/clang/test/CodeGen/builtins-bpf-preserve-field-info-1.c
index fcb3c39c182b..e0b289d6934a 100644
--- a/clang/test/CodeGen/builtins-bpf-preserve-field-info-1.c
+++ b/clang/test/CodeGen/builtins-bpf-preserve-field-info-1.c
@@ -1,5 +1,5 @@
// REQUIRES: bpf-registered-target
-// RUN: %clang_cc1 -no-opaque-pointers -triple bpf -emit-llvm -debug-info-kind=limited -disable-llvm-passes %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple bpf -emit-llvm -debug-info-kind=limited -disable-llvm-passes %s -o - | FileCheck %s
#define _(x, y) (__builtin_preserve_field_info((x), (y)))
@@ -17,19 +17,19 @@ unsigned unit1(struct s1 *arg) {
return _(arg->a, 10) + _(arg->b, 10);
}
// CHECK: define dso_local i32 @unit1
-// CHECK: call i8* @llvm.preserve.struct.access.index.p0i8.p0s_struct.s1s(%struct.s1* elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 0, i32 0), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S1:[0-9]+]]
-// CHECK: call i32 @llvm.bpf.preserve.field.info.p0i8(i8* %{{[0-9a-z]+}}, i64 10), !dbg !{{[0-9]+}}
-// CHECK: call i8* @llvm.preserve.struct.access.index.p0i8.p0s_struct.s1s(%struct.s1* elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 1, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S1:[0-9]+]]
-// CHECK: call i32 @llvm.bpf.preserve.field.info.p0i8(i8* %{{[0-9a-z]+}}, i64 10), !dbg !{{[0-9]+}}
+// CHECK: call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 0, i32 0), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S1:[0-9]+]]
+// CHECK: call i32 @llvm.bpf.preserve.field.info.p0(ptr %{{[0-9a-z]+}}, i64 10), !dbg !{{[0-9]+}}
+// CHECK: call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 1, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S1:[0-9]+]]
+// CHECK: call i32 @llvm.bpf.preserve.field.info.p0(ptr %{{[0-9a-z]+}}, i64 10), !dbg !{{[0-9]+}}
unsigned unit2(union u1 *arg) {
return _(arg->a, 10) + _(arg->b, 10);
}
// CHECK: define dso_local i32 @unit2
-// CHECK: call %union.u1* @llvm.preserve.union.access.index.p0s_union.u1s.p0s_union.u1s(%union.u1* %{{[0-9a-z]+}}, i32 0), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[UNION_U1:[0-9]+]]
-// CHECK: call i32 @llvm.bpf.preserve.field.info.p0i8(i8* %{{[0-9a-z]+}}, i64 10), !dbg !{{[0-9]+}}
-// CHECK: call i8* @llvm.preserve.struct.access.index.p0i8.p0s_union.u1s(%union.u1* elementtype(%union.u1) %{{[0-9a-z]+}}, i32 0, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[UNION_U1:[0-9]+]]
-// CHECK: call i32 @llvm.bpf.preserve.field.info.p0i8(i8* %{{[0-9a-z]+}}, i64 10), !dbg !{{[0-9]+}}
+// CHECK: call ptr @llvm.preserve.union.access.index.p0.p0(ptr %{{[0-9a-z]+}}, i32 0), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[UNION_U1:[0-9]+]]
+// CHECK: call i32 @llvm.bpf.preserve.field.info.p0(ptr %{{[0-9a-z]+}}, i64 10), !dbg !{{[0-9]+}}
+// CHECK: call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%union.u1) %{{[0-9a-z]+}}, i32 0, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[UNION_U1:[0-9]+]]
+// CHECK: call i32 @llvm.bpf.preserve.field.info.p0(ptr %{{[0-9a-z]+}}, i64 10), !dbg !{{[0-9]+}}
// CHECK: ![[STRUCT_S1]] = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "s1"
// CHECK: ![[UNION_U1]] = distinct !DICompositeType(tag: DW_TAG_union_type, name: "u1"
diff --git a/clang/test/CodeGen/builtins-bpf-preserve-field-info-2.c b/clang/test/CodeGen/builtins-bpf-preserve-field-info-2.c
index bd7fbc9f5bd8..ad4f64ab403b 100644
--- a/clang/test/CodeGen/builtins-bpf-preserve-field-info-2.c
+++ b/clang/test/CodeGen/builtins-bpf-preserve-field-info-2.c
@@ -1,5 +1,5 @@
// REQUIRES: bpf-registered-target
-// RUN: %clang_cc1 -no-opaque-pointers -triple bpf -emit-llvm -debug-info-kind=limited -disable-llvm-passes %s -o - | FileCheck %s
+// RUN: %clang_cc1 -triple bpf -emit-llvm -debug-info-kind=limited -disable-llvm-passes %s -o - | FileCheck %s
#define _(x, y) (__builtin_preserve_field_info((x), (y)))
@@ -15,12 +15,12 @@ unsigned unit1(struct s2 *arg) {
return _(arg->s.a, 10) + _(arg->s.b, 10);
}
// CHECK: define dso_local i32 @unit1
-// CHECK: call %struct.s1* @llvm.preserve.struct.access.index.p0s_struct.s1s.p0s_struct.s2s(%struct.s2* elementtype(%struct.s2) %{{[0-9a-z]+}}, i32 0, i32 0), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S2:[0-9]+]]
-// CHECK: call i8* @llvm.preserve.struct.access.index.p0i8.p0s_struct.s1s(%struct.s1* elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 0, i32 0), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S1:[0-9]+]]
-// CHECK: call i32 @llvm.bpf.preserve.field.info.p0i8(i8* %{{[0-9a-z]+}}, i64 10), !dbg !{{[0-9]+}}
-// CHECK: call %struct.s1* @llvm.preserve.struct.access.index.p0s_struct.s1s.p0s_struct.s2s(%struct.s2* elementtype(%struct.s2) %{{[0-9a-z]+}}, i32 0, i32 0), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S2:[0-9]+]]
-// CHECK: call i8* @llvm.preserve.struct.access.index.p0i8.p0s_struct.s1s(%struct.s1* elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 1, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S1:[0-9]+]]
-// CHECK: call i32 @llvm.bpf.preserve.field.info.p0i8(i8* %{{[0-9a-z]+}}, i64 10), !dbg !{{[0-9]+}}
+// CHECK: call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.s2) %{{[0-9a-z]+}}, i32 0, i32 0), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S2:[0-9]+]]
+// CHECK: call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 0, i32 0), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S1:[0-9]+]]
+// CHECK: call i32 @llvm.bpf.preserve.field.info.p0(ptr %{{[0-9a-z]+}}, i64 10), !dbg !{{[0-9]+}}
+// CHECK: call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.s2) %{{[0-9a-z]+}}, i32 0, i32 0), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S2:[0-9]+]]
+// CHECK: call ptr @llvm.preserve.struct.access.index.p0.p0(ptr elementtype(%struct.s1) %{{[0-9a-z]+}}, i32 1, i32 1), !dbg !{{[0-9]+}}, !llvm.preserve.access.index ![[STRUCT_S1:[0-9]+]]
+// CHECK: call i32 @llvm.bpf.preserve.field.info.p0(ptr %{{[0-9a-z]+}}, i64 10), !dbg !{{[0-9]+}}
// CHECK: ![[STRUCT_S2]] = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "s2"
// CHECK: ![[STRUCT_S1]] = distinct !DICompositeType(tag: DW_TAG_structure_type, name: "s1"
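The BPF hunks above all show the same effect: the preserve.*.access.index and bpf.preserve.field.info intrinsics are overloaded on their pointer operands, so once every pointer is the single ptr type their mangled suffixes collapse to .p0/.p0.p0 and the pointee type is carried only by the elementtype attribute. A minimal sketch of source that exercises this (illustrative; it mirrors the tests rather than reproducing them):

/* preserve-access-sketch.c -- illustrative only; assumes a BPF (or x86_64)
   target with debug info so the CO-RE relocation metadata is emitted.     */
#define _(x) (__builtin_preserve_access_index(x))

struct s { char a; int b[4]; };

int read_b2(struct s *p) {
  /* Typed pointers:   call [4 x i32]* @llvm.preserve.struct.access.index
                         .p0a4i32.p0s_struct.ss(%struct.s* elementtype(...), ...)
     Opaque pointers:  call ptr @llvm.preserve.struct.access.index.p0.p0(
                         ptr elementtype(%struct.s) %..., i32 1, i32 1)     */
  return _(p->b[2]);
}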
diff --git a/clang/test/CodeGenCXX/microsoft-abi-thread-safe-statics.cpp b/clang/test/CodeGenCXX/microsoft-abi-thread-safe-statics.cpp
index de2b05b05f43..7f8288cf96d5 100644
--- a/clang/test/CodeGenCXX/microsoft-abi-thread-safe-statics.cpp
+++ b/clang/test/CodeGenCXX/microsoft-abi-thread-safe-statics.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -no-opaque-pointers -fexceptions -fcxx-exceptions -fms-extensions -fms-compatibility -fms-compatibility-version=19 -std=c++11 -emit-llvm %s -o - -triple=i386-pc-win32 | FileCheck %s
+// RUN: %clang_cc1 -fexceptions -fcxx-exceptions -fms-extensions -fms-compatibility -fms-compatibility-version=19 -std=c++11 -emit-llvm %s -o - -triple=i386-pc-win32 | FileCheck %s
// REQUIRES: asserts
struct S {
@@ -19,33 +19,33 @@ struct S {
// CHECK-DAG: @"?$TSS0@?1??g1@@YAHXZ at 4HA" = internal global i32 0, align 4
// CHECK-LABEL: define {{.*}} @"?f@@YAAAUS@@XZ"()
-// CHECK-SAME: personality i8* bitcast (i32 (...)* @__CxxFrameHandler3 to i8*)
+// CHECK-SAME: personality ptr @__CxxFrameHandler3
extern inline S &f() {
static thread_local S s;
-// CHECK: %[[guard:.*]] = load i32, i32* @"??__J?1??f@@YAAAUS@@XZ@51"
+// CHECK: %[[guard:.*]] = load i32, ptr @"??__J?1??f@@YAAAUS@@XZ@51"
// CHECK-NEXT: %[[mask:.*]] = and i32 %[[guard]], 1
// CHECK-NEXT: %[[cmp:.*]] = icmp eq i32 %[[mask]], 0
// CHECK-NEXT: br i1 %[[cmp]], label %[[init:.*]], label %[[init_end:.*]], !prof ![[unlikely_threadlocal:.*]]
//
// CHECK: [[init]]:
// CHECK-NEXT: %[[or:.*]] = or i32 %[[guard]], 1
-// CHECK-NEXT: store i32 %[[or]], i32* @"??__J?1??f@@YAAAUS@@XZ@51"
-// CHECK-NEXT: invoke {{.*}} @"??0S@@QAE@XZ"(%struct.S* {{[^,]*}} @"?s@?1??f@@YAAAUS@@XZ@4U2@A")
+// CHECK-NEXT: store i32 %[[or]], ptr @"??__J?1??f@@YAAAUS@@XZ@51"
+// CHECK-NEXT: invoke {{.*}} @"??0S@@QAE@XZ"(ptr {{[^,]*}} @"?s@?1??f@@YAAAUS@@XZ@4U2@A")
// CHECK-NEXT: to label %[[invoke_cont:.*]] unwind label %[[lpad:.*]]
//
// CHECK: [[invoke_cont]]:
-// CHECK-NEXT: call i32 @__tlregdtor(void ()* @"??__Fs@?1??f@@YAAAUS@@XZ@YAXXZ")
+// CHECK-NEXT: call i32 @__tlregdtor(ptr @"??__Fs@?1??f@@YAAAUS@@XZ@YAXXZ")
// CHECK-NEXT: br label %[[init_end:.*]]
// CHECK: [[init_end]]:
-// CHECK: [[S_ADDR:%.+]] = call align 1 %struct.S* @llvm.threadlocal.address.p0s_struct.Ss(%struct.S* align 1 @"?s@?1??f@@YAAAUS@@XZ@4U2@A")
-// CHECK-NEXT: ret %struct.S* [[S_ADDR]]
+// CHECK: [[S_ADDR:%.+]] = call align 1 ptr @llvm.threadlocal.address.p0(ptr align 1 @"?s@?1??f@@YAAAUS@@XZ@4U2@A")
+// CHECK-NEXT: ret ptr [[S_ADDR]]
// CHECK: [[lpad:.*]]:
// CHECK-NEXT: cleanuppad within none []
-// CHECK: %[[guard:.*]] = load i32, i32* @"??__J?1??f@@YAAAUS@@XZ@51"
+// CHECK: %[[guard:.*]] = load i32, ptr @"??__J?1??f@@YAAAUS@@XZ@51"
// CHECK-NEXT: %[[mask:.*]] = and i32 %[[guard]], -2
-// CHECK-NEXT: store i32 %[[mask]], i32* @"??__J?1??f@@YAAAUS@@XZ@51"
+// CHECK-NEXT: store i32 %[[mask]], ptr @"??__J?1??f@@YAAAUS@@XZ@51"
// CHECK-NEXT: cleanupret {{.*}} unwind to caller
return s;
}
@@ -54,32 +54,32 @@ extern inline S &f() {
// CHECK-LABEL: define {{.*}} @"?g@@YAAAUS@@XZ"()
extern inline S &g() {
static S s;
-// CHECK: %[[guard:.*]] = load atomic i32, i32* @"?$TSS0@?1??g@@YAAAUS@@XZ@4HA" unordered, align 4
-// CHECK-NEXT: %[[epoch:.*]] = load i32, i32* @_Init_thread_epoch
+// CHECK: %[[guard:.*]] = load atomic i32, ptr @"?$TSS0@?1??g@@YAAAUS@@XZ@4HA" unordered, align 4
+// CHECK-NEXT: %[[epoch:.*]] = load i32, ptr @_Init_thread_epoch
// CHECK-NEXT: %[[cmp:.*]] = icmp sgt i32 %[[guard]], %[[epoch]]
// CHECK-NEXT: br i1 %[[cmp]], label %[[init_attempt:.*]], label %[[init_end:.*]], !prof ![[unlikely_staticlocal:.*]]
//
// CHECK: [[init_attempt]]:
-// CHECK-NEXT: call void @_Init_thread_header(i32* @"?$TSS0@?1??g@@YAAAUS@@XZ@4HA")
-// CHECK-NEXT: %[[guard2:.*]] = load atomic i32, i32* @"?$TSS0@?1??g@@YAAAUS@@XZ@4HA" unordered, align 4
+// CHECK-NEXT: call void @_Init_thread_header(ptr @"?$TSS0@?1??g@@YAAAUS@@XZ@4HA")
+// CHECK-NEXT: %[[guard2:.*]] = load atomic i32, ptr @"?$TSS0@?1??g@@YAAAUS@@XZ@4HA" unordered, align 4
// CHECK-NEXT: %[[cmp2:.*]] = icmp eq i32 %[[guard2]], -1
// CHECK-NEXT: br i1 %[[cmp2]], label %[[init:.*]], label %[[init_end:.*]]
//
// CHECK: [[init]]:
-// CHECK-NEXT: invoke {{.*}} @"??0S@@QAE at XZ"(%struct.S* {{[^,]*}} @"?s@?1??g@@YAAAUS@@XZ at 4U2@A")
+// CHECK-NEXT: invoke {{.*}} @"??0S@@QAE at XZ"(ptr {{[^,]*}} @"?s@?1??g@@YAAAUS@@XZ at 4U2@A")
// CHECK-NEXT: to label %[[invoke_cont:.*]] unwind label %[[lpad:.*]]
//
// CHECK: [[invoke_cont]]:
-// CHECK-NEXT: call i32 @atexit(void ()* @"??__Fs@?1??g@@YAAAUS@@XZ@YAXXZ")
-// CHECK-NEXT: call void @_Init_thread_footer(i32* @"?$TSS0@?1??g@@YAAAUS@@XZ@4HA")
+// CHECK-NEXT: call i32 @atexit(ptr @"??__Fs@?1??g@@YAAAUS@@XZ@YAXXZ")
+// CHECK-NEXT: call void @_Init_thread_footer(ptr @"?$TSS0@?1??g@@YAAAUS@@XZ@4HA")
// CHECK-NEXT: br label %init.end
//
// CHECK: [[init_end]]:
-// CHECK-NEXT: ret %struct.S* @"?s@?1??g@@YAAAUS@@XZ@4U2@A"
+// CHECK-NEXT: ret ptr @"?s@?1??g@@YAAAUS@@XZ@4U2@A"
//
// CHECK: [[lpad]]:
// CHECK-NEXT: cleanuppad within none []
-// CHECK: call void @_Init_thread_abort(i32* @"?$TSS0@?1??g@@YAAAUS@@XZ@4HA")
+// CHECK: call void @_Init_thread_abort(ptr @"?$TSS0@?1??g@@YAAAUS@@XZ@4HA")
// CHECK-NEXT: cleanupret {{.*}} unwind to caller
return s;
}