[clang] 250620f - [OpaquePtr][AArch64] Use elementtype on ldxr/stxr
Arthur Eubanks via cfe-commits
cfe-commits at lists.llvm.org
Mon Mar 14 10:10:10 PDT 2022
Author: Arthur Eubanks
Date: 2022-03-14T10:09:59-07:00
New Revision: 250620f76e070cbbd4e8511f751f577b6e1633ac
URL: https://github.com/llvm/llvm-project/commit/250620f76e070cbbd4e8511f751f577b6e1633ac
DIFF: https://github.com/llvm/llvm-project/commit/250620f76e070cbbd4e8511f751f577b6e1633ac.diff
LOG: [OpaquePtr][AArch64] Use elementtype on ldxr/stxr
Includes verifier changes checking the elementtype attribute, clang codegen
changes emitting it, and ISel changes using it.
Reviewed By: #opaque-pointers, nikic
Differential Revision: https://reviews.llvm.org/D120527
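For reference, a minimal IR sketch (mirroring the updated tests below) of how
the elementtype attribute now appears on these intrinsic calls; %p is assumed
to be an i32* here:

  ; ldxr/ldaxr carry elementtype on the pointer operand (argument 0)
  %a = call i64 @llvm.aarch64.ldxr.p0i32(i32* elementtype(i32) %p)
  ; stxr/stlxr carry elementtype on the pointer operand (argument 1)
  %c = call i32 @llvm.aarch64.stxr.p0i32(i64 0, i32* elementtype(i32) %p)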
Added:
llvm/test/Bitcode/upgrade-aarch64-ldstxr.bc
llvm/test/Bitcode/upgrade-aarch64-ldstxr.ll
llvm/test/Verifier/aarch64-ldstxr.ll
Modified:
clang/lib/CodeGen/CGBuiltin.cpp
clang/test/CodeGen/arm_acle.c
clang/test/CodeGen/builtins-arm-exclusive.c
clang/test/CodeGenCXX/builtins-arm-exclusive.cpp
llvm/lib/Bitcode/Reader/BitcodeReader.cpp
llvm/lib/IR/Verifier.cpp
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll
llvm/test/CodeGen/AArch64/arm64_32-atomics.ll
llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll
llvm/test/Transforms/ConstantHoisting/AArch64/const-hoist-intrinsics.ll
Removed:
################################################################################
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index a2699f5b3ea1e..6383dfdd89508 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -9684,23 +9684,26 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
QualType Ty = E->getType();
llvm::Type *RealResTy = ConvertType(Ty);
- llvm::Type *PtrTy = llvm::IntegerType::get(
- getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
+ llvm::Type *IntTy =
+ llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));
+ llvm::Type *PtrTy = IntTy->getPointerTo();
LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
? Intrinsic::aarch64_ldaxr
: Intrinsic::aarch64_ldxr,
PtrTy);
- Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
+ CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
+ Val->addParamAttr(
+ 0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));
if (RealResTy->isPointerTy())
return Builder.CreateIntToPtr(Val, RealResTy);
llvm::Type *IntResTy = llvm::IntegerType::get(
getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
- Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
- return Builder.CreateBitCast(Val, RealResTy);
+ return Builder.CreateBitCast(Builder.CreateTruncOrBitCast(Val, IntResTy),
+ RealResTy);
}
if ((BuiltinID == AArch64::BI__builtin_arm_strex ||
@@ -9748,7 +9751,10 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
? Intrinsic::aarch64_stlxr
: Intrinsic::aarch64_stxr,
StoreAddr->getType());
- return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
+ CallInst *CI = Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
+ CI->addParamAttr(
+ 1, Attribute::get(getLLVMContext(), Attribute::ElementType, StoreTy));
+ return CI;
}
if (BuiltinID == AArch64::BI__getReg) {
diff --git a/clang/test/CodeGen/arm_acle.c b/clang/test/CodeGen/arm_acle.c
index 99c281633fc3b..350aff5814644 100644
--- a/clang/test/CodeGen/arm_acle.c
+++ b/clang/test/CodeGen/arm_acle.c
@@ -153,10 +153,10 @@ void test_dbg(void) {
// AArch64-NEXT: [[TMP0:%.*]] = bitcast i8* [[P:%.*]] to i32*
// AArch64-NEXT: br label [[DO_BODY_I:%.*]]
// AArch64: do.body.i:
-// AArch64-NEXT: [[LDXR_I:%.*]] = call i64 @llvm.aarch64.ldxr.p0i32(i32* [[TMP0]]) [[ATTR3]]
+// AArch64-NEXT: [[LDXR_I:%.*]] = call i64 @llvm.aarch64.ldxr.p0i32(i32* elementtype(i32) [[TMP0]]) [[ATTR3]]
// AArch64-NEXT: [[TMP1:%.*]] = trunc i64 [[LDXR_I]] to i32
// AArch64-NEXT: [[TMP2:%.*]] = zext i32 [[X:%.*]] to i64
-// AArch64-NEXT: [[STXR_I:%.*]] = call i32 @llvm.aarch64.stxr.p0i32(i64 [[TMP2]], i32* [[TMP0]]) [[ATTR3]]
+// AArch64-NEXT: [[STXR_I:%.*]] = call i32 @llvm.aarch64.stxr.p0i32(i64 [[TMP2]], i32* elementtype(i32) [[TMP0]]) [[ATTR3]]
// AArch64-NEXT: [[TOBOOL_I:%.*]] = icmp ne i32 [[STXR_I]], 0
// AArch64-NEXT: br i1 [[TOBOOL_I]], label [[DO_BODY_I]], label [[__SWP_EXIT:%.*]], [[LOOP6:!llvm.loop !.*]]
// AArch64: __swp.exit:
diff --git a/clang/test/CodeGen/builtins-arm-exclusive.c b/clang/test/CodeGen/builtins-arm-exclusive.c
index c6cf231659ecd..5abe888e50ae5 100644
--- a/clang/test/CodeGen/builtins-arm-exclusive.c
+++ b/clang/test/CodeGen/builtins-arm-exclusive.c
@@ -13,7 +13,7 @@ int test_ldrex(char *addr, long long *addr64, float *addrfloat) {
// CHECK: [[INTRES:%.*]] = call i32 @llvm.arm.ldrex.p0i8(i8* %addr)
// CHECK: trunc i32 [[INTRES]] to i8
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i8(i8* %addr)
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i8(i8* elementtype(i8) %addr)
// CHECK-ARM64: trunc i64 [[INTRES]] to i8
sum += __builtin_arm_ldrex((short *)addr);
@@ -22,7 +22,7 @@ int test_ldrex(char *addr, long long *addr64, float *addrfloat) {
// CHECK: trunc i32 [[INTRES]] to i16
// CHECK-ARM64: [[ADDR16:%.*]] = bitcast i8* %addr to i16*
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i16(i16* [[ADDR16]])
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i16(i16* elementtype(i16) [[ADDR16]])
// CHECK-ARM64: trunc i64 [[INTRES]] to i16
sum += __builtin_arm_ldrex((int *)addr);
@@ -30,7 +30,7 @@ int test_ldrex(char *addr, long long *addr64, float *addrfloat) {
// CHECK: call i32 @llvm.arm.ldrex.p0i32(i32* [[ADDR32]])
// CHECK-ARM64: [[ADDR32:%.*]] = bitcast i8* %addr to i32*
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i32(i32* [[ADDR32]])
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i32(i32* elementtype(i32) [[ADDR32]])
// CHECK-ARM64: trunc i64 [[INTRES]] to i32
sum += __builtin_arm_ldrex((long long *)addr);
@@ -39,13 +39,13 @@ int test_ldrex(char *addr, long long *addr64, float *addrfloat) {
// CHECK: call { i32, i32 } @llvm.arm.ldrexd(i8* [[TMP5]])
// CHECK-ARM64: [[ADDR64:%.*]] = bitcast i8* %addr to i64*
-// CHECK-ARM64: call i64 @llvm.aarch64.ldxr.p0i64(i64* [[ADDR64]])
+// CHECK-ARM64: call i64 @llvm.aarch64.ldxr.p0i64(i64* elementtype(i64) [[ADDR64]])
sum += __builtin_arm_ldrex(addr64);
// CHECK: [[ADDR64_AS8:%.*]] = bitcast i64* %addr64 to i8*
// CHECK: call { i32, i32 } @llvm.arm.ldrexd(i8* [[ADDR64_AS8]])
-// CHECK-ARM64: call i64 @llvm.aarch64.ldxr.p0i64(i64* %addr64)
+// CHECK-ARM64: call i64 @llvm.aarch64.ldxr.p0i64(i64* elementtype(i64) %addr64)
sum += __builtin_arm_ldrex(addrfloat);
// CHECK: [[INTADDR:%.*]] = bitcast float* %addrfloat to i32*
@@ -53,7 +53,7 @@ int test_ldrex(char *addr, long long *addr64, float *addrfloat) {
// CHECK: bitcast i32 [[INTRES]] to float
// CHECK-ARM64: [[INTADDR:%.*]] = bitcast float* %addrfloat to i32*
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i32(i32* [[INTADDR]])
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i32(i32* elementtype(i32) [[INTADDR]])
// CHECK-ARM64: [[TRUNCRES:%.*]] = trunc i64 [[INTRES]] to i32
// CHECK-ARM64: bitcast i32 [[TRUNCRES]] to float
@@ -71,7 +71,7 @@ int test_ldrex(char *addr, long long *addr64, float *addrfloat) {
// CHECK-ARM64: [[TMP4:%.*]] = bitcast i8* %addr to double*
// CHECK-ARM64: [[TMP5:%.*]] = bitcast double* [[TMP4]] to i64*
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i64(i64* [[TMP5]])
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i64(i64* elementtype(i64) [[TMP5]])
// CHECK-ARM64: bitcast i64 [[INTRES]] to double
sum += *__builtin_arm_ldrex((int **)addr);
@@ -82,7 +82,7 @@ int test_ldrex(char *addr, long long *addr64, float *addrfloat) {
// CHECK-ARM64: [[TMP4:%.*]] = bitcast i8* %addr to i32**
// CHECK-ARM64: [[TMP5:%.*]] = bitcast i32** [[TMP4]] to i64*
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i64(i64* [[TMP5]])
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i64(i64* elementtype(i64) [[TMP5]])
// CHECK-ARM64: inttoptr i64 [[INTRES]] to i32*
sum += __builtin_arm_ldrex((struct Simple **)addr)->a;
@@ -93,7 +93,7 @@ int test_ldrex(char *addr, long long *addr64, float *addrfloat) {
// CHECK-ARM64: [[TMP4:%.*]] = bitcast i8* %addr to %struct.Simple**
// CHECK-ARM64: [[TMP5:%.*]] = bitcast %struct.Simple** [[TMP4]] to i64*
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i64(i64* [[TMP5]])
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0i64(i64* elementtype(i64) [[TMP5]])
// CHECK-ARM64: inttoptr i64 [[INTRES]] to %struct.Simple*
return sum;
}
@@ -106,7 +106,7 @@ int test_ldaex(char *addr, long long *addr64, float *addrfloat) {
// CHECK: [[INTRES:%.*]] = call i32 @llvm.arm.ldaex.p0i8(i8* %addr)
// CHECK: trunc i32 [[INTRES]] to i8
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i8(i8* %addr)
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i8(i8* elementtype(i8) %addr)
// CHECK-ARM64: trunc i64 [[INTRES]] to i8
sum += __builtin_arm_ldaex((short *)addr);
@@ -115,7 +115,7 @@ int test_ldaex(char *addr, long long *addr64, float *addrfloat) {
// CHECK: trunc i32 [[INTRES]] to i16
// CHECK-ARM64: [[ADDR16:%.*]] = bitcast i8* %addr to i16*
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i16(i16* [[ADDR16]])
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i16(i16* elementtype(i16) [[ADDR16]])
// CHECK-ARM64: [[TRUNCRES:%.*]] = trunc i64 [[INTRES]] to i16
sum += __builtin_arm_ldaex((int *)addr);
@@ -123,7 +123,7 @@ int test_ldaex(char *addr, long long *addr64, float *addrfloat) {
// CHECK: call i32 @llvm.arm.ldaex.p0i32(i32* [[ADDR32]])
// CHECK-ARM64: [[ADDR32:%.*]] = bitcast i8* %addr to i32*
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i32(i32* [[ADDR32]])
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i32(i32* elementtype(i32) [[ADDR32]])
// CHECK-ARM64: trunc i64 [[INTRES]] to i32
sum += __builtin_arm_ldaex((long long *)addr);
@@ -132,13 +132,13 @@ int test_ldaex(char *addr, long long *addr64, float *addrfloat) {
// CHECK: call { i32, i32 } @llvm.arm.ldaexd(i8* [[TMP5]])
// CHECK-ARM64: [[ADDR64:%.*]] = bitcast i8* %addr to i64*
-// CHECK-ARM64: call i64 @llvm.aarch64.ldaxr.p0i64(i64* [[ADDR64]])
+// CHECK-ARM64: call i64 @llvm.aarch64.ldaxr.p0i64(i64* elementtype(i64) [[ADDR64]])
sum += __builtin_arm_ldaex(addr64);
// CHECK: [[ADDR64_AS8:%.*]] = bitcast i64* %addr64 to i8*
// CHECK: call { i32, i32 } @llvm.arm.ldaexd(i8* [[ADDR64_AS8]])
-// CHECK-ARM64: call i64 @llvm.aarch64.ldaxr.p0i64(i64* %addr64)
+// CHECK-ARM64: call i64 @llvm.aarch64.ldaxr.p0i64(i64* elementtype(i64) %addr64)
sum += __builtin_arm_ldaex(addrfloat);
// CHECK: [[INTADDR:%.*]] = bitcast float* %addrfloat to i32*
@@ -146,7 +146,7 @@ int test_ldaex(char *addr, long long *addr64, float *addrfloat) {
// CHECK: bitcast i32 [[INTRES]] to float
// CHECK-ARM64: [[INTADDR:%.*]] = bitcast float* %addrfloat to i32*
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i32(i32* [[INTADDR]])
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i32(i32* elementtype(i32) [[INTADDR]])
// CHECK-ARM64: [[TRUNCRES:%.*]] = trunc i64 [[INTRES]] to i32
// CHECK-ARM64: bitcast i32 [[TRUNCRES]] to float
@@ -164,7 +164,7 @@ int test_ldaex(char *addr, long long *addr64, float *addrfloat) {
// CHECK-ARM64: [[TMP4:%.*]] = bitcast i8* %addr to double*
// CHECK-ARM64: [[TMP5:%.*]] = bitcast double* [[TMP4]] to i64*
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i64(i64* [[TMP5]])
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i64(i64* elementtype(i64) [[TMP5]])
// CHECK-ARM64: bitcast i64 [[INTRES]] to double
sum += *__builtin_arm_ldaex((int **)addr);
@@ -175,7 +175,7 @@ int test_ldaex(char *addr, long long *addr64, float *addrfloat) {
// CHECK-ARM64: [[TMP4:%.*]] = bitcast i8* %addr to i32**
// CHECK-ARM64: [[TMP5:%.*]] = bitcast i32** [[TMP4]] to i64*
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i64(i64* [[TMP5]])
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i64(i64* elementtype(i64) [[TMP5]])
// CHECK-ARM64: inttoptr i64 [[INTRES]] to i32*
sum += __builtin_arm_ldaex((struct Simple **)addr)->a;
@@ -186,7 +186,7 @@ int test_ldaex(char *addr, long long *addr64, float *addrfloat) {
// CHECK-ARM64: [[TMP4:%.*]] = bitcast i8* %addr to %struct.Simple**
// CHECK-ARM64: [[TMP5:%.*]] = bitcast %struct.Simple** [[TMP4]] to i64*
-// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i64(i64* [[TMP5]])
+// CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i64(i64* elementtype(i64) [[TMP5]])
// CHECK-ARM64: inttoptr i64 [[INTRES]] to %struct.Simple*
return sum;
}
@@ -199,21 +199,21 @@ int test_strex(char *addr) {
res |= __builtin_arm_strex(4, addr);
// CHECK: call i32 @llvm.arm.strex.p0i8(i32 4, i8* %addr)
-// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i8(i64 4, i8* %addr)
+// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i8(i64 4, i8* elementtype(i8) %addr)
res |= __builtin_arm_strex(42, (short *)addr);
// CHECK: [[ADDR16:%.*]] = bitcast i8* %addr to i16*
// CHECK: call i32 @llvm.arm.strex.p0i16(i32 42, i16* [[ADDR16]])
// CHECK-ARM64: [[ADDR16:%.*]] = bitcast i8* %addr to i16*
-// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i16(i64 42, i16* [[ADDR16]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i16(i64 42, i16* elementtype(i16) [[ADDR16]])
res |= __builtin_arm_strex(42, (int *)addr);
// CHECK: [[ADDR32:%.*]] = bitcast i8* %addr to i32*
// CHECK: call i32 @llvm.arm.strex.p0i32(i32 42, i32* [[ADDR32]])
// CHECK-ARM64: [[ADDR32:%.*]] = bitcast i8* %addr to i32*
-// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i32(i64 42, i32* [[ADDR32]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i32(i64 42, i32* elementtype(i32) [[ADDR32]])
res |= __builtin_arm_strex(42, (long long *)addr);
// CHECK: store i64 42, i64* [[TMP:%.*]], align 8
@@ -226,7 +226,7 @@ int test_strex(char *addr) {
// CHECK: call i32 @llvm.arm.strexd(i32 [[LO]], i32 [[HI]], i8* [[TMP5]])
// CHECK-ARM64: [[ADDR64:%.*]] = bitcast i8* %addr to i64*
-// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i64(i64 42, i64* [[ADDR64]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i64(i64 42, i64* elementtype(i64) [[ADDR64]])
res |= __builtin_arm_strex(2.71828f, (float *)addr);
// CHECK: [[TMP4:%.*]] = bitcast i8* %addr to float*
@@ -235,7 +235,7 @@ int test_strex(char *addr) {
// CHECK-ARM64: [[TMP4:%.*]] = bitcast i8* %addr to float*
// CHECK-ARM64: [[TMP5:%.*]] = bitcast float* [[TMP4]] to i32*
-// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i32(i64 1076754509, i32* [[TMP5]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i32(i64 1076754509, i32* elementtype(i32) [[TMP5]])
res |= __builtin_arm_strex(3.14159, (double *)addr);
// CHECK: store double 3.141590e+00, double* [[TMP:%.*]], align 8
@@ -249,7 +249,7 @@ int test_strex(char *addr) {
// CHECK-ARM64: [[TMP4:%.*]] = bitcast i8* %addr to double*
// CHECK-ARM64: [[TMP5:%.*]] = bitcast double* [[TMP4]] to i64*
-// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i64(i64 4614256650576692846, i64* [[TMP5]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i64(i64 4614256650576692846, i64* elementtype(i64) [[TMP5]])
res |= __builtin_arm_strex(&var, (struct Simple **)addr);
// CHECK: [[TMP4:%.*]] = bitcast i8* %addr to %struct.Simple**
@@ -260,7 +260,7 @@ int test_strex(char *addr) {
// CHECK-ARM64: [[TMP4:%.*]] = bitcast i8* %addr to %struct.Simple**
// CHECK-ARM64: [[TMP5:%.*]] = bitcast %struct.Simple** [[TMP4]] to i64*
// CHECK-ARM64: [[INTVAL:%.*]] = ptrtoint %struct.Simple* %var to i64
-// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i64(i64 [[INTVAL]], i64* [[TMP5]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stxr.p0i64(i64 [[INTVAL]], i64* elementtype(i64) [[TMP5]])
return res;
}
@@ -273,21 +273,21 @@ int test_stlex(char *addr) {
res |= __builtin_arm_stlex(4, addr);
// CHECK: call i32 @llvm.arm.stlex.p0i8(i32 4, i8* %addr)
-// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i8(i64 4, i8* %addr)
+// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i8(i64 4, i8* elementtype(i8) %addr)
res |= __builtin_arm_stlex(42, (short *)addr);
// CHECK: [[ADDR16:%.*]] = bitcast i8* %addr to i16*
// CHECK: call i32 @llvm.arm.stlex.p0i16(i32 42, i16* [[ADDR16]])
// CHECK-ARM64: [[ADDR16:%.*]] = bitcast i8* %addr to i16*
-// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i16(i64 42, i16* [[ADDR16]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i16(i64 42, i16* elementtype(i16) [[ADDR16]])
res |= __builtin_arm_stlex(42, (int *)addr);
// CHECK: [[ADDR32:%.*]] = bitcast i8* %addr to i32*
// CHECK: call i32 @llvm.arm.stlex.p0i32(i32 42, i32* [[ADDR32]])
// CHECK-ARM64: [[ADDR32:%.*]] = bitcast i8* %addr to i32*
-// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i32(i64 42, i32* [[ADDR32]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i32(i64 42, i32* elementtype(i32) [[ADDR32]])
res |= __builtin_arm_stlex(42, (long long *)addr);
// CHECK: store i64 42, i64* [[TMP:%.*]], align 8
@@ -300,7 +300,7 @@ int test_stlex(char *addr) {
// CHECK: call i32 @llvm.arm.stlexd(i32 [[LO]], i32 [[HI]], i8* [[TMP5]])
// CHECK-ARM64: [[ADDR64:%.*]] = bitcast i8* %addr to i64*
-// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i64(i64 42, i64* [[ADDR64]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i64(i64 42, i64* elementtype(i64) [[ADDR64]])
res |= __builtin_arm_stlex(2.71828f, (float *)addr);
// CHECK: [[TMP4:%.*]] = bitcast i8* %addr to float*
@@ -309,7 +309,7 @@ int test_stlex(char *addr) {
// CHECK-ARM64: [[TMP4:%.*]] = bitcast i8* %addr to float*
// CHECK-ARM64: [[TMP5:%.*]] = bitcast float* [[TMP4]] to i32*
-// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i32(i64 1076754509, i32* [[TMP5]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i32(i64 1076754509, i32* elementtype(i32) [[TMP5]])
res |= __builtin_arm_stlex(3.14159, (double *)addr);
// CHECK: store double 3.141590e+00, double* [[TMP:%.*]], align 8
@@ -323,7 +323,7 @@ int test_stlex(char *addr) {
// CHECK-ARM64: [[TMP4:%.*]] = bitcast i8* %addr to double*
// CHECK-ARM64: [[TMP5:%.*]] = bitcast double* [[TMP4]] to i64*
-// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i64(i64 4614256650576692846, i64* [[TMP5]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i64(i64 4614256650576692846, i64* elementtype(i64) [[TMP5]])
res |= __builtin_arm_stlex(&var, (struct Simple **)addr);
// CHECK: [[TMP4:%.*]] = bitcast i8* %addr to %struct.Simple**
@@ -334,7 +334,7 @@ int test_stlex(char *addr) {
// CHECK-ARM64: [[TMP4:%.*]] = bitcast i8* %addr to %struct.Simple**
// CHECK-ARM64: [[TMP5:%.*]] = bitcast %struct.Simple** [[TMP4]] to i64*
// CHECK-ARM64: [[INTVAL:%.*]] = ptrtoint %struct.Simple* %var to i64
-// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i64(i64 [[INTVAL]], i64* [[TMP5]])
+// CHECK-ARM64: call i32 @llvm.aarch64.stlxr.p0i64(i64 [[INTVAL]], i64* elementtype(i64) [[TMP5]])
return res;
}
diff --git a/clang/test/CodeGenCXX/builtins-arm-exclusive.cpp b/clang/test/CodeGenCXX/builtins-arm-exclusive.cpp
index 06f7a023adec1..05cb330a4ddae 100644
--- a/clang/test/CodeGenCXX/builtins-arm-exclusive.cpp
+++ b/clang/test/CodeGenCXX/builtins-arm-exclusive.cpp
@@ -7,7 +7,7 @@ bool b;
// CHECK: call i32 @llvm.arm.ldrex.p0i8(i8* @b)
// CHECK-ARM64-LABEL: @_Z10test_ldrexv()
-// CHECK-ARM64: call i64 @llvm.aarch64.ldxr.p0i8(i8* @b)
+// CHECK-ARM64: call i64 @llvm.aarch64.ldxr.p0i8(i8* elementtype(i8) @b)
void test_ldrex() {
b = __builtin_arm_ldrex(&b);
@@ -17,7 +17,7 @@ void test_ldrex() {
// CHECK: %{{.*}} = call i32 @llvm.arm.strex.p0i8(i32 1, i8* @b)
// CHECK-ARM64-LABEL: @_Z10tset_strexv()
-// CHECK-ARM64: %{{.*}} = call i32 @llvm.aarch64.stxr.p0i8(i64 1, i8* @b)
+// CHECK-ARM64: %{{.*}} = call i32 @llvm.aarch64.stxr.p0i8(i64 1, i8* elementtype(i8) @b)
void tset_strex() {
__builtin_arm_strex(true, &b);
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index 5a667f55948f0..3ad5fd7171db8 100644
--- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -51,6 +51,7 @@
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
@@ -4140,14 +4141,23 @@ Error BitcodeReader::propagateAttributeTypes(CallBase *CB,
switch (CB->getIntrinsicID()) {
case Intrinsic::preserve_array_access_index:
case Intrinsic::preserve_struct_access_index:
- if (!Attrs.getParamElementType(0)) {
- Type *ElTy = getPtrElementTypeByID(ArgTyIDs[0]);
+ case Intrinsic::aarch64_ldaxr:
+ case Intrinsic::aarch64_ldxr:
+ case Intrinsic::aarch64_stlxr:
+ case Intrinsic::aarch64_stxr: {
+ unsigned ArgNo = CB->getIntrinsicID() == Intrinsic::aarch64_stlxr ||
+ CB->getIntrinsicID() == Intrinsic::aarch64_stxr
+ ? 1
+ : 0;
+ if (!Attrs.getParamElementType(ArgNo)) {
+ Type *ElTy = getPtrElementTypeByID(ArgTyIDs[ArgNo]);
if (!ElTy)
return error("Missing element type for elementtype upgrade");
Attribute NewAttr = Attribute::get(Context, Attribute::ElementType, ElTy);
- Attrs = Attrs.addParamAttribute(Context, 0, NewAttr);
+ Attrs = Attrs.addParamAttribute(Context, ArgNo, NewAttr);
}
break;
+ }
default:
break;
}
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index bc6f6757311f1..632cc0af3846c 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -84,6 +84,7 @@
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
@@ -5520,13 +5521,23 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
break;
}
case Intrinsic::preserve_array_access_index:
- case Intrinsic::preserve_struct_access_index: {
+ case Intrinsic::preserve_struct_access_index:
+ case Intrinsic::aarch64_ldaxr:
+ case Intrinsic::aarch64_ldxr: {
Type *ElemTy = Call.getParamElementType(0);
Assert(ElemTy,
"Intrinsic requires elementtype attribute on first argument.",
&Call);
break;
}
+ case Intrinsic::aarch64_stlxr:
+ case Intrinsic::aarch64_stxr: {
+ Type *ElemTy = Call.getAttributes().getParamElementType(1);
+ Assert(ElemTy,
+ "Intrinsic requires elementtype attribute on second argument.",
+ &Call);
+ break;
+ }
};
}
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 6d2dfd89af298..0f2d7997c870b 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -11984,23 +11984,23 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
}
case Intrinsic::aarch64_ldaxr:
case Intrinsic::aarch64_ldxr: {
- PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
+ Type *ValTy = I.getParamElementType(0);
Info.opc = ISD::INTRINSIC_W_CHAIN;
- Info.memVT = MVT::getVT(PtrTy->getPointerElementType());
+ Info.memVT = MVT::getVT(ValTy);
Info.ptrVal = I.getArgOperand(0);
Info.offset = 0;
- Info.align = DL.getABITypeAlign(PtrTy->getPointerElementType());
+ Info.align = DL.getABITypeAlign(ValTy);
Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
return true;
}
case Intrinsic::aarch64_stlxr:
case Intrinsic::aarch64_stxr: {
- PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
+ Type *ValTy = I.getParamElementType(1);
Info.opc = ISD::INTRINSIC_W_CHAIN;
- Info.memVT = MVT::getVT(PtrTy->getPointerElementType());
+ Info.memVT = MVT::getVT(ValTy);
Info.ptrVal = I.getArgOperand(1);
Info.offset = 0;
- Info.align = DL.getABITypeAlign(PtrTy->getPointerElementType());
+ Info.align = DL.getABITypeAlign(ValTy);
Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
return true;
}
@@ -19265,7 +19265,10 @@ Value *AArch64TargetLowering::emitLoadLinked(IRBuilderBase &Builder,
const DataLayout &DL = M->getDataLayout();
IntegerType *IntEltTy = Builder.getIntNTy(DL.getTypeSizeInBits(ValueTy));
- Value *Trunc = Builder.CreateTrunc(Builder.CreateCall(Ldxr, Addr), IntEltTy);
+ CallInst *CI = Builder.CreateCall(Ldxr, Addr);
+ CI->addParamAttr(
+ 0, Attribute::get(Builder.getContext(), Attribute::ElementType, ValueTy));
+ Value *Trunc = Builder.CreateTrunc(CI, IntEltTy);
return Builder.CreateBitCast(Trunc, ValueTy);
}
@@ -19306,10 +19309,13 @@ Value *AArch64TargetLowering::emitStoreConditional(IRBuilderBase &Builder,
IntegerType *IntValTy = Builder.getIntNTy(DL.getTypeSizeInBits(Val->getType()));
Val = Builder.CreateBitCast(Val, IntValTy);
- return Builder.CreateCall(Stxr,
- {Builder.CreateZExtOrBitCast(
- Val, Stxr->getFunctionType()->getParamType(0)),
- Addr});
+ CallInst *CI = Builder.CreateCall(
+ Stxr, {Builder.CreateZExtOrBitCast(
+ Val, Stxr->getFunctionType()->getParamType(0)),
+ Addr});
+ CI->addParamAttr(1, Attribute::get(Builder.getContext(),
+ Attribute::ElementType, Val->getType()));
+ return CI;
}
bool AArch64TargetLowering::functionArgumentNeedsConsecutiveRegisters(
diff --git a/llvm/test/Bitcode/upgrade-aarch64-ldstxr.bc b/llvm/test/Bitcode/upgrade-aarch64-ldstxr.bc
new file mode 100644
index 0000000000000..3ac94f517c4a5
Binary files /dev/null and b/llvm/test/Bitcode/upgrade-aarch64-ldstxr.bc differ
diff --git a/llvm/test/Bitcode/upgrade-aarch64-ldstxr.ll b/llvm/test/Bitcode/upgrade-aarch64-ldstxr.ll
new file mode 100644
index 0000000000000..adc39fb392793
--- /dev/null
+++ b/llvm/test/Bitcode/upgrade-aarch64-ldstxr.ll
@@ -0,0 +1,19 @@
+; RUN: llvm-dis < %S/upgrade-aarch64-ldstxr.bc | FileCheck %s
+
+define void @f(i32* %p) {
+; CHECK: call i64 @llvm.aarch64.ldxr.p0i32(i32* elementtype(i32)
+ %a = call i64 @llvm.aarch64.ldxr.p0i32(i32* %p)
+; CHECK: call i32 @llvm.aarch64.stxr.p0i32(i64 0, i32* elementtype(i32)
+ %c = call i32 @llvm.aarch64.stxr.p0i32(i64 0, i32* %p)
+
+; CHECK: call i64 @llvm.aarch64.ldaxr.p0i32(i32* elementtype(i32)
+ %a2 = call i64 @llvm.aarch64.ldaxr.p0i32(i32* %p)
+; CHECK: call i32 @llvm.aarch64.stlxr.p0i32(i64 0, i32* elementtype(i32)
+ %c2 = call i32 @llvm.aarch64.stlxr.p0i32(i64 0, i32* %p)
+ ret void
+}
+
+declare i64 @llvm.aarch64.ldxr.p0i32(i32*)
+declare i64 @llvm.aarch64.ldaxr.p0i32(i32*)
+declare i32 @llvm.aarch64.stxr.p0i32(i64, i32*)
+declare i32 @llvm.aarch64.stlxr.p0i32(i64, i32*)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
index 22f8a5d911f2d..ef559652380e3 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
@@ -1785,7 +1785,7 @@ define i32 @test_target_mem_intrinsic(i32* %addr) {
; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY $x0
; CHECK: [[VAL:%[0-9]+]]:_(s64) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.aarch64.ldxr), [[ADDR]](p0) :: (volatile load (s32) from %ir.addr)
; CHECK: G_TRUNC [[VAL]](s64)
- %val = call i64 @llvm.aarch64.ldxr.p0i32(i32* %addr)
+ %val = call i64 @llvm.aarch64.ldxr.p0i32(i32* elementtype(i32) %addr)
%trunc = trunc i64 %val to i32
ret i32 %trunc
}
diff --git a/llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll b/llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll
index f5beaebe50cb5..014d7b6fd62b8 100644
--- a/llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-ldxr-stxr.ll
@@ -45,7 +45,7 @@ define dso_local void @test_load_i8(i8* %addr) {
; GISEL: ldxrb w[[LOADVAL:[0-9]+]], [x0]
; GISEL-NOT: uxtb
; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
- %val = call i64 @llvm.aarch64.ldxr.p0i8(i8* %addr)
+ %val = call i64 @llvm.aarch64.ldxr.p0i8(i8* elementtype(i8) %addr)
%shortval = trunc i64 %val to i8
%extval = zext i8 %shortval to i64
store i64 %extval, i64* @var, align 8
@@ -64,7 +64,7 @@ define dso_local void @test_load_i16(i16* %addr) {
; GISEL: ldxrh w[[LOADVAL:[0-9]+]], [x0]
; GISEL-NOT: uxtb
; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
- %val = call i64 @llvm.aarch64.ldxr.p0i16(i16* %addr)
+ %val = call i64 @llvm.aarch64.ldxr.p0i16(i16* elementtype(i16) %addr)
%shortval = trunc i64 %val to i16
%extval = zext i16 %shortval to i64
store i64 %extval, i64* @var, align 8
@@ -83,7 +83,7 @@ define dso_local void @test_load_i32(i32* %addr) {
; GISEL: ldxr w[[LOADVAL:[0-9]+]], [x0]
; GISEL-NOT: uxtb
; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
- %val = call i64 @llvm.aarch64.ldxr.p0i32(i32* %addr)
+ %val = call i64 @llvm.aarch64.ldxr.p0i32(i32* elementtype(i32) %addr)
%shortval = trunc i64 %val to i32
%extval = zext i32 %shortval to i64
store i64 %extval, i64* @var, align 8
@@ -100,7 +100,7 @@ define dso_local void @test_load_i64(i64* %addr) {
; GISEL: ldxr x[[LOADVAL:[0-9]+]], [x0]
; GISEL-NOT: uxtb
; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
- %val = call i64 @llvm.aarch64.ldxr.p0i64(i64* %addr)
+ %val = call i64 @llvm.aarch64.ldxr.p0i64(i64* elementtype(i64) %addr)
store i64 %val, i64* @var, align 8
ret void
}
@@ -122,7 +122,7 @@ define dso_local i32 @test_store_i8(i32, i8 %val, i8* %addr) {
; GISEL-NOT: and
; GISEL: stxrb w0, w1, [x2]
%extval = zext i8 %val to i64
- %res = call i32 @llvm.aarch64.stxr.p0i8(i64 %extval, i8* %addr)
+ %res = call i32 @llvm.aarch64.stxr.p0i8(i64 %extval, i8* elementtype(i8) %addr)
ret i32 %res
}
@@ -137,7 +137,7 @@ define dso_local i32 @test_store_i16(i32, i16 %val, i16* %addr) {
; GISEL-NOT: and
; GISEL: stxrh w0, w1, [x2]
%extval = zext i16 %val to i64
- %res = call i32 @llvm.aarch64.stxr.p0i16(i64 %extval, i16* %addr)
+ %res = call i32 @llvm.aarch64.stxr.p0i16(i64 %extval, i16* elementtype(i16) %addr)
ret i32 %res
}
@@ -152,7 +152,7 @@ define dso_local i32 @test_store_i32(i32, i32 %val, i32* %addr) {
; GISEL-NOT: and
; GISEL: stxr w0, w1, [x2]
%extval = zext i32 %val to i64
- %res = call i32 @llvm.aarch64.stxr.p0i32(i64 %extval, i32* %addr)
+ %res = call i32 @llvm.aarch64.stxr.p0i32(i64 %extval, i32* elementtype(i32) %addr)
ret i32 %res
}
@@ -162,7 +162,7 @@ define dso_local i32 @test_store_i64(i32, i64 %val, i64* %addr) {
; CHECK: stxr w0, x1, [x2]
; GISEL-LABEL: test_store_i64:
; GISEL: stxr w0, x1, [x2]
- %res = call i32 @llvm.aarch64.stxr.p0i64(i64 %val, i64* %addr)
+ %res = call i32 @llvm.aarch64.stxr.p0i64(i64 %val, i64* elementtype(i64) %addr)
ret i32 %res
}
@@ -219,7 +219,7 @@ define dso_local void @test_load_acquire_i8(i8* %addr) {
; GISEL-LABEL: test_load_acquire_i8:
; GISEL: ldaxrb w[[LOADVAL:[0-9]+]], [x0]
; GISEL-DAG: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
- %val = call i64 @llvm.aarch64.ldaxr.p0i8(i8* %addr)
+ %val = call i64 @llvm.aarch64.ldaxr.p0i8(i8* elementtype(i8) %addr)
%shortval = trunc i64 %val to i8
%extval = zext i8 %shortval to i64
store i64 %extval, i64* @var, align 8
@@ -237,7 +237,7 @@ define dso_local void @test_load_acquire_i16(i16* %addr) {
; GISEL-LABEL: test_load_acquire_i16:
; GISEL: ldaxrh w[[LOADVAL:[0-9]+]], [x0]
; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
- %val = call i64 @llvm.aarch64.ldaxr.p0i16(i16* %addr)
+ %val = call i64 @llvm.aarch64.ldaxr.p0i16(i16* elementtype(i16) %addr)
%shortval = trunc i64 %val to i16
%extval = zext i16 %shortval to i64
store i64 %extval, i64* @var, align 8
@@ -255,7 +255,7 @@ define dso_local void @test_load_acquire_i32(i32* %addr) {
; GISEL-LABEL: test_load_acquire_i32:
; GISEL: ldaxr w[[LOADVAL:[0-9]+]], [x0]
; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
- %val = call i64 @llvm.aarch64.ldaxr.p0i32(i32* %addr)
+ %val = call i64 @llvm.aarch64.ldaxr.p0i32(i32* elementtype(i32) %addr)
%shortval = trunc i64 %val to i32
%extval = zext i32 %shortval to i64
store i64 %extval, i64* @var, align 8
@@ -271,7 +271,7 @@ define dso_local void @test_load_acquire_i64(i64* %addr) {
; GISEL-LABEL: test_load_acquire_i64:
; GISEL: ldaxr x[[LOADVAL:[0-9]+]], [x0]
; GISEL: str x[[LOADVAL]], [{{x[0-9]+}}, :lo12:var]
- %val = call i64 @llvm.aarch64.ldaxr.p0i64(i64* %addr)
+ %val = call i64 @llvm.aarch64.ldaxr.p0i64(i64* elementtype(i64) %addr)
store i64 %val, i64* @var, align 8
ret void
}
@@ -293,7 +293,7 @@ define dso_local i32 @test_store_release_i8(i32, i8 %val, i8* %addr) {
; GISEL-NOT: and
; GISEL: stlxrb w0, w1, [x2]
%extval = zext i8 %val to i64
- %res = call i32 @llvm.aarch64.stlxr.p0i8(i64 %extval, i8* %addr)
+ %res = call i32 @llvm.aarch64.stlxr.p0i8(i64 %extval, i8* elementtype(i8) %addr)
ret i32 %res
}
@@ -308,7 +308,7 @@ define dso_local i32 @test_store_release_i16(i32, i16 %val, i16* %addr) {
; GISEL-NOT: and
; GISEL: stlxrh w0, w1, [x2]
%extval = zext i16 %val to i64
- %res = call i32 @llvm.aarch64.stlxr.p0i16(i64 %extval, i16* %addr)
+ %res = call i32 @llvm.aarch64.stlxr.p0i16(i64 %extval, i16* elementtype(i16) %addr)
ret i32 %res
}
@@ -323,7 +323,7 @@ define dso_local i32 @test_store_release_i32(i32, i32 %val, i32* %addr) {
; GISEL-NOT: and
; GISEL: stlxr w0, w1, [x2]
%extval = zext i32 %val to i64
- %res = call i32 @llvm.aarch64.stlxr.p0i32(i64 %extval, i32* %addr)
+ %res = call i32 @llvm.aarch64.stlxr.p0i32(i64 %extval, i32* elementtype(i32) %addr)
ret i32 %res
}
@@ -333,7 +333,7 @@ define dso_local i32 @test_store_release_i64(i32, i64 %val, i64* %addr) {
; CHECK: stlxr w0, x1, [x2]
; GISEL-LABEL: test_store_release_i64:
; GISEL: stlxr w0, x1, [x2]
- %res = call i32 @llvm.aarch64.stlxr.p0i64(i64 %val, i64* %addr)
+ %res = call i32 @llvm.aarch64.stlxr.p0i64(i64 %val, i64* elementtype(i64) %addr)
ret i32 %res
}
diff --git a/llvm/test/CodeGen/AArch64/arm64_32-atomics.ll b/llvm/test/CodeGen/AArch64/arm64_32-atomics.ll
index 5e16a9a31ea65..6e655a0cc167a 100644
--- a/llvm/test/CodeGen/AArch64/arm64_32-atomics.ll
+++ b/llvm/test/CodeGen/AArch64/arm64_32-atomics.ll
@@ -80,7 +80,7 @@ define i8 @test_ldxr_8(i8* %addr) {
; CHECK-LABEL: test_ldxr_8:
; CHECK: ldxrb w0, [x0]
- %val = call i64 @llvm.aarch64.ldxr.p0i8(i8* %addr)
+ %val = call i64 @llvm.aarch64.ldxr.p0i8(i8* elementtype(i8) %addr)
%val8 = trunc i64 %val to i8
ret i8 %val8
}
@@ -89,7 +89,7 @@ define i16 @test_ldxr_16(i16* %addr) {
; CHECK-LABEL: test_ldxr_16:
; CHECK: ldxrh w0, [x0]
- %val = call i64 @llvm.aarch64.ldxr.p0i16(i16* %addr)
+ %val = call i64 @llvm.aarch64.ldxr.p0i16(i16* elementtype(i16) %addr)
%val16 = trunc i64 %val to i16
ret i16 %val16
}
@@ -98,7 +98,7 @@ define i32 @test_ldxr_32(i32* %addr) {
; CHECK-LABEL: test_ldxr_32:
; CHECK: ldxr w0, [x0]
- %val = call i64 @llvm.aarch64.ldxr.p0i32(i32* %addr)
+ %val = call i64 @llvm.aarch64.ldxr.p0i32(i32* elementtype(i32) %addr)
%val32 = trunc i64 %val to i32
ret i32 %val32
}
@@ -107,7 +107,7 @@ define i64 @test_ldxr_64(i64* %addr) {
; CHECK-LABEL: test_ldxr_64:
; CHECK: ldxr x0, [x0]
- %val = call i64 @llvm.aarch64.ldxr.p0i64(i64* %addr)
+ %val = call i64 @llvm.aarch64.ldxr.p0i64(i64* elementtype(i64) %addr)
ret i64 %val
}
@@ -120,7 +120,7 @@ define i8 @test_ldaxr_8(i8* %addr) {
; CHECK-LABEL: test_ldaxr_8:
; CHECK: ldaxrb w0, [x0]
- %val = call i64 @llvm.aarch64.ldaxr.p0i8(i8* %addr)
+ %val = call i64 @llvm.aarch64.ldaxr.p0i8(i8* elementtype(i8) %addr)
%val8 = trunc i64 %val to i8
ret i8 %val8
}
@@ -129,7 +129,7 @@ define i16 @test_ldaxr_16(i16* %addr) {
; CHECK-LABEL: test_ldaxr_16:
; CHECK: ldaxrh w0, [x0]
- %val = call i64 @llvm.aarch64.ldaxr.p0i16(i16* %addr)
+ %val = call i64 @llvm.aarch64.ldaxr.p0i16(i16* elementtype(i16) %addr)
%val16 = trunc i64 %val to i16
ret i16 %val16
}
@@ -138,7 +138,7 @@ define i32 @test_ldaxr_32(i32* %addr) {
; CHECK-LABEL: test_ldaxr_32:
; CHECK: ldaxr w0, [x0]
- %val = call i64 @llvm.aarch64.ldaxr.p0i32(i32* %addr)
+ %val = call i64 @llvm.aarch64.ldaxr.p0i32(i32* elementtype(i32) %addr)
%val32 = trunc i64 %val to i32
ret i32 %val32
}
@@ -147,7 +147,7 @@ define i64 @test_ldaxr_64(i64* %addr) {
; CHECK-LABEL: test_ldaxr_64:
; CHECK: ldaxr x0, [x0]
- %val = call i64 @llvm.aarch64.ldaxr.p0i64(i64* %addr)
+ %val = call i64 @llvm.aarch64.ldaxr.p0i64(i64* elementtype(i64) %addr)
ret i64 %val
}
@@ -162,7 +162,7 @@ define i32 @test_stxr_8(i8* %addr, i8 %val) {
; CHECK: mov w0, [[TMP]]
%extval = zext i8 %val to i64
- %success = call i32 @llvm.aarch64.stxr.p0i8(i64 %extval, i8* %addr)
+ %success = call i32 @llvm.aarch64.stxr.p0i8(i64 %extval, i8* elementtype(i8) %addr)
ret i32 %success
}
@@ -172,7 +172,7 @@ define i32 @test_stxr_16(i16* %addr, i16 %val) {
; CHECK: mov w0, [[TMP]]
%extval = zext i16 %val to i64
- %success = call i32 @llvm.aarch64.stxr.p0i16(i64 %extval, i16* %addr)
+ %success = call i32 @llvm.aarch64.stxr.p0i16(i64 %extval, i16* elementtype(i16) %addr)
ret i32 %success
}
@@ -182,7 +182,7 @@ define i32 @test_stxr_32(i32* %addr, i32 %val) {
; CHECK: mov w0, [[TMP]]
%extval = zext i32 %val to i64
- %success = call i32 @llvm.aarch64.stxr.p0i32(i64 %extval, i32* %addr)
+ %success = call i32 @llvm.aarch64.stxr.p0i32(i64 %extval, i32* elementtype(i32) %addr)
ret i32 %success
}
@@ -191,7 +191,7 @@ define i32 @test_stxr_64(i64* %addr, i64 %val) {
; CHECK: stxr [[TMP:w[0-9]+]], x1, [x0]
; CHECK: mov w0, [[TMP]]
- %success = call i32 @llvm.aarch64.stxr.p0i64(i64 %val, i64* %addr)
+ %success = call i32 @llvm.aarch64.stxr.p0i64(i64 %val, i64* elementtype(i64) %addr)
ret i32 %success
}
@@ -206,7 +206,7 @@ define i32 @test_stlxr_8(i8* %addr, i8 %val) {
; CHECK: mov w0, [[TMP]]
%extval = zext i8 %val to i64
- %success = call i32 @llvm.aarch64.stlxr.p0i8(i64 %extval, i8* %addr)
+ %success = call i32 @llvm.aarch64.stlxr.p0i8(i64 %extval, i8* elementtype(i8) %addr)
ret i32 %success
}
@@ -216,7 +216,7 @@ define i32 @test_stlxr_16(i16* %addr, i16 %val) {
; CHECK: mov w0, [[TMP]]
%extval = zext i16 %val to i64
- %success = call i32 @llvm.aarch64.stlxr.p0i16(i64 %extval, i16* %addr)
+ %success = call i32 @llvm.aarch64.stlxr.p0i16(i64 %extval, i16* elementtype(i16) %addr)
ret i32 %success
}
@@ -226,7 +226,7 @@ define i32 @test_stlxr_32(i32* %addr, i32 %val) {
; CHECK: mov w0, [[TMP]]
%extval = zext i32 %val to i64
- %success = call i32 @llvm.aarch64.stlxr.p0i32(i64 %extval, i32* %addr)
+ %success = call i32 @llvm.aarch64.stlxr.p0i32(i64 %extval, i32* elementtype(i32) %addr)
ret i32 %success
}
@@ -235,7 +235,7 @@ define i32 @test_stlxr_64(i64* %addr, i64 %val) {
; CHECK: stlxr [[TMP:w[0-9]+]], x1, [x0]
; CHECK: mov w0, [[TMP]]
- %success = call i32 @llvm.aarch64.stlxr.p0i64(i64 %val, i64* %addr)
+ %success = call i32 @llvm.aarch64.stlxr.p0i64(i64 %val, i64* elementtype(i64) %addr)
ret i32 %success
}
diff --git a/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll b/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll
index 065878223e6a7..2aafd2a921aa8 100644
--- a/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll
+++ b/llvm/test/Transforms/AtomicExpand/AArch64/expand-atomicrmw-xchg-fp.ll
@@ -8,10 +8,10 @@ define void @atomic_swap_f16(half* %ptr, half %val) nounwind {
; CHECK-NEXT: [[TMP2:%.*]] = bitcast half [[VAL:%.*]] to i16
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i16(i16* [[TMP1]])
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i16(i16* elementtype(i16) [[TMP1]])
; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i16
; CHECK-NEXT: [[TMP5:%.*]] = zext i16 [[TMP2]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.aarch64.stxr.p0i16(i64 [[TMP5]], i16* [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.aarch64.stxr.p0i16(i64 [[TMP5]], i16* elementtype(i16) [[TMP1]])
; CHECK-NEXT: [[TRYAGAIN:%.*]] = icmp ne i32 [[TMP6]], 0
; CHECK-NEXT: br i1 [[TRYAGAIN]], label [[ATOMICRMW_START]], label [[ATOMICRMW_END:%.*]]
; CHECK: atomicrmw.end:
@@ -35,10 +35,10 @@ define void @atomic_swap_f32(float* %ptr, float %val) nounwind {
; CHECK-NEXT: [[TMP2:%.*]] = bitcast float [[VAL:%.*]] to i32
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i32(i32* [[TMP1]])
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i32(i32* elementtype(i32) [[TMP1]])
; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; CHECK-NEXT: [[TMP5:%.*]] = zext i32 [[TMP2]] to i64
-; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.aarch64.stxr.p0i32(i64 [[TMP5]], i32* [[TMP1]])
+; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.aarch64.stxr.p0i32(i64 [[TMP5]], i32* elementtype(i32) [[TMP1]])
; CHECK-NEXT: [[TRYAGAIN:%.*]] = icmp ne i32 [[TMP6]], 0
; CHECK-NEXT: br i1 [[TRYAGAIN]], label [[ATOMICRMW_START]], label [[ATOMICRMW_END:%.*]]
; CHECK: atomicrmw.end:
@@ -62,8 +62,8 @@ define void @atomic_swap_f64(double* %ptr, double %val) nounwind {
; CHECK-NEXT: [[TMP2:%.*]] = bitcast double [[VAL:%.*]] to i64
; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
; CHECK: atomicrmw.start:
-; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i64(i64* [[TMP1]])
-; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.aarch64.stxr.p0i64(i64 [[TMP2]], i64* [[TMP1]])
+; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.aarch64.ldaxr.p0i64(i64* elementtype(i64) [[TMP1]])
+; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.aarch64.stxr.p0i64(i64 [[TMP2]], i64* elementtype(i64) [[TMP1]])
; CHECK-NEXT: [[TRYAGAIN:%.*]] = icmp ne i32 [[TMP4]], 0
; CHECK-NEXT: br i1 [[TRYAGAIN]], label [[ATOMICRMW_START]], label [[ATOMICRMW_END:%.*]]
; CHECK: atomicrmw.end:
diff --git a/llvm/test/Transforms/ConstantHoisting/AArch64/const-hoist-intrinsics.ll b/llvm/test/Transforms/ConstantHoisting/AArch64/const-hoist-intrinsics.ll
index 92007ef424132..e1333ace27d77 100644
--- a/llvm/test/Transforms/ConstantHoisting/AArch64/const-hoist-intrinsics.ll
+++ b/llvm/test/Transforms/ConstantHoisting/AArch64/const-hoist-intrinsics.ll
@@ -9,26 +9,26 @@ define void @test_stxr(i64* %ptr) {
; CHECK-NEXT: [[CONST:%.*]] = bitcast i64 -9223372036317904832 to i64
; CHECK-NEXT: [[PTR_0:%.*]] = getelementptr i64, i64* [[PTR:%.*]], i64 0
; CHECK-NEXT: [[CONST_MAT:%.*]] = add i64 [[CONST]], -64
-; CHECK-NEXT: [[BAR_0:%.*]] = call i32 @llvm.aarch64.stxr.p0i64(i64 [[CONST_MAT]], i64* [[PTR_0]])
+; CHECK-NEXT: [[BAR_0:%.*]] = call i32 @llvm.aarch64.stxr.p0i64(i64 [[CONST_MAT]], i64* elementtype(i64) [[PTR_0]])
; CHECK-NEXT: [[PTR_1:%.*]] = getelementptr i64, i64* [[PTR]], i64 1
-; CHECK-NEXT: [[BAR_1:%.*]] = call i32 @llvm.aarch64.stxr.p0i64(i64 [[CONST]], i64* [[PTR_1]])
+; CHECK-NEXT: [[BAR_1:%.*]] = call i32 @llvm.aarch64.stxr.p0i64(i64 [[CONST]], i64* elementtype(i64) [[PTR_1]])
; CHECK-NEXT: [[PTR_2:%.*]] = getelementptr i64, i64* [[PTR]], i64 2
; CHECK-NEXT: [[CONST_MAT1:%.*]] = add i64 [[CONST]], 64
-; CHECK-NEXT: [[BAR_2:%.*]] = call i32 @llvm.aarch64.stxr.p0i64(i64 [[CONST_MAT1]], i64* [[PTR_2]])
+; CHECK-NEXT: [[BAR_2:%.*]] = call i32 @llvm.aarch64.stxr.p0i64(i64 [[CONST_MAT1]], i64* elementtype(i64) [[PTR_2]])
; CHECK-NEXT: [[PTR_3:%.*]] = getelementptr i64, i64* [[PTR]], i64 3
; CHECK-NEXT: [[CONST_MAT2:%.*]] = add i64 [[CONST]], 128
-; CHECK-NEXT: [[BAR_3:%.*]] = call i32 @llvm.aarch64.stxr.p0i64(i64 [[CONST_MAT2]], i64* [[PTR_3]])
+; CHECK-NEXT: [[BAR_3:%.*]] = call i32 @llvm.aarch64.stxr.p0i64(i64 [[CONST_MAT2]], i64* elementtype(i64) [[PTR_3]])
; CHECK-NEXT: ret void
;
entry:
%ptr.0 = getelementptr i64, i64* %ptr, i64 0
- %bar.0 = call i32 @llvm.aarch64.stxr.p0i64(i64 -9223372036317904896, i64* %ptr.0)
+ %bar.0 = call i32 @llvm.aarch64.stxr.p0i64(i64 -9223372036317904896, i64* elementtype(i64) %ptr.0)
%ptr.1 = getelementptr i64, i64* %ptr, i64 1
- %bar.1 = call i32 @llvm.aarch64.stxr.p0i64(i64 -9223372036317904832, i64* %ptr.1)
+ %bar.1 = call i32 @llvm.aarch64.stxr.p0i64(i64 -9223372036317904832, i64* elementtype(i64) %ptr.1)
%ptr.2 = getelementptr i64, i64* %ptr, i64 2
- %bar.2 = call i32 @llvm.aarch64.stxr.p0i64(i64 -9223372036317904768, i64* %ptr.2)
+ %bar.2 = call i32 @llvm.aarch64.stxr.p0i64(i64 -9223372036317904768, i64* elementtype(i64) %ptr.2)
%ptr.3 = getelementptr i64, i64* %ptr, i64 3
- %bar.3 = call i32 @llvm.aarch64.stxr.p0i64(i64 -9223372036317904704, i64* %ptr.3)
+ %bar.3 = call i32 @llvm.aarch64.stxr.p0i64(i64 -9223372036317904704, i64* elementtype(i64) %ptr.3)
ret void
}
diff --git a/llvm/test/Verifier/aarch64-ldstxr.ll b/llvm/test/Verifier/aarch64-ldstxr.ll
new file mode 100644
index 0000000000000..753c2ca78ff73
--- /dev/null
+++ b/llvm/test/Verifier/aarch64-ldstxr.ll
@@ -0,0 +1,19 @@
+; RUN: not opt -passes=verify -S < %s 2>&1 | FileCheck %s
+
+define void @f(i32* %p) {
+; CHECK: Intrinsic requires elementtype attribute on first argument
+ %a = call i64 @llvm.aarch64.ldxr.p0i32(i32* %p)
+; CHECK: Intrinsic requires elementtype attribute on second argument
+ %c = call i32 @llvm.aarch64.stxr.p0i32(i64 0, i32* %p)
+
+; CHECK: Intrinsic requires elementtype attribute on first argument
+ %a2 = call i64 @llvm.aarch64.ldaxr.p0i32(i32* %p)
+; CHECK: Intrinsic requires elementtype attribute on second argument
+ %c2 = call i32 @llvm.aarch64.stlxr.p0i32(i64 0, i32* %p)
+ ret void
+}
+
+declare i64 @llvm.aarch64.ldxr.p0i32(i32*)
+declare i64 @llvm.aarch64.ldaxr.p0i32(i32*)
+declare i32 @llvm.aarch64.stxr.p0i32(i64, i32*)
+declare i32 @llvm.aarch64.stlxr.p0i32(i64, i32*)