[clang] c568927 - [SystemZ] Properly support 16 byte atomic int/fp types and ops. (#73134)
via cfe-commits
cfe-commits at lists.llvm.org
Tue Dec 5 08:17:27 PST 2023
Author: Jonas Paulsson
Date: 2023-12-05T17:17:21+01:00
New Revision: c568927f3e2e7d9804ea74ecbf11c16c014ddcbc
URL: https://github.com/llvm/llvm-project/commit/c568927f3e2e7d9804ea74ecbf11c16c014ddcbc
DIFF: https://github.com/llvm/llvm-project/commit/c568927f3e2e7d9804ea74ecbf11c16c014ddcbc.diff
LOG: [SystemZ] Properly support 16 byte atomic int/fp types and ops. (#73134)
- The Clang front end now has MaxAtomicPromoteWidth / MaxAtomicInlineWidth set to 128, and produces IR
instead of calls to the __atomic intrinsics for 16-byte accesses as well (see the sketch below).
- Atomic __int128 (and long double) variables are now aligned to 16 bytes by default (like GCC 14).
- The AtomicExpand pass now expands 16-byte operations as well.
- Tests added for the __atomic builtins for all integer widths, and for __atomic_is_lock_free and friends.
- TODO: With this patch, the AtomicExpand pass handles the expansion of i128 atomicrmw operations. As a next step,
smaller integer types should also be handled this way instead of by the backend.
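
As a minimal sketch of the new behavior (a hypothetical example, not part of the patch; the file name
example.c and the function names are assumptions), compiled with something like
"clang --target=s390x-linux-gnu -O1 -S -emit-llvm example.c": a seq_cst operation on a 16-byte-aligned
_Atomic __int128 should now be emitted as inline IR, while the same builtin on an 8-byte-aligned plain
__int128 should still lower to an __atomic_* library call, which is what the tests below verify.

#include <stdatomic.h>

_Atomic __int128 Counter;   /* 16-byte aligned by default with this patch */
__int128 Plain;             /* plain __int128 keeps its default 8-byte alignment */

__int128 bump(void) {
  /* Expected to lower to an inline "atomicrmw add ptr @Counter, i128 1 seq_cst, align 16". */
  return atomic_fetch_add(&Counter, (__int128)1);
}

__int128 load_plain(void) {
  /* Under-aligned (8 bytes): expected to remain a call to the __atomic_load() library routine. */
  return __atomic_load_n(&Plain, memory_order_seq_cst);
}

The AtomicExpand pass then expands the inline i128 atomicrmw, typically into a cmpxchg loop that the
SystemZ backend can implement with CDSG.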
Added:
clang/test/CodeGen/SystemZ/atomic_is_lock_free.c
clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i128-16Al.c
clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i128-8Al.c
clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i16.c
clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i32.c
clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i64.c
clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i8.c
Modified:
clang/lib/Basic/Targets/SystemZ.h
llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
llvm/test/CodeGen/SystemZ/atomicrmw-ops-i128.ll
llvm/test/CodeGen/SystemZ/atomicrmw-xchg-07.ll
Removed:
################################################################################
diff --git a/clang/lib/Basic/Targets/SystemZ.h b/clang/lib/Basic/Targets/SystemZ.h
index 9ba255745cf2c..e4ec338880f21 100644
--- a/clang/lib/Basic/Targets/SystemZ.h
+++ b/clang/lib/Basic/Targets/SystemZ.h
@@ -60,7 +60,7 @@ class LLVM_LIBRARY_VISIBILITY SystemZTargetInfo : public TargetInfo {
resetDataLayout("E-m:e-i1:8:16-i8:8:16-i64:64-f128:64"
"-v128:64-a:8:16-n32:64");
}
- MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 128;
HasStrictFP = true;
}
diff --git a/clang/test/CodeGen/SystemZ/atomic_is_lock_free.c b/clang/test/CodeGen/SystemZ/atomic_is_lock_free.c
new file mode 100644
index 0000000000000..32c436eaf36dd
--- /dev/null
+++ b/clang/test/CodeGen/SystemZ/atomic_is_lock_free.c
@@ -0,0 +1,98 @@
+// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
+//
+// Test __atomic_is_lock_free() and friends.
+
+#include <stdatomic.h>
+#include <stdint.h>
+
+typedef __attribute__((aligned(16))) __int128 __int128_Al16;
+
+_Atomic __int128 Int128_Atomic;
+__int128_Al16 Int128_Al16;
+__int128 Int128;
+struct { int I[3]; } _Atomic AtomicStruct;
+_Atomic long double Atomic_fp128; // Also check the alignment of this.
+
+// Check the alignments of the variables. @AtomicStruct gets padded and its size
+// and alignment become 16. Only a power-of-2 size is considered, so 16 (not
+// 12) needs to be specified with the intrinsics below.
+//
+// CHECK: %struct.anon = type { [3 x i32] }
+// CHECK: @Int128 = {{.*}} i128 0, align 8
+// CHECK: @Int128_Atomic = {{.*}} i128 0, align 16
+// CHECK: @Int128_Al16 = {{.*}} i128 0, align 16
+// CHECK: @AtomicStruct = {{.*}} { %struct.anon, [4 x i8] } zeroinitializer, align 16
+// CHECK: @Atomic_fp128 = {{.*}} fp128 0xL00000000000000000000000000000000, align 16
+
+
+// CHECK-LABEL: @fun0
+// CHECK: ret i1 true
+_Bool fun0() {
+ return __atomic_is_lock_free(16, &Int128_Atomic);
+}
+
+// CHECK-LABEL: @fun1
+// CHECK: ret i1 true
+_Bool fun1() {
+ return __atomic_always_lock_free(16, &Int128_Atomic);
+}
+
+// CHECK-LABEL: @fun2
+// CHECK: ret i1 true
+_Bool fun2() {
+ return __atomic_is_lock_free(16, &Int128_Al16);
+}
+
+// CHECK-LABEL: @fun3
+// CHECK: ret i1 true
+_Bool fun3() {
+ return __atomic_always_lock_free(16, &Int128_Al16);
+}
+
+// CHECK-LABEL: @fun4
+// CHECK: call zeroext i1 @__atomic_is_lock_free
+_Bool fun4() {
+ return __atomic_is_lock_free(16, &Int128);
+}
+
+// CHECK-LABEL: @fun5
+// CHECK: ret i1 false
+_Bool fun5() {
+ return __atomic_always_lock_free(16, &Int128);
+}
+
+// CHECK-LABEL: @fun6
+// CHECK: ret i1 true
+_Bool fun6() {
+ return __atomic_is_lock_free(16, 0);
+}
+
+// CHECK-LABEL: @fun7
+// CHECK: ret i1 true
+_Bool fun7() {
+ return __atomic_always_lock_free(16, 0);
+}
+
+// CHECK-LABEL: @fun8
+// CHECK: ret i1 true
+_Bool fun8() {
+ return __atomic_is_lock_free(16, &AtomicStruct);
+}
+
+// CHECK-LABEL: @fun9
+// CHECK: ret i1 true
+_Bool fun9() {
+ return __atomic_always_lock_free(16, &AtomicStruct);
+}
+
+// CHECK-LABEL: @fun10
+// CHECK: ret i1 true
+_Bool fun10() {
+ return atomic_is_lock_free(&Int128_Atomic);
+}
+
+// CHECK-LABEL: @fun11
+// CHECK: ret i1 true
+_Bool fun11() {
+ return __c11_atomic_is_lock_free(16);
+}
diff --git a/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i128-16Al.c b/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i128-16Al.c
new file mode 100644
index 0000000000000..e3db2063312d2
--- /dev/null
+++ b/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i128-16Al.c
@@ -0,0 +1,257 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
+//
+// Test GNU atomic builtins for __int128 aligned to 16 bytes, which should be
+// expanded to LLVM IR by the front end.
+
+#include <stdatomic.h>
+#include <stdint.h>
+
+__int128 Ptr __attribute__((aligned(16)));
+__int128 Ret __attribute__((aligned(16)));
+__int128 Val __attribute__((aligned(16)));
+__int128 Exp __attribute__((aligned(16)));
+__int128 Des __attribute__((aligned(16)));
+
+// CHECK-LABEL: @f1(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load atomic i128, ptr @Ptr seq_cst, align 16
+// CHECK-NEXT: store i128 [[TMP0]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2:![0-9]+]]
+// CHECK-NEXT: ret void
+//
+__int128 f1() {
+ return __atomic_load_n(&Ptr, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f2(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load atomic i128, ptr @Ptr seq_cst, align 16
+// CHECK-NEXT: store i128 [[TMP0]], ptr @Ret, align 16
+// CHECK-NEXT: store i128 [[TMP0]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f2() {
+ __atomic_load(&Ptr, &Ret, memory_order_seq_cst);
+ return Ret;
+}
+
+// CHECK-LABEL: @f3(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT: store atomic i128 [[TMP0]], ptr @Ptr seq_cst, align 16
+// CHECK-NEXT: ret void
+//
+void f3() {
+ __atomic_store_n(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f4(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16
+// CHECK-NEXT: store atomic i128 [[TMP0]], ptr @Ptr seq_cst, align 16
+// CHECK-NEXT: ret void
+//
+void f4() {
+ __atomic_store(&Ptr, &Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f5(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f5() {
+ return __atomic_exchange_n(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f6(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT: store i128 [[TMP1]], ptr @Ret, align 16
+// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f6() {
+ __atomic_exchange(&Ptr, &Val, &Ret, memory_order_seq_cst);
+ return Ret;
+}
+
+// CHECK-LABEL: @f7(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Des, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr @Exp, align 16
+// CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr @Ptr, i128 [[TMP1]], i128 [[TMP0]] seq_cst seq_cst, align 16
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i128, i1 } [[TMP2]], 1
+// CHECK-NEXT: br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// CHECK: cmpxchg.store_expected:
+// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i128, i1 } [[TMP2]], 0
+// CHECK-NEXT: store i128 [[TMP4]], ptr @Exp, align 16
+// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
+// CHECK: cmpxchg.continue:
+// CHECK-NEXT: ret i1 [[TMP3]]
+//
+_Bool f7() {
+ return __atomic_compare_exchange_n(&Ptr, &Exp, Des, 0,
+ memory_order_seq_cst, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Exp, align 16
+// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr @Des, align 16
+// CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr @Ptr, i128 [[TMP0]], i128 [[TMP1]] seq_cst seq_cst, align 16
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i128, i1 } [[TMP2]], 1
+// CHECK-NEXT: br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// CHECK: cmpxchg.store_expected:
+// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i128, i1 } [[TMP2]], 0
+// CHECK-NEXT: store i128 [[TMP4]], ptr @Exp, align 16
+// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
+// CHECK: cmpxchg.continue:
+// CHECK-NEXT: ret i1 [[TMP3]]
+//
+_Bool f8() {
+ return __atomic_compare_exchange(&Ptr, &Exp, &Des, 0,
+ memory_order_seq_cst, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f9(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw add ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT: [[TMP2:%.*]] = add i128 [[TMP1]], [[TMP0]]
+// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f9() {
+ return __atomic_add_fetch(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f10(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw sub ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT: [[TMP2:%.*]] = sub i128 [[TMP1]], [[TMP0]]
+// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f10() {
+ return __atomic_sub_fetch(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f11(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw and ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT: [[TMP2:%.*]] = and i128 [[TMP1]], [[TMP0]]
+// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f11() {
+ return __atomic_and_fetch(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f12(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xor ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT: [[TMP2:%.*]] = xor i128 [[TMP1]], [[TMP0]]
+// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f12() {
+ return __atomic_xor_fetch(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f13(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw or ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT: [[TMP2:%.*]] = or i128 [[TMP1]], [[TMP0]]
+// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f13() {
+ return __atomic_or_fetch(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f14(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw nand ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT: [[TMP2:%.*]] = and i128 [[TMP1]], [[TMP0]]
+// CHECK-NEXT: [[TMP3:%.*]] = xor i128 [[TMP2]], -1
+// CHECK-NEXT: store i128 [[TMP3]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f14() {
+ return __atomic_nand_fetch(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f15(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw add ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f15() {
+ return __atomic_fetch_add(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw sub ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f16() {
+ return __atomic_fetch_sub(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f17(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw and ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f17() {
+ return __atomic_fetch_and(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f18(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xor ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f18() {
+ return __atomic_fetch_xor(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f19(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw or ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f19() {
+ return __atomic_fetch_or(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f20(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw nand ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f20() {
+ return __atomic_fetch_nand(&Ptr, Val, memory_order_seq_cst);
+}
diff --git a/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i128-8Al.c b/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i128-8Al.c
new file mode 100644
index 0000000000000..e38e6572bd58f
--- /dev/null
+++ b/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i128-8Al.c
@@ -0,0 +1,301 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
+//
+// Test GNU atomic builtins for __int128 (with default alignment of 8 bytes
+// only), resulting in libcalls.
+
+#include <stdatomic.h>
+#include <stdint.h>
+
+__int128 Ptr;
+__int128 Ret;
+__int128 Val;
+__int128 Exp;
+__int128 Des;
+
+// TODO: This test and several more below still have an unnecessary alloca
+// remaining. This is due to 369c9b7, which changed the behavior of the MemCpyOpt
+// pass. It seems that a 'writable' attribute now needs to be added to the argument
+// in order for this optimization to proceed.
+
+// CHECK-LABEL: @f1(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: call void @__atomic_load(i64 noundef 16, ptr noundef nonnull @Ptr, ptr noundef nonnull [[ATOMIC_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr [[ATOMIC_TEMP]], align 8, !tbaa [[TBAA2:![0-9]+]]
+// CHECK-NEXT: store i128 [[TMP0]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f1() {
+ return __atomic_load_n(&Ptr, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f2(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void @__atomic_load(i64 noundef 16, ptr noundef nonnull @Ptr, ptr noundef nonnull @Ret, i32 noundef signext 5)
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Ret, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: store i128 [[TMP0]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f2() {
+ __atomic_load(&Ptr, &Ret, memory_order_seq_cst);
+ return Ret;
+}
+
+// CHECK-LABEL: @f3(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: store i128 [[TMP0]], ptr [[DOTATOMICTMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: call void @__atomic_store(i64 noundef 16, ptr noundef nonnull @Ptr, ptr noundef nonnull [[DOTATOMICTMP]], i32 noundef signext 5)
+// CHECK-NEXT: ret void
+//
+void f3() {
+ __atomic_store_n(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f4(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void @__atomic_store(i64 noundef 16, ptr noundef nonnull @Ptr, ptr noundef nonnull @Val, i32 noundef signext 5)
+// CHECK-NEXT: ret void
+//
+void f4() {
+ __atomic_store(&Ptr, &Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f5(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: store i128 [[TMP0]], ptr [[DOTATOMICTMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: call void @__atomic_exchange(i64 noundef 16, ptr noundef nonnull @Ptr, ptr noundef nonnull [[DOTATOMICTMP]], ptr noundef nonnull [[ATOMIC_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr [[ATOMIC_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f5() {
+ return __atomic_exchange_n(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f6(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: tail call void @__atomic_exchange(i64 noundef 16, ptr noundef nonnull @Ptr, ptr noundef nonnull @Val, ptr noundef nonnull @Ret, i32 noundef signext 5)
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Ret, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: store i128 [[TMP0]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f6() {
+ __atomic_exchange(&Ptr, &Val, &Ret, memory_order_seq_cst);
+ return Ret;
+}
+
+// CHECK-LABEL: @f7(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Des, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: store i128 [[TMP0]], ptr [[DOTATOMICTMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i64 noundef 16, ptr noundef nonnull @Ptr, ptr noundef nonnull @Exp, ptr noundef nonnull [[DOTATOMICTMP]], i32 noundef signext 5, i32 noundef signext 5)
+// CHECK-NEXT: ret i1 [[CALL]]
+//
+_Bool f7() {
+ return __atomic_compare_exchange_n(&Ptr, &Exp, Des, 0,
+ memory_order_seq_cst, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[CALL:%.*]] = tail call zeroext i1 @__atomic_compare_exchange(i64 noundef 16, ptr noundef nonnull @Ptr, ptr noundef nonnull @Exp, ptr noundef nonnull @Des, i32 noundef signext 5, i32 noundef signext 5)
+// CHECK-NEXT: ret i1 [[CALL]]
+//
+_Bool f8() {
+ return __atomic_compare_exchange(&Ptr, &Exp, &Des, 0,
+ memory_order_seq_cst, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f9(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: store i128 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: call void @__atomic_fetch_add_16(ptr nonnull sret(i128) align 8 [[TMP]], ptr noundef nonnull @Ptr, ptr noundef nonnull [[INDIRECT_ARG_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr [[TMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: [[TMP2:%.*]] = add i128 [[TMP1]], [[TMP0]]
+// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f9() {
+ return __atomic_add_fetch(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f10(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: store i128 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: call void @__atomic_fetch_sub_16(ptr nonnull sret(i128) align 8 [[TMP]], ptr noundef nonnull @Ptr, ptr noundef nonnull [[INDIRECT_ARG_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr [[TMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: [[TMP2:%.*]] = sub i128 [[TMP1]], [[TMP0]]
+// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f10() {
+ return __atomic_sub_fetch(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f11(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: store i128 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: call void @__atomic_fetch_and_16(ptr nonnull sret(i128) align 8 [[TMP]], ptr noundef nonnull @Ptr, ptr noundef nonnull [[INDIRECT_ARG_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr [[TMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: [[TMP2:%.*]] = and i128 [[TMP1]], [[TMP0]]
+// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f11() {
+ return __atomic_and_fetch(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f12(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: store i128 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: call void @__atomic_fetch_xor_16(ptr nonnull sret(i128) align 8 [[TMP]], ptr noundef nonnull @Ptr, ptr noundef nonnull [[INDIRECT_ARG_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr [[TMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: [[TMP2:%.*]] = xor i128 [[TMP1]], [[TMP0]]
+// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f12() {
+ return __atomic_xor_fetch(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f13(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: store i128 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: call void @__atomic_fetch_or_16(ptr nonnull sret(i128) align 8 [[TMP]], ptr noundef nonnull @Ptr, ptr noundef nonnull [[INDIRECT_ARG_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr [[TMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: [[TMP2:%.*]] = or i128 [[TMP1]], [[TMP0]]
+// CHECK-NEXT: store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f13() {
+ return __atomic_or_fetch(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f14(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: store i128 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: call void @__atomic_fetch_nand_16(ptr nonnull sret(i128) align 8 [[TMP]], ptr noundef nonnull @Ptr, ptr noundef nonnull [[INDIRECT_ARG_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr [[TMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: [[TMP2:%.*]] = and i128 [[TMP1]], [[TMP0]]
+// CHECK-NEXT: [[TMP3:%.*]] = xor i128 [[TMP2]], -1
+// CHECK-NEXT: store i128 [[TMP3]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f14() {
+ return __atomic_nand_fetch(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f15(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: store i128 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: call void @__atomic_fetch_add_16(ptr nonnull sret(i128) align 8 [[TMP]], ptr noundef nonnull @Ptr, ptr noundef nonnull [[INDIRECT_ARG_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr [[TMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f15() {
+ return __atomic_fetch_add(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: store i128 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: call void @__atomic_fetch_sub_16(ptr nonnull sret(i128) align 8 [[TMP]], ptr noundef nonnull @Ptr, ptr noundef nonnull [[INDIRECT_ARG_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr [[TMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f16() {
+ return __atomic_fetch_sub(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f17(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: store i128 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: call void @__atomic_fetch_and_16(ptr nonnull sret(i128) align 8 [[TMP]], ptr noundef nonnull @Ptr, ptr noundef nonnull [[INDIRECT_ARG_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr [[TMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f17() {
+ return __atomic_fetch_and(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f18(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: store i128 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: call void @__atomic_fetch_xor_16(ptr nonnull sret(i128) align 8 [[TMP]], ptr noundef nonnull @Ptr, ptr noundef nonnull [[INDIRECT_ARG_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr [[TMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f18() {
+ return __atomic_fetch_xor(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f19(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: store i128 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: call void @__atomic_fetch_or_16(ptr nonnull sret(i128) align 8 [[TMP]], ptr noundef nonnull @Ptr, ptr noundef nonnull [[INDIRECT_ARG_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr [[TMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f19() {
+ return __atomic_fetch_or(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f20(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: store i128 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: call void @__atomic_fetch_nand_16(ptr nonnull sret(i128) align 8 [[TMP]], ptr noundef nonnull @Ptr, ptr noundef nonnull [[INDIRECT_ARG_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT: [[TMP1:%.*]] = load i128, ptr [[TMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT: ret void
+//
+__int128 f20() {
+ return __atomic_fetch_nand(&Ptr, Val, memory_order_seq_cst);
+}
diff --git a/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i16.c b/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i16.c
new file mode 100644
index 0000000000000..7c6a82f14197a
--- /dev/null
+++ b/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i16.c
@@ -0,0 +1,219 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
+//
+// Test GNU atomic builtins for int16_t.
+
+#include <stdatomic.h>
+#include <stdint.h>
+
+// CHECK-LABEL: @f1(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load atomic i16, ptr [[PTR:%.*]] seq_cst, align 2
+// CHECK-NEXT: ret i16 [[TMP0]]
+//
+int16_t f1(int16_t *Ptr) {
+ return __atomic_load_n(Ptr, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f2(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load atomic i16, ptr [[PTR:%.*]] seq_cst, align 2
+// CHECK-NEXT: store i16 [[TMP0]], ptr [[RET:%.*]], align 2
+// CHECK-NEXT: ret i16 [[TMP0]]
+//
+int16_t f2(int16_t *Ptr, int16_t *Ret) {
+ __atomic_load(Ptr, Ret, memory_order_seq_cst);
+ return *Ret;
+}
+
+// CHECK-LABEL: @f3(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: store atomic i16 [[VAL:%.*]], ptr [[PTR:%.*]] seq_cst, align 2
+// CHECK-NEXT: ret void
+//
+void f3(int16_t *Ptr, int16_t Val) {
+ __atomic_store_n(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f4(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[VAL:%.*]], align 2
+// CHECK-NEXT: store atomic i16 [[TMP0]], ptr [[PTR:%.*]] seq_cst, align 2
+// CHECK-NEXT: ret void
+//
+void f4(int16_t *Ptr, int16_t *Val) {
+ __atomic_store(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f5(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT: ret i16 [[TMP0]]
+//
+int16_t f5(int16_t *Ptr, int16_t Val) {
+ return __atomic_exchange_n(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f6(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[VAL:%.*]], align 2
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i16 [[TMP0]] seq_cst, align 2
+// CHECK-NEXT: store i16 [[TMP1]], ptr [[RET:%.*]], align 2
+// CHECK-NEXT: ret i16 [[TMP1]]
+//
+int16_t f6(int16_t *Ptr, int16_t *Val, int16_t *Ret) {
+ __atomic_exchange(Ptr, Val, Ret, memory_order_seq_cst);
+ return *Ret;
+}
+
+// CHECK-LABEL: @f7(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[EXP:%.*]], align 2
+// CHECK-NEXT: [[TMP1:%.*]] = cmpxchg ptr [[PTR:%.*]], i16 [[TMP0]], i16 [[DES:%.*]] seq_cst seq_cst, align 2
+// CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i16, i1 } [[TMP1]], 1
+// CHECK-NEXT: br i1 [[TMP2]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// CHECK: cmpxchg.store_expected:
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i16, i1 } [[TMP1]], 0
+// CHECK-NEXT: store i16 [[TMP3]], ptr [[EXP]], align 2
+// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
+// CHECK: cmpxchg.continue:
+// CHECK-NEXT: ret i1 [[TMP2]]
+//
+_Bool f7(int16_t *Ptr, int16_t *Exp, int16_t Des) {
+ return __atomic_compare_exchange_n(Ptr, Exp, Des, 0,
+ memory_order_seq_cst, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[EXP:%.*]], align 2
+// CHECK-NEXT: [[TMP1:%.*]] = load i16, ptr [[DES:%.*]], align 2
+// CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr [[PTR:%.*]], i16 [[TMP0]], i16 [[TMP1]] seq_cst seq_cst, align 2
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i16, i1 } [[TMP2]], 1
+// CHECK-NEXT: br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// CHECK: cmpxchg.store_expected:
+// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i16, i1 } [[TMP2]], 0
+// CHECK-NEXT: store i16 [[TMP4]], ptr [[EXP]], align 2
+// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
+// CHECK: cmpxchg.continue:
+// CHECK-NEXT: ret i1 [[TMP3]]
+//
+_Bool f8(int16_t *Ptr, int16_t *Exp, int16_t *Des) {
+ return __atomic_compare_exchange(Ptr, Exp, Des, 0,
+ memory_order_seq_cst, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f9(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT: [[TMP1:%.*]] = add i16 [[TMP0]], [[VAL]]
+// CHECK-NEXT: ret i16 [[TMP1]]
+//
+int16_t f9(int16_t *Ptr, int16_t Val) {
+ return __atomic_add_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f10(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT: [[TMP1:%.*]] = sub i16 [[TMP0]], [[VAL]]
+// CHECK-NEXT: ret i16 [[TMP1]]
+//
+int16_t f10(int16_t *Ptr, int16_t Val) {
+ return __atomic_sub_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f11(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT: [[TMP1:%.*]] = and i16 [[TMP0]], [[VAL]]
+// CHECK-NEXT: ret i16 [[TMP1]]
+//
+int16_t f11(int16_t *Ptr, int16_t Val) {
+ return __atomic_and_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f12(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT: [[TMP1:%.*]] = xor i16 [[TMP0]], [[VAL]]
+// CHECK-NEXT: ret i16 [[TMP1]]
+//
+int16_t f12(int16_t *Ptr, int16_t Val) {
+ return __atomic_xor_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f13(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT: [[TMP1:%.*]] = or i16 [[TMP0]], [[VAL]]
+// CHECK-NEXT: ret i16 [[TMP1]]
+//
+int16_t f13(int16_t *Ptr, int16_t Val) {
+ return __atomic_or_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f14(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT: [[TMP1:%.*]] = and i16 [[TMP0]], [[VAL]]
+// CHECK-NEXT: [[TMP2:%.*]] = xor i16 [[TMP1]], -1
+// CHECK-NEXT: ret i16 [[TMP2]]
+//
+int16_t f14(int16_t *Ptr, int16_t Val) {
+ return __atomic_nand_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f15(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT: ret i16 [[TMP0]]
+//
+int16_t f15(int16_t *Ptr, int16_t Val) {
+ return __atomic_fetch_add(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT: ret i16 [[TMP0]]
+//
+int16_t f16(int16_t *Ptr, int16_t Val) {
+ return __atomic_fetch_sub(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f17(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT: ret i16 [[TMP0]]
+//
+int16_t f17(int16_t *Ptr, int16_t Val) {
+ return __atomic_fetch_and(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f18(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT: ret i16 [[TMP0]]
+//
+int16_t f18(int16_t *Ptr, int16_t Val) {
+ return __atomic_fetch_xor(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f19(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT: ret i16 [[TMP0]]
+//
+int16_t f19(int16_t *Ptr, int16_t Val) {
+ return __atomic_fetch_or(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f20(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT: ret i16 [[TMP0]]
+//
+int16_t f20(int16_t *Ptr, int16_t Val) {
+ return __atomic_fetch_nand(Ptr, Val, memory_order_seq_cst);
+}
diff --git a/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i32.c b/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i32.c
new file mode 100644
index 0000000000000..ba630e7c952e5
--- /dev/null
+++ b/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i32.c
@@ -0,0 +1,219 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
+//
+// Test GNU atomic builtins for int32_t.
+
+#include <stdatomic.h>
+#include <stdint.h>
+
+// CHECK-LABEL: @f1(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load atomic i32, ptr [[PTR:%.*]] seq_cst, align 4
+// CHECK-NEXT: ret i32 [[TMP0]]
+//
+int32_t f1(int32_t *Ptr) {
+ return __atomic_load_n(Ptr, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f2(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load atomic i32, ptr [[PTR:%.*]] seq_cst, align 4
+// CHECK-NEXT: store i32 [[TMP0]], ptr [[RET:%.*]], align 4
+// CHECK-NEXT: ret i32 [[TMP0]]
+//
+int32_t f2(int32_t *Ptr, int32_t *Ret) {
+ __atomic_load(Ptr, Ret, memory_order_seq_cst);
+ return *Ret;
+}
+
+// CHECK-LABEL: @f3(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: store atomic i32 [[VAL:%.*]], ptr [[PTR:%.*]] seq_cst, align 4
+// CHECK-NEXT: ret void
+//
+void f3(int32_t *Ptr, int32_t Val) {
+ __atomic_store_n(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f4(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[VAL:%.*]], align 4
+// CHECK-NEXT: store atomic i32 [[TMP0]], ptr [[PTR:%.*]] seq_cst, align 4
+// CHECK-NEXT: ret void
+//
+void f4(int32_t *Ptr, int32_t *Val) {
+ __atomic_store(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f5(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT: ret i32 [[TMP0]]
+//
+int32_t f5(int32_t *Ptr, int32_t Val) {
+ return __atomic_exchange_n(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f6(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[VAL:%.*]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i32 [[TMP0]] seq_cst, align 4
+// CHECK-NEXT: store i32 [[TMP1]], ptr [[RET:%.*]], align 4
+// CHECK-NEXT: ret i32 [[TMP1]]
+//
+int32_t f6(int32_t *Ptr, int32_t *Val, int32_t *Ret) {
+ __atomic_exchange(Ptr, Val, Ret, memory_order_seq_cst);
+ return *Ret;
+}
+
+// CHECK-LABEL: @f7(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[EXP:%.*]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = cmpxchg ptr [[PTR:%.*]], i32 [[TMP0]], i32 [[DES:%.*]] seq_cst seq_cst, align 4
+// CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
+// CHECK-NEXT: br i1 [[TMP2]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// CHECK: cmpxchg.store_expected:
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i32, i1 } [[TMP1]], 0
+// CHECK-NEXT: store i32 [[TMP3]], ptr [[EXP]], align 4
+// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
+// CHECK: cmpxchg.continue:
+// CHECK-NEXT: ret i1 [[TMP2]]
+//
+_Bool f7(int32_t *Ptr, int32_t *Exp, int32_t Des) {
+ return __atomic_compare_exchange_n(Ptr, Exp, Des, 0,
+ memory_order_seq_cst, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[EXP:%.*]], align 4
+// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DES:%.*]], align 4
+// CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr [[PTR:%.*]], i32 [[TMP0]], i32 [[TMP1]] seq_cst seq_cst, align 4
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+// CHECK-NEXT: br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// CHECK: cmpxchg.store_expected:
+// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i32, i1 } [[TMP2]], 0
+// CHECK-NEXT: store i32 [[TMP4]], ptr [[EXP]], align 4
+// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
+// CHECK: cmpxchg.continue:
+// CHECK-NEXT: ret i1 [[TMP3]]
+//
+_Bool f8(int32_t *Ptr, int32_t *Exp, int32_t *Des) {
+ return __atomic_compare_exchange(Ptr, Exp, Des, 0,
+ memory_order_seq_cst, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f9(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT: [[TMP1:%.*]] = add i32 [[TMP0]], [[VAL]]
+// CHECK-NEXT: ret i32 [[TMP1]]
+//
+int32_t f9(int32_t *Ptr, int32_t Val) {
+ return __atomic_add_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f10(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[TMP0]], [[VAL]]
+// CHECK-NEXT: ret i32 [[TMP1]]
+//
+int32_t f10(int32_t *Ptr, int32_t Val) {
+ return __atomic_sub_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f11(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT: [[TMP1:%.*]] = and i32 [[TMP0]], [[VAL]]
+// CHECK-NEXT: ret i32 [[TMP1]]
+//
+int32_t f11(int32_t *Ptr, int32_t Val) {
+ return __atomic_and_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f12(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[TMP0]], [[VAL]]
+// CHECK-NEXT: ret i32 [[TMP1]]
+//
+int32_t f12(int32_t *Ptr, int32_t Val) {
+ return __atomic_xor_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f13(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT: [[TMP1:%.*]] = or i32 [[TMP0]], [[VAL]]
+// CHECK-NEXT: ret i32 [[TMP1]]
+//
+int32_t f13(int32_t *Ptr, int32_t Val) {
+ return __atomic_or_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f14(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT: [[TMP1:%.*]] = and i32 [[TMP0]], [[VAL]]
+// CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[TMP1]], -1
+// CHECK-NEXT: ret i32 [[TMP2]]
+//
+int32_t f14(int32_t *Ptr, int32_t Val) {
+ return __atomic_nand_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f15(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT: ret i32 [[TMP0]]
+//
+int32_t f15(int32_t *Ptr, int32_t Val) {
+ return __atomic_fetch_add(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT: ret i32 [[TMP0]]
+//
+int32_t f16(int32_t *Ptr, int32_t Val) {
+ return __atomic_fetch_sub(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f17(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT: ret i32 [[TMP0]]
+//
+int32_t f17(int32_t *Ptr, int32_t Val) {
+ return __atomic_fetch_and(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f18(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT: ret i32 [[TMP0]]
+//
+int32_t f18(int32_t *Ptr, int32_t Val) {
+ return __atomic_fetch_xor(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f19(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT: ret i32 [[TMP0]]
+//
+int32_t f19(int32_t *Ptr, int32_t Val) {
+ return __atomic_fetch_or(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f20(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT: ret i32 [[TMP0]]
+//
+int32_t f20(int32_t *Ptr, int32_t Val) {
+ return __atomic_fetch_nand(Ptr, Val, memory_order_seq_cst);
+}
diff --git a/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i64.c b/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i64.c
new file mode 100644
index 0000000000000..25c69ee8c54bf
--- /dev/null
+++ b/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i64.c
@@ -0,0 +1,219 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
+//
+// Test GNU atomic builtins for int64_t.
+
+#include <stdatomic.h>
+#include <stdint.h>
+
+// CHECK-LABEL: @f1(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load atomic i64, ptr [[PTR:%.*]] seq_cst, align 8
+// CHECK-NEXT: ret i64 [[TMP0]]
+//
+int64_t f1(int64_t *Ptr) {
+ return __atomic_load_n(Ptr, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f2(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load atomic i64, ptr [[PTR:%.*]] seq_cst, align 8
+// CHECK-NEXT: store i64 [[TMP0]], ptr [[RET:%.*]], align 8
+// CHECK-NEXT: ret i64 [[TMP0]]
+//
+int64_t f2(int64_t *Ptr, int64_t *Ret) {
+ __atomic_load(Ptr, Ret, memory_order_seq_cst);
+ return *Ret;
+}
+
+// CHECK-LABEL: @f3(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: store atomic i64 [[VAL:%.*]], ptr [[PTR:%.*]] seq_cst, align 8
+// CHECK-NEXT: ret void
+//
+void f3(int64_t *Ptr, int64_t Val) {
+ __atomic_store_n(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f4(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[VAL:%.*]], align 8
+// CHECK-NEXT: store atomic i64 [[TMP0]], ptr [[PTR:%.*]] seq_cst, align 8
+// CHECK-NEXT: ret void
+//
+void f4(int64_t *Ptr, int64_t *Val) {
+ __atomic_store(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f5(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT: ret i64 [[TMP0]]
+//
+int64_t f5(int64_t *Ptr, int64_t Val) {
+ return __atomic_exchange_n(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f6(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[VAL:%.*]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i64 [[TMP0]] seq_cst, align 8
+// CHECK-NEXT: store i64 [[TMP1]], ptr [[RET:%.*]], align 8
+// CHECK-NEXT: ret i64 [[TMP1]]
+//
+int64_t f6(int64_t *Ptr, int64_t *Val, int64_t *Ret) {
+ __atomic_exchange(Ptr, Val, Ret, memory_order_seq_cst);
+ return *Ret;
+}
+
+// CHECK-LABEL: @f7(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[EXP:%.*]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = cmpxchg ptr [[PTR:%.*]], i64 [[TMP0]], i64 [[DES:%.*]] seq_cst seq_cst, align 8
+// CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
+// CHECK-NEXT: br i1 [[TMP2]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// CHECK: cmpxchg.store_expected:
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
+// CHECK-NEXT: store i64 [[TMP3]], ptr [[EXP]], align 8
+// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
+// CHECK: cmpxchg.continue:
+// CHECK-NEXT: ret i1 [[TMP2]]
+//
+_Bool f7(int64_t *Ptr, int64_t *Exp, int64_t Des) {
+ return __atomic_compare_exchange_n(Ptr, Exp, Des, 0,
+ memory_order_seq_cst, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[EXP:%.*]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr [[DES:%.*]], align 8
+// CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr [[PTR:%.*]], i64 [[TMP0]], i64 [[TMP1]] seq_cst seq_cst, align 8
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+// CHECK-NEXT: br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// CHECK: cmpxchg.store_expected:
+// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i64, i1 } [[TMP2]], 0
+// CHECK-NEXT: store i64 [[TMP4]], ptr [[EXP]], align 8
+// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
+// CHECK: cmpxchg.continue:
+// CHECK-NEXT: ret i1 [[TMP3]]
+//
+_Bool f8(int64_t *Ptr, int64_t *Exp, int64_t *Des) {
+ return __atomic_compare_exchange(Ptr, Exp, Des, 0,
+ memory_order_seq_cst, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f9(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], [[VAL]]
+// CHECK-NEXT: ret i64 [[TMP1]]
+//
+int64_t f9(int64_t *Ptr, int64_t Val) {
+ return __atomic_add_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f10(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[VAL]]
+// CHECK-NEXT: ret i64 [[TMP1]]
+//
+int64_t f10(int64_t *Ptr, int64_t Val) {
+ return __atomic_sub_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f11(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], [[VAL]]
+// CHECK-NEXT: ret i64 [[TMP1]]
+//
+int64_t f11(int64_t *Ptr, int64_t Val) {
+ return __atomic_and_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f12(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT: [[TMP1:%.*]] = xor i64 [[TMP0]], [[VAL]]
+// CHECK-NEXT: ret i64 [[TMP1]]
+//
+int64_t f12(int64_t *Ptr, int64_t Val) {
+ return __atomic_xor_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f13(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT: [[TMP1:%.*]] = or i64 [[TMP0]], [[VAL]]
+// CHECK-NEXT: ret i64 [[TMP1]]
+//
+int64_t f13(int64_t *Ptr, int64_t Val) {
+ return __atomic_or_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f14(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT: [[TMP1:%.*]] = and i64 [[TMP0]], [[VAL]]
+// CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], -1
+// CHECK-NEXT: ret i64 [[TMP2]]
+//
+int64_t f14(int64_t *Ptr, int64_t Val) {
+ return __atomic_nand_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f15(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT: ret i64 [[TMP0]]
+//
+int64_t f15(int64_t *Ptr, int64_t Val) {
+ return __atomic_fetch_add(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT: ret i64 [[TMP0]]
+//
+int64_t f16(int64_t *Ptr, int64_t Val) {
+ return __atomic_fetch_sub(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f17(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT: ret i64 [[TMP0]]
+//
+int64_t f17(int64_t *Ptr, int64_t Val) {
+ return __atomic_fetch_and(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f18(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT: ret i64 [[TMP0]]
+//
+int64_t f18(int64_t *Ptr, int64_t Val) {
+ return __atomic_fetch_xor(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f19(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT: ret i64 [[TMP0]]
+//
+int64_t f19(int64_t *Ptr, int64_t Val) {
+ return __atomic_fetch_or(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f20(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT: ret i64 [[TMP0]]
+//
+int64_t f20(int64_t *Ptr, int64_t Val) {
+ return __atomic_fetch_nand(Ptr, Val, memory_order_seq_cst);
+}
diff --git a/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i8.c b/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i8.c
new file mode 100644
index 0000000000000..1f4b455bc0261
--- /dev/null
+++ b/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i8.c
@@ -0,0 +1,219 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
+//
+// Test GNU atomic builtins for int8_t.
+
+#include <stdatomic.h>
+#include <stdint.h>
+
+// CHECK-LABEL: @f1(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load atomic i8, ptr [[PTR:%.*]] seq_cst, align 1
+// CHECK-NEXT: ret i8 [[TMP0]]
+//
+int8_t f1(int8_t *Ptr) {
+ return __atomic_load_n(Ptr, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f2(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load atomic i8, ptr [[PTR:%.*]] seq_cst, align 1
+// CHECK-NEXT: store i8 [[TMP0]], ptr [[RET:%.*]], align 1
+// CHECK-NEXT: ret i8 [[TMP0]]
+//
+int8_t f2(int8_t *Ptr, int8_t *Ret) {
+ __atomic_load(Ptr, Ret, memory_order_seq_cst);
+ return *Ret;
+}
+
+// CHECK-LABEL: @f3(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: store atomic i8 [[VAL:%.*]], ptr [[PTR:%.*]] seq_cst, align 1
+// CHECK-NEXT: ret void
+//
+void f3(int8_t *Ptr, int8_t Val) {
+ __atomic_store_n(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f4(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[VAL:%.*]], align 1
+// CHECK-NEXT: store atomic i8 [[TMP0]], ptr [[PTR:%.*]] seq_cst, align 1
+// CHECK-NEXT: ret void
+//
+void f4(int8_t *Ptr, int8_t *Val) {
+ __atomic_store(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f5(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT: ret i8 [[TMP0]]
+//
+int8_t f5(int8_t *Ptr, int8_t Val) {
+ return __atomic_exchange_n(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f6(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[VAL:%.*]], align 1
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i8 [[TMP0]] seq_cst, align 1
+// CHECK-NEXT: store i8 [[TMP1]], ptr [[RET:%.*]], align 1
+// CHECK-NEXT: ret i8 [[TMP1]]
+//
+int8_t f6(int8_t *Ptr, int8_t *Val, int8_t *Ret) {
+ __atomic_exchange(Ptr, Val, Ret, memory_order_seq_cst);
+ return *Ret;
+}
+
+// CHECK-LABEL: @f7(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[EXP:%.*]], align 1
+// CHECK-NEXT: [[TMP1:%.*]] = cmpxchg ptr [[PTR:%.*]], i8 [[TMP0]], i8 [[DES:%.*]] seq_cst seq_cst, align 1
+// CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i8, i1 } [[TMP1]], 1
+// CHECK-NEXT: br i1 [[TMP2]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// CHECK: cmpxchg.store_expected:
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i8, i1 } [[TMP1]], 0
+// CHECK-NEXT: store i8 [[TMP3]], ptr [[EXP]], align 1
+// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
+// CHECK: cmpxchg.continue:
+// CHECK-NEXT: ret i1 [[TMP2]]
+//
+_Bool f7(int8_t *Ptr, int8_t *Exp, int8_t Des) {
+ return __atomic_compare_exchange_n(Ptr, Exp, Des, 0,
+ memory_order_seq_cst, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f8(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[EXP:%.*]], align 1
+// CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[DES:%.*]], align 1
+// CHECK-NEXT: [[TMP2:%.*]] = cmpxchg ptr [[PTR:%.*]], i8 [[TMP0]], i8 [[TMP1]] seq_cst seq_cst, align 1
+// CHECK-NEXT: [[TMP3:%.*]] = extractvalue { i8, i1 } [[TMP2]], 1
+// CHECK-NEXT: br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// CHECK: cmpxchg.store_expected:
+// CHECK-NEXT: [[TMP4:%.*]] = extractvalue { i8, i1 } [[TMP2]], 0
+// CHECK-NEXT: store i8 [[TMP4]], ptr [[EXP]], align 1
+// CHECK-NEXT: br label [[CMPXCHG_CONTINUE]]
+// CHECK: cmpxchg.continue:
+// CHECK-NEXT: ret i1 [[TMP3]]
+//
+_Bool f8(int8_t *Ptr, int8_t *Exp, int8_t *Des) {
+ return __atomic_compare_exchange(Ptr, Exp, Des, 0,
+ memory_order_seq_cst, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f9(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT: [[TMP1:%.*]] = add i8 [[TMP0]], [[VAL]]
+// CHECK-NEXT: ret i8 [[TMP1]]
+//
+int8_t f9(int8_t *Ptr, int8_t Val) {
+ return __atomic_add_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f10(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT: [[TMP1:%.*]] = sub i8 [[TMP0]], [[VAL]]
+// CHECK-NEXT: ret i8 [[TMP1]]
+//
+int8_t f10(int8_t *Ptr, int8_t Val) {
+ return __atomic_sub_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f11(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT: [[TMP1:%.*]] = and i8 [[TMP0]], [[VAL]]
+// CHECK-NEXT: ret i8 [[TMP1]]
+//
+int8_t f11(int8_t *Ptr, int8_t Val) {
+ return __atomic_and_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f12(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[TMP0]], [[VAL]]
+// CHECK-NEXT: ret i8 [[TMP1]]
+//
+int8_t f12(int8_t *Ptr, int8_t Val) {
+ return __atomic_xor_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f13(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT: [[TMP1:%.*]] = or i8 [[TMP0]], [[VAL]]
+// CHECK-NEXT: ret i8 [[TMP1]]
+//
+int8_t f13(int8_t *Ptr, int8_t Val) {
+ return __atomic_or_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f14(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT: [[TMP1:%.*]] = and i8 [[TMP0]], [[VAL]]
+// CHECK-NEXT: [[TMP2:%.*]] = xor i8 [[TMP1]], -1
+// CHECK-NEXT: ret i8 [[TMP2]]
+//
+int8_t f14(int8_t *Ptr, int8_t Val) {
+ return __atomic_nand_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f15(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT: ret i8 [[TMP0]]
+//
+int8_t f15(int8_t *Ptr, int8_t Val) {
+ return __atomic_fetch_add(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f16(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT: ret i8 [[TMP0]]
+//
+int8_t f16(int8_t *Ptr, int8_t Val) {
+ return __atomic_fetch_sub(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f17(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT: ret i8 [[TMP0]]
+//
+int8_t f17(int8_t *Ptr, int8_t Val) {
+ return __atomic_fetch_and(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f18(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT: ret i8 [[TMP0]]
+//
+int8_t f18(int8_t *Ptr, int8_t Val) {
+ return __atomic_fetch_xor(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f19(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT: ret i8 [[TMP0]]
+//
+int8_t f19(int8_t *Ptr, int8_t Val) {
+ return __atomic_fetch_or(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f20(
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT: ret i8 [[TMP0]]
+//
+int8_t f20(int8_t *Ptr, int8_t Val) {
+ return __atomic_fetch_nand(Ptr, Val, memory_order_seq_cst);
+}
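The difference between the __atomic_OP_fetch and __atomic_fetch_OP forms checked above (e.g. f14 vs. f20) is only in the returned value, which is why the *_fetch variants carry an extra and/xor after the atomicrmw. A minimal non-atomic sketch of the two return conventions; the helper names are illustrative and not part of the patch:

#include <stdint.h>

// __atomic_nand_fetch-style: returns the NEW value, hence the extra
// and + xor emitted after the atomicrmw nand.
static inline int8_t nand_fetch_ref(int8_t *p, int8_t v) {
  int8_t old = *p;
  *p = ~(old & v);   // nand
  return *p;
}

// __atomic_fetch_nand-style: returns the OLD value, so the atomicrmw
// result can be returned directly.
static inline int8_t fetch_nand_ref(int8_t *p, int8_t v) {
  int8_t old = *p;
  *p = ~(old & v);
  return old;
}
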
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index d0eb0255f7d92..873994c2e333b 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -129,6 +129,8 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
setBooleanContents(ZeroOrOneBooleanContent);
setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
+ setMaxAtomicSizeInBitsSupported(128);
+
// Instructions are strings of 2-byte aligned 2-byte values.
setMinFunctionAlignment(Align(2));
// For performance reasons we prefer 16-byte alignment.
@@ -870,9 +872,11 @@ bool SystemZTargetLowering::hasInlineStackProbe(const MachineFunction &MF) const
TargetLowering::AtomicExpansionKind
SystemZTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
+ // TODO: expand them all here instead of in the backend.
return (RMW->isFloatingPointOperation() ||
RMW->getOperation() == AtomicRMWInst::UIncWrap ||
- RMW->getOperation() == AtomicRMWInst::UDecWrap)
+ RMW->getOperation() == AtomicRMWInst::UDecWrap ||
+ RMW->getType()->isIntegerTy(128))
? AtomicExpansionKind::CmpXChg
: AtomicExpansionKind::None;
}
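With AtomicExpansionKind::CmpXChg, the AtomicExpand pass rewrites an i128 atomicrmw into a compare-and-swap loop before instruction selection, and the backend then lowers the 16-byte compare-exchange to CDSG; that is where the loops in the updated tests below come from. A rough C-level sketch of the loop shape, assuming a naturally aligned pointer (the helper name is illustrative, not from the patch):

// Compare-and-swap loop equivalent to 'atomicrmw add ptr %src, i128 %b':
// retry until the exchange succeeds, then return the old value.
static unsigned __int128 rmw_add_via_cas(unsigned __int128 *src,
                                         unsigned __int128 b) {
  unsigned __int128 loaded = __atomic_load_n(src, __ATOMIC_SEQ_CST);
  while (!__atomic_compare_exchange_n(src, &loaded, loaded + b,
                                      /*weak=*/0, __ATOMIC_SEQ_CST,
                                      __ATOMIC_SEQ_CST))
    ; // on failure, 'loaded' is updated to the current memory value
  return loaded; // old value, matching the atomicrmw result
}
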
diff --git a/llvm/test/CodeGen/SystemZ/atomicrmw-ops-i128.ll b/llvm/test/CodeGen/SystemZ/atomicrmw-ops-i128.ll
index 1838b9297ff4c..0e8f044680222 100644
--- a/llvm/test/CodeGen/SystemZ/atomicrmw-ops-i128.ll
+++ b/llvm/test/CodeGen/SystemZ/atomicrmw-ops-i128.ll
@@ -1,103 +1,511 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; Test i128 atomicrmw operations.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z15 | FileCheck %s
+;
+; Test expansion of AtomicRMW instructions, which assume a natural alignment.
+; Note that the multiple regmoves inside the CDSG loops hopefully will go away
+; when the new i128 support is added.
; Check register exchange.
-define i128 @f1(i128 %dummy, ptr %src, i128 %b) {
-; CHECK-LABEL: f1:
-; CHECK: brasl %r14, __sync_lock_test_and_set_16@PLT
-; CHECK: br %r14
+define i128 @atomicrmw_xchg(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_xchg:
+; CHECK: # %bb.0:
+; CHECK-NEXT: stmg %r12, %r15, 96(%r15)
+; CHECK-NEXT: .cfi_offset %r12, -64
+; CHECK-NEXT: .cfi_offset %r13, -56
+; CHECK-NEXT: .cfi_offset %r14, -48
+; CHECK-NEXT: .cfi_offset %r15, -40
+; CHECK-NEXT: lg %r14, 8(%r4)
+; CHECK-NEXT: lg %r0, 0(%r4)
+; CHECK-NEXT: lg %r4, 8(%r3)
+; CHECK-NEXT: lg %r5, 0(%r3)
+; CHECK-NEXT: lgr %r1, %r14
+; CHECK-NEXT: .LBB0_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: lgr %r12, %r5
+; CHECK-NEXT: lgr %r13, %r4
+; CHECK-NEXT: cdsg %r12, %r0, 0(%r3)
+; CHECK-NEXT: lgr %r4, %r13
+; CHECK-NEXT: lgr %r5, %r12
+; CHECK-NEXT: jl .LBB0_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: stg %r5, 0(%r2)
+; CHECK-NEXT: stg %r4, 8(%r2)
+; CHECK-NEXT: lmg %r12, %r15, 96(%r15)
+; CHECK-NEXT: br %r14
%res = atomicrmw xchg ptr %src, i128 %b seq_cst
ret i128 %res
}
; Check addition of a variable.
-define i128 @f2(i128 %dummy, ptr %src, i128 %b) {
-; CHECK-LABEL: f2:
-; CHECK: brasl %r14, __sync_fetch_and_add_16@PLT
-; CHECK: br %r14
+define i128 @atomicrmw_add(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_add:
+; CHECK: # %bb.0:
+; CHECK-NEXT: stmg %r10, %r15, 80(%r15)
+; CHECK-NEXT: .cfi_offset %r10, -80
+; CHECK-NEXT: .cfi_offset %r11, -72
+; CHECK-NEXT: .cfi_offset %r12, -64
+; CHECK-NEXT: .cfi_offset %r13, -56
+; CHECK-NEXT: .cfi_offset %r15, -40
+; CHECK-NEXT: lg %r0, 8(%r4)
+; CHECK-NEXT: lg %r1, 0(%r4)
+; CHECK-NEXT: lg %r4, 8(%r3)
+; CHECK-NEXT: lg %r5, 0(%r3)
+; CHECK-NEXT: .LBB1_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: algrk %r13, %r4, %r0
+; CHECK-NEXT: lgr %r10, %r5
+; CHECK-NEXT: lgr %r11, %r4
+; CHECK-NEXT: alcgr %r5, %r1
+; CHECK-NEXT: lgr %r12, %r5
+; CHECK-NEXT: cdsg %r10, %r12, 0(%r3)
+; CHECK-NEXT: lgr %r4, %r11
+; CHECK-NEXT: lgr %r5, %r10
+; CHECK-NEXT: jl .LBB1_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: stg %r5, 0(%r2)
+; CHECK-NEXT: stg %r4, 8(%r2)
+; CHECK-NEXT: lmg %r10, %r15, 80(%r15)
+; CHECK-NEXT: br %r14
%res = atomicrmw add ptr %src, i128 %b seq_cst
ret i128 %res
}
; Check subtraction of a variable.
-define i128 @f3(i128 %dummy, ptr %src, i128 %b) {
-; CHECK-LABEL: f3:
-; CHECK: brasl %r14, __sync_fetch_and_sub_16@PLT
-; CHECK: br %r14
+define i128 @atomicrmw_sub(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_sub:
+; CHECK: # %bb.0:
+; CHECK-NEXT: stmg %r10, %r15, 80(%r15)
+; CHECK-NEXT: .cfi_offset %r10, -80
+; CHECK-NEXT: .cfi_offset %r11, -72
+; CHECK-NEXT: .cfi_offset %r12, -64
+; CHECK-NEXT: .cfi_offset %r13, -56
+; CHECK-NEXT: .cfi_offset %r15, -40
+; CHECK-NEXT: lg %r0, 8(%r4)
+; CHECK-NEXT: lg %r1, 0(%r4)
+; CHECK-NEXT: lg %r4, 8(%r3)
+; CHECK-NEXT: lg %r5, 0(%r3)
+; CHECK-NEXT: .LBB2_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: slgrk %r13, %r4, %r0
+; CHECK-NEXT: lgr %r10, %r5
+; CHECK-NEXT: lgr %r11, %r4
+; CHECK-NEXT: slbgr %r5, %r1
+; CHECK-NEXT: lgr %r12, %r5
+; CHECK-NEXT: cdsg %r10, %r12, 0(%r3)
+; CHECK-NEXT: lgr %r4, %r11
+; CHECK-NEXT: lgr %r5, %r10
+; CHECK-NEXT: jl .LBB2_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: stg %r5, 0(%r2)
+; CHECK-NEXT: stg %r4, 8(%r2)
+; CHECK-NEXT: lmg %r10, %r15, 80(%r15)
+; CHECK-NEXT: br %r14
%res = atomicrmw sub ptr %src, i128 %b seq_cst
ret i128 %res
}
; Check AND of a variable.
-define i128 @f4(i128 %dummy, ptr %src, i128 %b) {
-; CHECK-LABEL: f4:
-; CHECK: brasl %r14, __sync_fetch_and_and_16@PLT
-; CHECK: br %r14
+define i128 @atomicrmw_and(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_and:
+; CHECK: # %bb.0:
+; CHECK-NEXT: stmg %r10, %r15, 80(%r15)
+; CHECK-NEXT: .cfi_offset %r10, -80
+; CHECK-NEXT: .cfi_offset %r11, -72
+; CHECK-NEXT: .cfi_offset %r12, -64
+; CHECK-NEXT: .cfi_offset %r13, -56
+; CHECK-NEXT: .cfi_offset %r15, -40
+; CHECK-NEXT: lg %r0, 8(%r4)
+; CHECK-NEXT: lg %r1, 0(%r4)
+; CHECK-NEXT: lg %r4, 8(%r3)
+; CHECK-NEXT: lg %r5, 0(%r3)
+; CHECK-NEXT: .LBB3_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ngrk %r12, %r5, %r1
+; CHECK-NEXT: ngrk %r13, %r4, %r0
+; CHECK-NEXT: lgr %r10, %r5
+; CHECK-NEXT: lgr %r11, %r4
+; CHECK-NEXT: cdsg %r10, %r12, 0(%r3)
+; CHECK-NEXT: lgr %r4, %r11
+; CHECK-NEXT: lgr %r5, %r10
+; CHECK-NEXT: jl .LBB3_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: stg %r5, 0(%r2)
+; CHECK-NEXT: stg %r4, 8(%r2)
+; CHECK-NEXT: lmg %r10, %r15, 80(%r15)
+; CHECK-NEXT: br %r14
%res = atomicrmw and ptr %src, i128 %b seq_cst
ret i128 %res
}
; Check NAND of a variable.
-define i128 @f5(i128 %dummy, ptr %src, i128 %b) {
-; CHECK-LABEL: f5:
-; CHECK: brasl %r14, __sync_fetch_and_nand_16@PLT
-; CHECK: br %r14
+define i128 @atomicrmw_nand(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_nand:
+; CHECK: # %bb.0:
+; CHECK-NEXT: stmg %r10, %r15, 80(%r15)
+; CHECK-NEXT: .cfi_offset %r10, -80
+; CHECK-NEXT: .cfi_offset %r11, -72
+; CHECK-NEXT: .cfi_offset %r12, -64
+; CHECK-NEXT: .cfi_offset %r13, -56
+; CHECK-NEXT: .cfi_offset %r15, -40
+; CHECK-NEXT: lg %r0, 8(%r4)
+; CHECK-NEXT: lg %r1, 0(%r4)
+; CHECK-NEXT: lg %r4, 8(%r3)
+; CHECK-NEXT: lg %r5, 0(%r3)
+; CHECK-NEXT: .LBB4_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: nngrk %r12, %r5, %r1
+; CHECK-NEXT: lgr %r10, %r5
+; CHECK-NEXT: lgr %r11, %r4
+; CHECK-NEXT: nngrk %r13, %r4, %r0
+; CHECK-NEXT: cdsg %r10, %r12, 0(%r3)
+; CHECK-NEXT: lgr %r4, %r11
+; CHECK-NEXT: lgr %r5, %r10
+; CHECK-NEXT: jl .LBB4_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: stg %r5, 0(%r2)
+; CHECK-NEXT: stg %r4, 8(%r2)
+; CHECK-NEXT: lmg %r10, %r15, 80(%r15)
+; CHECK-NEXT: br %r14
%res = atomicrmw nand ptr %src, i128 %b seq_cst
ret i128 %res
}
; Check OR of a variable.
-define i128 @f6(i128 %dummy, ptr %src, i128 %b) {
-; CHECK-LABEL: f6:
-; CHECK: brasl %r14, __sync_fetch_and_or_16@PLT
-; CHECK: br %r14
+define i128 @atomicrmw_or(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_or:
+; CHECK: # %bb.0:
+; CHECK-NEXT: stmg %r10, %r15, 80(%r15)
+; CHECK-NEXT: .cfi_offset %r10, -80
+; CHECK-NEXT: .cfi_offset %r11, -72
+; CHECK-NEXT: .cfi_offset %r12, -64
+; CHECK-NEXT: .cfi_offset %r13, -56
+; CHECK-NEXT: .cfi_offset %r15, -40
+; CHECK-NEXT: lg %r0, 8(%r4)
+; CHECK-NEXT: lg %r1, 0(%r4)
+; CHECK-NEXT: lg %r4, 8(%r3)
+; CHECK-NEXT: lg %r5, 0(%r3)
+; CHECK-NEXT: .LBB5_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: ogrk %r12, %r5, %r1
+; CHECK-NEXT: ogrk %r13, %r4, %r0
+; CHECK-NEXT: lgr %r10, %r5
+; CHECK-NEXT: lgr %r11, %r4
+; CHECK-NEXT: cdsg %r10, %r12, 0(%r3)
+; CHECK-NEXT: lgr %r4, %r11
+; CHECK-NEXT: lgr %r5, %r10
+; CHECK-NEXT: jl .LBB5_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: stg %r5, 0(%r2)
+; CHECK-NEXT: stg %r4, 8(%r2)
+; CHECK-NEXT: lmg %r10, %r15, 80(%r15)
+; CHECK-NEXT: br %r14
%res = atomicrmw or ptr %src, i128 %b seq_cst
ret i128 %res
}
; Check XOR of a variable.
-define i128 @f7(i128 %dummy, ptr %src, i128 %b) {
-; CHECK-LABEL: f7:
-; CHECK: brasl %r14, __sync_fetch_and_xor_16@PLT
-; CHECK: br %r14
+define i128 @atomicrmw_xor(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_xor:
+; CHECK: # %bb.0:
+; CHECK-NEXT: stmg %r10, %r15, 80(%r15)
+; CHECK-NEXT: .cfi_offset %r10, -80
+; CHECK-NEXT: .cfi_offset %r11, -72
+; CHECK-NEXT: .cfi_offset %r12, -64
+; CHECK-NEXT: .cfi_offset %r13, -56
+; CHECK-NEXT: .cfi_offset %r15, -40
+; CHECK-NEXT: lg %r0, 8(%r4)
+; CHECK-NEXT: lg %r1, 0(%r4)
+; CHECK-NEXT: lg %r4, 8(%r3)
+; CHECK-NEXT: lg %r5, 0(%r3)
+; CHECK-NEXT: .LBB6_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: xgrk %r12, %r5, %r1
+; CHECK-NEXT: xgrk %r13, %r4, %r0
+; CHECK-NEXT: lgr %r10, %r5
+; CHECK-NEXT: lgr %r11, %r4
+; CHECK-NEXT: cdsg %r10, %r12, 0(%r3)
+; CHECK-NEXT: lgr %r4, %r11
+; CHECK-NEXT: lgr %r5, %r10
+; CHECK-NEXT: jl .LBB6_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: stg %r5, 0(%r2)
+; CHECK-NEXT: stg %r4, 8(%r2)
+; CHECK-NEXT: lmg %r10, %r15, 80(%r15)
+; CHECK-NEXT: br %r14
%res = atomicrmw xor ptr %src, i128 %b seq_cst
ret i128 %res
}
; Check signed minimum.
-define i128 @f8(i128 %dummy, ptr %src, i128 %b) {
-; CHECK-LABEL: f8:
-; CHECK: brasl %r14, __sync_fetch_and_min_16@PLT
-; CHECK: br %r14
+define i128 @atomicrmw_min(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_min:
+; CHECK: # %bb.0:
+; CHECK-NEXT: stmg %r10, %r15, 80(%r15)
+; CHECK-NEXT: .cfi_offset %r10, -80
+; CHECK-NEXT: .cfi_offset %r11, -72
+; CHECK-NEXT: .cfi_offset %r12, -64
+; CHECK-NEXT: .cfi_offset %r13, -56
+; CHECK-NEXT: .cfi_offset %r14, -48
+; CHECK-NEXT: .cfi_offset %r15, -40
+; CHECK-NEXT: lg %r0, 8(%r4)
+; CHECK-NEXT: lg %r1, 0(%r4)
+; CHECK-NEXT: lg %r4, 8(%r3)
+; CHECK-NEXT: lg %r5, 0(%r3)
+; CHECK-NEXT: .LBB7_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: clgr %r4, %r0
+; CHECK-NEXT: lhi %r14, 0
+; CHECK-NEXT: lochile %r14, 1
+; CHECK-NEXT: cgr %r5, %r1
+; CHECK-NEXT: lhi %r13, 0
+; CHECK-NEXT: lochile %r13, 1
+; CHECK-NEXT: locrlh %r14, %r13
+; CHECK-NEXT: chi %r14, 0
+; CHECK-NEXT: selgrlh %r13, %r4, %r0
+; CHECK-NEXT: selgrlh %r12, %r5, %r1
+; CHECK-NEXT: lgr %r10, %r5
+; CHECK-NEXT: lgr %r11, %r4
+; CHECK-NEXT: cdsg %r10, %r12, 0(%r3)
+; CHECK-NEXT: lgr %r4, %r11
+; CHECK-NEXT: lgr %r5, %r10
+; CHECK-NEXT: jl .LBB7_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: stg %r5, 0(%r2)
+; CHECK-NEXT: stg %r4, 8(%r2)
+; CHECK-NEXT: lmg %r10, %r15, 80(%r15)
+; CHECK-NEXT: br %r14
%res = atomicrmw min ptr %src, i128 %b seq_cst
ret i128 %res
}
; Check signed maximum.
-define i128 @f9(i128 %dummy, ptr %src, i128 %b) {
-; CHECK-LABEL: f9:
-; CHECK: brasl %r14, __sync_fetch_and_max_16@PLT
-; CHECK: br %r14
+define i128 @atomicrmw_max(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_max:
+; CHECK: # %bb.0:
+; CHECK-NEXT: stmg %r10, %r15, 80(%r15)
+; CHECK-NEXT: .cfi_offset %r10, -80
+; CHECK-NEXT: .cfi_offset %r11, -72
+; CHECK-NEXT: .cfi_offset %r12, -64
+; CHECK-NEXT: .cfi_offset %r13, -56
+; CHECK-NEXT: .cfi_offset %r14, -48
+; CHECK-NEXT: .cfi_offset %r15, -40
+; CHECK-NEXT: lg %r0, 8(%r4)
+; CHECK-NEXT: lg %r1, 0(%r4)
+; CHECK-NEXT: lg %r4, 8(%r3)
+; CHECK-NEXT: lg %r5, 0(%r3)
+; CHECK-NEXT: .LBB8_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: clgr %r4, %r0
+; CHECK-NEXT: lhi %r14, 0
+; CHECK-NEXT: lochih %r14, 1
+; CHECK-NEXT: cgr %r5, %r1
+; CHECK-NEXT: lhi %r13, 0
+; CHECK-NEXT: lochih %r13, 1
+; CHECK-NEXT: locrlh %r14, %r13
+; CHECK-NEXT: chi %r14, 0
+; CHECK-NEXT: selgrlh %r13, %r4, %r0
+; CHECK-NEXT: selgrlh %r12, %r5, %r1
+; CHECK-NEXT: lgr %r10, %r5
+; CHECK-NEXT: lgr %r11, %r4
+; CHECK-NEXT: cdsg %r10, %r12, 0(%r3)
+; CHECK-NEXT: lgr %r4, %r11
+; CHECK-NEXT: lgr %r5, %r10
+; CHECK-NEXT: jl .LBB8_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: stg %r5, 0(%r2)
+; CHECK-NEXT: stg %r4, 8(%r2)
+; CHECK-NEXT: lmg %r10, %r15, 80(%r15)
+; CHECK-NEXT: br %r14
%res = atomicrmw max ptr %src, i128 %b seq_cst
ret i128 %res
}
; Check unsigned minimum.
-define i128 @f10(i128 %dummy, ptr %src, i128 %b) {
-; CHECK-LABEL: f10:
-; CHECK: brasl %r14, __sync_fetch_and_umin_16@PLT
-; CHECK: br %r14
+define i128 @atomicrmw_umin(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_umin:
+; CHECK: # %bb.0:
+; CHECK-NEXT: stmg %r10, %r15, 80(%r15)
+; CHECK-NEXT: .cfi_offset %r10, -80
+; CHECK-NEXT: .cfi_offset %r11, -72
+; CHECK-NEXT: .cfi_offset %r12, -64
+; CHECK-NEXT: .cfi_offset %r13, -56
+; CHECK-NEXT: .cfi_offset %r14, -48
+; CHECK-NEXT: .cfi_offset %r15, -40
+; CHECK-NEXT: lg %r0, 8(%r4)
+; CHECK-NEXT: lg %r1, 0(%r4)
+; CHECK-NEXT: lg %r4, 8(%r3)
+; CHECK-NEXT: lg %r5, 0(%r3)
+; CHECK-NEXT: .LBB9_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: clgr %r5, %r1
+; CHECK-NEXT: lhi %r14, 0
+; CHECK-NEXT: lochile %r14, 1
+; CHECK-NEXT: clgr %r4, %r0
+; CHECK-NEXT: lhi %r13, 0
+; CHECK-NEXT: lochile %r13, 1
+; CHECK-NEXT: cgr %r5, %r1
+; CHECK-NEXT: locre %r14, %r13
+; CHECK-NEXT: chi %r14, 0
+; CHECK-NEXT: selgrlh %r13, %r4, %r0
+; CHECK-NEXT: selgrlh %r12, %r5, %r1
+; CHECK-NEXT: lgr %r10, %r5
+; CHECK-NEXT: lgr %r11, %r4
+; CHECK-NEXT: cdsg %r10, %r12, 0(%r3)
+; CHECK-NEXT: lgr %r4, %r11
+; CHECK-NEXT: lgr %r5, %r10
+; CHECK-NEXT: jl .LBB9_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: stg %r5, 0(%r2)
+; CHECK-NEXT: stg %r4, 8(%r2)
+; CHECK-NEXT: lmg %r10, %r15, 80(%r15)
+; CHECK-NEXT: br %r14
%res = atomicrmw umin ptr %src, i128 %b seq_cst
ret i128 %res
}
; Check unsigned maximum.
-define i128 @f11(i128 %dummy, ptr %src, i128 %b) {
-; CHECK-LABEL: f11:
-; CHECK: brasl %r14, __sync_fetch_and_umax_16@PLT
-; CHECK: br %r14
+define i128 @atomicrmw_umax(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_umax:
+; CHECK: # %bb.0:
+; CHECK-NEXT: stmg %r10, %r15, 80(%r15)
+; CHECK-NEXT: .cfi_offset %r10, -80
+; CHECK-NEXT: .cfi_offset %r11, -72
+; CHECK-NEXT: .cfi_offset %r12, -64
+; CHECK-NEXT: .cfi_offset %r13, -56
+; CHECK-NEXT: .cfi_offset %r14, -48
+; CHECK-NEXT: .cfi_offset %r15, -40
+; CHECK-NEXT: lg %r0, 8(%r4)
+; CHECK-NEXT: lg %r1, 0(%r4)
+; CHECK-NEXT: lg %r4, 8(%r3)
+; CHECK-NEXT: lg %r5, 0(%r3)
+; CHECK-NEXT: .LBB10_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: clgr %r5, %r1
+; CHECK-NEXT: lhi %r14, 0
+; CHECK-NEXT: lochih %r14, 1
+; CHECK-NEXT: clgr %r4, %r0
+; CHECK-NEXT: lhi %r13, 0
+; CHECK-NEXT: lochih %r13, 1
+; CHECK-NEXT: cgr %r5, %r1
+; CHECK-NEXT: locre %r14, %r13
+; CHECK-NEXT: chi %r14, 0
+; CHECK-NEXT: selgrlh %r13, %r4, %r0
+; CHECK-NEXT: selgrlh %r12, %r5, %r1
+; CHECK-NEXT: lgr %r10, %r5
+; CHECK-NEXT: lgr %r11, %r4
+; CHECK-NEXT: cdsg %r10, %r12, 0(%r3)
+; CHECK-NEXT: lgr %r4, %r11
+; CHECK-NEXT: lgr %r5, %r10
+; CHECK-NEXT: jl .LBB10_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: stg %r5, 0(%r2)
+; CHECK-NEXT: stg %r4, 8(%r2)
+; CHECK-NEXT: lmg %r10, %r15, 80(%r15)
+; CHECK-NEXT: br %r14
%res = atomicrmw umax ptr %src, i128 %b seq_cst
ret i128 %res
}
+; Check increment with wraparound.
+define i128 @atomicrmw_uinc_wrap(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_uinc_wrap:
+; CHECK: # %bb.0:
+; CHECK-NEXT: stmg %r8, %r15, 64(%r15)
+; CHECK-NEXT: .cfi_offset %r8, -96
+; CHECK-NEXT: .cfi_offset %r9, -88
+; CHECK-NEXT: .cfi_offset %r10, -80
+; CHECK-NEXT: .cfi_offset %r11, -72
+; CHECK-NEXT: .cfi_offset %r12, -64
+; CHECK-NEXT: .cfi_offset %r13, -56
+; CHECK-NEXT: .cfi_offset %r14, -48
+; CHECK-NEXT: .cfi_offset %r15, -40
+; CHECK-NEXT: lg %r0, 8(%r4)
+; CHECK-NEXT: lg %r1, 0(%r4)
+; CHECK-NEXT: lg %r5, 8(%r3)
+; CHECK-NEXT: lg %r14, 0(%r3)
+; CHECK-NEXT: lghi %r4, 0
+; CHECK-NEXT: .LBB11_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: alghsik %r13, %r5, 1
+; CHECK-NEXT: lgr %r12, %r14
+; CHECK-NEXT: lhi %r11, 0
+; CHECK-NEXT: alcgr %r12, %r4
+; CHECK-NEXT: clgr %r14, %r1
+; CHECK-NEXT: lochihe %r11, 1
+; CHECK-NEXT: clgr %r5, %r0
+; CHECK-NEXT: lhi %r10, 0
+; CHECK-NEXT: lochihe %r10, 1
+; CHECK-NEXT: cgr %r14, %r1
+; CHECK-NEXT: locre %r11, %r10
+; CHECK-NEXT: chi %r11, 0
+; CHECK-NEXT: locghilh %r13, 0
+; CHECK-NEXT: locghilh %r12, 0
+; CHECK-NEXT: lgr %r8, %r14
+; CHECK-NEXT: lgr %r9, %r5
+; CHECK-NEXT: cdsg %r8, %r12, 0(%r3)
+; CHECK-NEXT: lgr %r5, %r9
+; CHECK-NEXT: lgr %r14, %r8
+; CHECK-NEXT: jl .LBB11_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: stg %r14, 0(%r2)
+; CHECK-NEXT: stg %r5, 8(%r2)
+; CHECK-NEXT: lmg %r8, %r15, 64(%r15)
+; CHECK-NEXT: br %r14
+ %res = atomicrmw uinc_wrap ptr %src, i128 %b seq_cst
+ ret i128 %res
+}
+
+; Check decrement with wraparound.
+define i128 @atomicrmw_udec_wrap(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_udec_wrap:
+; CHECK: # %bb.0:
+; CHECK-NEXT: stmg %r6, %r15, 48(%r15)
+; CHECK-NEXT: .cfi_offset %r6, -112
+; CHECK-NEXT: .cfi_offset %r7, -104
+; CHECK-NEXT: .cfi_offset %r9, -88
+; CHECK-NEXT: .cfi_offset %r10, -80
+; CHECK-NEXT: .cfi_offset %r11, -72
+; CHECK-NEXT: .cfi_offset %r12, -64
+; CHECK-NEXT: .cfi_offset %r13, -56
+; CHECK-NEXT: .cfi_offset %r14, -48
+; CHECK-NEXT: .cfi_offset %r15, -40
+; CHECK-NEXT: lg %r0, 8(%r4)
+; CHECK-NEXT: lg %r1, 0(%r4)
+; CHECK-NEXT: lg %r5, 8(%r3)
+; CHECK-NEXT: lg %r14, 0(%r3)
+; CHECK-NEXT: lghi %r4, -1
+; CHECK-NEXT: .LBB12_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: alghsik %r13, %r5, -1
+; CHECK-NEXT: lgr %r12, %r14
+; CHECK-NEXT: lhi %r10, 0
+; CHECK-NEXT: alcgr %r12, %r4
+; CHECK-NEXT: ogrk %r11, %r5, %r14
+; CHECK-NEXT: lhi %r11, 0
+; CHECK-NEXT: lochie %r11, 1
+; CHECK-NEXT: clgr %r14, %r1
+; CHECK-NEXT: lochih %r10, 1
+; CHECK-NEXT: clgr %r5, %r0
+; CHECK-NEXT: lhi %r9, 0
+; CHECK-NEXT: lochih %r9, 1
+; CHECK-NEXT: cgr %r14, %r1
+; CHECK-NEXT: locre %r10, %r9
+; CHECK-NEXT: or %r11, %r10
+; CHECK-NEXT: selgrl %r11, %r0, %r13
+; CHECK-NEXT: selgrl %r10, %r1, %r12
+; CHECK-NEXT: lgr %r6, %r14
+; CHECK-NEXT: lgr %r7, %r5
+; CHECK-NEXT: cdsg %r6, %r10, 0(%r3)
+; CHECK-NEXT: lgr %r5, %r7
+; CHECK-NEXT: lgr %r14, %r6
+; CHECK-NEXT: jl .LBB12_1
+; CHECK-NEXT: # %bb.2: # %atomicrmw.end
+; CHECK-NEXT: stg %r14, 0(%r2)
+; CHECK-NEXT: stg %r5, 8(%r2)
+; CHECK-NEXT: lmg %r6, %r15, 48(%r15)
+; CHECK-NEXT: br %r14
+ %res = atomicrmw udec_wrap ptr %src, i128 %b seq_cst
+ ret i128 %res
+}
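The uinc_wrap and udec_wrap operations tested above follow the LangRef semantics: increment wrapping to zero at the bound, and decrement wrapping back to the bound at zero or when above it. A non-atomic C sketch of the per-iteration update (helper names are illustrative, not from the patch):

// uinc_wrap: *p = (old >= b) ? 0 : old + 1
static unsigned __int128 uinc_wrap_step(unsigned __int128 old,
                                        unsigned __int128 b) {
  return (old >= b) ? 0 : old + 1;
}

// udec_wrap: *p = (old == 0 || old > b) ? b : old - 1
static unsigned __int128 udec_wrap_step(unsigned __int128 old,
                                        unsigned __int128 b) {
  return (old == 0 || old > b) ? b : old - 1;
}
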
diff --git a/llvm/test/CodeGen/SystemZ/atomicrmw-xchg-07.ll b/llvm/test/CodeGen/SystemZ/atomicrmw-xchg-07.ll
index 80cc85158e45e..b9e29599af7ee 100644
--- a/llvm/test/CodeGen/SystemZ/atomicrmw-xchg-07.ll
+++ b/llvm/test/CodeGen/SystemZ/atomicrmw-xchg-07.ll
@@ -2,23 +2,28 @@
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
-define void @f1(ptr %ret, ptr %src, ptr %b) {
+define void @f1(ptr align 16 %ret, ptr align 16 %src, ptr align 16 %b) {
; CHECK-LABEL: f1:
-; CHECK: lg [[RH:%r[0-9]+]], 8(%r4)
-; CHECK: lgr [[RET:%r[0-9]+]], %r2
-; CHECK: lg [[RL:%r[0-9]+]], 0(%r4)
-; CHECK: stg [[RH]], 168(%r15)
-; CHECK: la %r2, 176(%r15)
-; CHECK: la %r4, 160(%r15)
-; CHECK: stg [[RL]], 160(%r15)
-; CHECK: brasl %r14, __sync_lock_test_and_set_16@PLT
-; CHECK: lg [[RH2:%r[0-9]+]], 184(%r15)
-; CHECK: lg [[RL2:%r[0-9]+]], 176(%r15)
-; CHECK: stg [[RH]], 8([[RET]])
-; CHECK: stg [[RL]], 0([[RET]])
-; CHECK: br %r14
- %val = load fp128, ptr %b, align 8
+; CHECK: lg %r14, 8(%r4)
+; CHECK-NEXT: lg %r0, 0(%r4)
+; CHECK-NEXT: lg %r4, 8(%r3)
+; CHECK-NEXT: lg %r5, 0(%r3)
+; CHECK-NEXT: lgr %r1, %r14
+; CHECK-NEXT:.LBB0_1: # %atomicrmw.start
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: lgr %r12, %r5
+; CHECK-NEXT: lgr %r13, %r4
+; CHECK-NEXT: cdsg %r12, %r0, 0(%r3)
+; CHECK-NEXT: lgr %r4, %r13
+; CHECK-NEXT: lgr %r5, %r12
+; CHECK-NEXT: jl .LBB0_1
+; CHECK-NEXT:# %bb.2: # %atomicrmw.end
+; CHECK-NEXT: stg %r5, 0(%r2)
+; CHECK-NEXT: stg %r4, 8(%r2)
+; CHECK-NEXT: lmg %r12, %r15, 96(%r15)
+; CHECK-NEXT: br %r14
+ %val = load fp128, ptr %b, align 16
%res = atomicrmw xchg ptr %src, fp128 %val seq_cst
- store fp128 %res, ptr %ret, align 8
+ store fp128 %res, ptr %ret, align 16
ret void
}
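The fp128 exchange above goes through the same i128 compare-and-swap expansion now that 16-byte atomic accesses are inlined. At the C source level the analogous operation would look something like the following (an illustrative sketch, not one of the added tests); with the new 16-byte alignment for _Atomic long double it ends up as a CDSG loop rather than an atomic library call:

#include <stdatomic.h>

long double swap_ld(_Atomic long double *p, long double v) {
  return atomic_exchange(p, v);
}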