[llvm] [clang] [SystemZ] Properly support 16 byte atomic int/fp types and ops. (PR #73134)

Jonas Paulsson via cfe-commits cfe-commits at lists.llvm.org
Tue Nov 28 07:04:15 PST 2023


https://github.com/JonPsson1 updated https://github.com/llvm/llvm-project/pull/73134

>From bf9b6b735c131833ec9457f23b72322fd50ef821 Mon Sep 17 00:00:00 2001
From: Jonas Paulsson <paulsson at linux.vnet.ibm.com>
Date: Fri, 3 Feb 2023 14:32:58 +0100
Subject: [PATCH 1/5] [SystemZ] Improve support for 16 byte atomic int/fp types
 and operations.

- Clang FE now has MaxAtomicPromoteWidth and MaxAtomicInlineWidth with a value
  of 128. It now produces IR instead of calls to __atomic intrinsics for 16
  bytes as well. FP loads are first loaded as i128 and then cast to fp128.
- Atomic __int128 (and long double) variables are aligned to 16 bytes
  (like gcc 14).
- AtomicExpand pass now expands also 16 byte operations.

- tests for __atomic builtins for all integer widths, with test for i128 in
  both align=8 and align=16 cases.
- Resulting behavior of __atomic_is_lock_free / __atomic_always_lock_free /
  __c11_atomic_is_lock_free is tested in gnu-atomic_is_lock_free.c
- shouldExpandAtomicRMWInIR() was already returning true for any FP type. Now
  that the backend is accepting 16 byte atomics, 16 byte FP atomicrmw:s now also
  get expanded by AtomicExpand. The default (and used)
  shouldCastAtomicRMWIInIR() says that if the type is FP, it is cast to
  integer (see atomicrmw-xchg-07.ll).
- TODO: AtomicExpand pass handles with this patch expansion of i128 atomicrmw:s.
  As a next step smaller integer types should also be possible to handle this
  way instead of in backend.

Original patch rebased.
Remove the regalloc handling for CDSG loops.
Tests improved.
---
 clang/lib/Basic/Targets/SystemZ.h             |   2 +-
 clang/test/CodeGen/SystemZ/atomic-alignment.c |  35 ++
 .../SystemZ/gnu-atomic-builtins-i128-16Al.c   | 257 +++++++++
 .../SystemZ/gnu-atomic-builtins-i128-8Al.c    | 301 +++++++++++
 .../CodeGen/SystemZ/gnu-atomic-builtins-i16.c | 219 ++++++++
 .../CodeGen/SystemZ/gnu-atomic-builtins-i32.c | 219 ++++++++
 .../CodeGen/SystemZ/gnu-atomic-builtins-i64.c | 219 ++++++++
 .../CodeGen/SystemZ/gnu-atomic-builtins-i8.c  | 219 ++++++++
 .../gnu-atomic_is_lock_free-i128-16Al.c       |  54 ++
 .../gnu-atomic_is_lock_free-i128-8Al.c        |  28 +
 .../Target/SystemZ/SystemZISelLowering.cpp    |   6 +-
 .../CodeGen/SystemZ/atomicrmw-ops-i128.ll     | 496 ++++++++++++++++--
 .../test/CodeGen/SystemZ/atomicrmw-xchg-07.ll |  37 +-
 13 files changed, 2030 insertions(+), 62 deletions(-)
 create mode 100644 clang/test/CodeGen/SystemZ/atomic-alignment.c
 create mode 100644 clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i128-16Al.c
 create mode 100644 clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i128-8Al.c
 create mode 100644 clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i16.c
 create mode 100644 clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i32.c
 create mode 100644 clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i64.c
 create mode 100644 clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i8.c
 create mode 100644 clang/test/CodeGen/SystemZ/gnu-atomic_is_lock_free-i128-16Al.c
 create mode 100644 clang/test/CodeGen/SystemZ/gnu-atomic_is_lock_free-i128-8Al.c

diff --git a/clang/lib/Basic/Targets/SystemZ.h b/clang/lib/Basic/Targets/SystemZ.h
index 9ba255745cf2cc5..e4ec338880f2109 100644
--- a/clang/lib/Basic/Targets/SystemZ.h
+++ b/clang/lib/Basic/Targets/SystemZ.h
@@ -60,7 +60,7 @@ class LLVM_LIBRARY_VISIBILITY SystemZTargetInfo : public TargetInfo {
       resetDataLayout("E-m:e-i1:8:16-i8:8:16-i64:64-f128:64"
                       "-v128:64-a:8:16-n32:64");
     }
-    MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+    MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 128;
     HasStrictFP = true;
   }
 
diff --git a/clang/test/CodeGen/SystemZ/atomic-alignment.c b/clang/test/CodeGen/SystemZ/atomic-alignment.c
new file mode 100644
index 000000000000000..da478842ca31b2b
--- /dev/null
+++ b/clang/test/CodeGen/SystemZ/atomic-alignment.c
@@ -0,0 +1,35 @@
+// RUN: %clang_cc1 -triple s390x-linux-gnu -O3 -emit-llvm %s -o - | FileCheck %s
+//
+// Test alignment of 128 bit Atomic int/fp types, as well as loading
+// from memory with a simple addition. The fp128 is loaded as i128 and
+// then cast.
+
+// CHECK: @Atomic_int128 = {{.*}} i128 0, align 16
+// CHECK: @Atomic_fp128 = {{.*}} fp128 0xL00000000000000000000000000000000, align 16
+
+// CHECK-LABEL:  @f1
+// CHECK:      %atomic-load = load atomic i128, ptr @Atomic_int128 seq_cst, align 16
+// CHECK-NEXT: %add = add nsw i128 %atomic-load, 1
+// CHECK-NEXT: store i128 %add, ptr %agg.result, align 8
+// CHECK-NEXT: ret void
+
+// CHECK-LABEL:  @f2
+// CHECK:      %atomic-load = load atomic i128, ptr @Atomic_fp128 seq_cst, align 16
+// CHECK-NEXT: %0 = bitcast i128 %atomic-load to fp128
+// CHECK-NEXT: %add = fadd fp128 %0, 0xL00000000000000003FFF000000000000
+// CHECK-NEXT: store fp128 %add, ptr %agg.result, align 8
+// CHECK-NEXT: ret void
+
+
+#include <stdatomic.h>
+
+_Atomic __int128    Atomic_int128;
+_Atomic long double Atomic_fp128;
+
+__int128 f1() {
+  return Atomic_int128 + 1;
+}
+
+long double f2() {
+  return Atomic_fp128 + 1.0;
+}
diff --git a/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i128-16Al.c b/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i128-16Al.c
new file mode 100644
index 000000000000000..e3db2063312d2b4
--- /dev/null
+++ b/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i128-16Al.c
@@ -0,0 +1,257 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
+//
+// Test GNU atomic builtins for __int128 aligned to 16 bytes, which should be
+// expanded to LLVM I/R by the front end.
+
+#include <stdatomic.h>
+#include <stdint.h>
+
+__int128 Ptr __attribute__((aligned(16)));
+__int128 Ret __attribute__((aligned(16)));
+__int128 Val __attribute__((aligned(16)));
+__int128 Exp __attribute__((aligned(16)));
+__int128 Des __attribute__((aligned(16)));
+
+// CHECK-LABEL: @f1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load atomic i128, ptr @Ptr seq_cst, align 16
+// CHECK-NEXT:    store i128 [[TMP0]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2:![0-9]+]]
+// CHECK-NEXT:    ret void
+//
+__int128 f1() {
+  return __atomic_load_n(&Ptr, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load atomic i128, ptr @Ptr seq_cst, align 16
+// CHECK-NEXT:    store i128 [[TMP0]], ptr @Ret, align 16
+// CHECK-NEXT:    store i128 [[TMP0]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f2() {
+  __atomic_load(&Ptr, &Ret, memory_order_seq_cst);
+  return Ret;
+}
+
+// CHECK-LABEL: @f3(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store atomic i128 [[TMP0]], ptr @Ptr seq_cst, align 16
+// CHECK-NEXT:    ret void
+//
+void f3() {
+  __atomic_store_n(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 16
+// CHECK-NEXT:    store atomic i128 [[TMP0]], ptr @Ptr seq_cst, align 16
+// CHECK-NEXT:    ret void
+//
+void f4() {
+  __atomic_store(&Ptr, &Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f5(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw xchg ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT:    store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f5() {
+  return __atomic_exchange_n(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f6(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw xchg ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT:    store i128 [[TMP1]], ptr @Ret, align 16
+// CHECK-NEXT:    store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f6() {
+  __atomic_exchange(&Ptr, &Val, &Ret, memory_order_seq_cst);
+  return Ret;
+}
+
+// CHECK-LABEL: @f7(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Des, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load i128, ptr @Exp, align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = cmpxchg ptr @Ptr, i128 [[TMP1]], i128 [[TMP0]] seq_cst seq_cst, align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { i128, i1 } [[TMP2]], 1
+// CHECK-NEXT:    br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// CHECK:       cmpxchg.store_expected:
+// CHECK-NEXT:    [[TMP4:%.*]] = extractvalue { i128, i1 } [[TMP2]], 0
+// CHECK-NEXT:    store i128 [[TMP4]], ptr @Exp, align 16
+// CHECK-NEXT:    br label [[CMPXCHG_CONTINUE]]
+// CHECK:       cmpxchg.continue:
+// CHECK-NEXT:    ret i1 [[TMP3]]
+//
+_Bool f7() {
+  return __atomic_compare_exchange_n(&Ptr, &Exp, Des, 0,
+                                     memory_order_seq_cst, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Exp, align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load i128, ptr @Des, align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = cmpxchg ptr @Ptr, i128 [[TMP0]], i128 [[TMP1]] seq_cst seq_cst, align 16
+// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { i128, i1 } [[TMP2]], 1
+// CHECK-NEXT:    br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// CHECK:       cmpxchg.store_expected:
+// CHECK-NEXT:    [[TMP4:%.*]] = extractvalue { i128, i1 } [[TMP2]], 0
+// CHECK-NEXT:    store i128 [[TMP4]], ptr @Exp, align 16
+// CHECK-NEXT:    br label [[CMPXCHG_CONTINUE]]
+// CHECK:       cmpxchg.continue:
+// CHECK-NEXT:    ret i1 [[TMP3]]
+//
+_Bool f8() {
+  return __atomic_compare_exchange(&Ptr, &Exp, &Des, 0,
+                                   memory_order_seq_cst, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f9(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw add ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = add i128 [[TMP1]], [[TMP0]]
+// CHECK-NEXT:    store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f9() {
+  return __atomic_add_fetch(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f10(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw sub ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = sub i128 [[TMP1]], [[TMP0]]
+// CHECK-NEXT:    store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f10() {
+  return __atomic_sub_fetch(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f11(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw and ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = and i128 [[TMP1]], [[TMP0]]
+// CHECK-NEXT:    store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f11() {
+  return __atomic_and_fetch(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f12(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw xor ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = xor i128 [[TMP1]], [[TMP0]]
+// CHECK-NEXT:    store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f12() {
+  return __atomic_xor_fetch(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f13(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw or ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = or i128 [[TMP1]], [[TMP0]]
+// CHECK-NEXT:    store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f13() {
+  return __atomic_or_fetch(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f14(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw nand ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT:    [[TMP2:%.*]] = and i128 [[TMP1]], [[TMP0]]
+// CHECK-NEXT:    [[TMP3:%.*]] = xor i128 [[TMP2]], -1
+// CHECK-NEXT:    store i128 [[TMP3]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f14() {
+  return __atomic_nand_fetch(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f15(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw add ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT:    store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f15() {
+  return __atomic_fetch_add(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw sub ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT:    store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f16() {
+  return __atomic_fetch_sub(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f17(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw and ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT:    store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f17() {
+  return __atomic_fetch_and(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f18(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw xor ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT:    store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f18() {
+  return __atomic_fetch_xor(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f19(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw or ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT:    store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f19() {
+  return __atomic_fetch_or(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f20(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 16, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw nand ptr @Ptr, i128 [[TMP0]] seq_cst, align 16
+// CHECK-NEXT:    store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f20() {
+  return __atomic_fetch_nand(&Ptr, Val, memory_order_seq_cst);
+}
diff --git a/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i128-8Al.c b/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i128-8Al.c
new file mode 100644
index 000000000000000..e38e6572bd58f4e
--- /dev/null
+++ b/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i128-8Al.c
@@ -0,0 +1,301 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
+//
+// Test GNU atomic builtins for __int128 (with default alignment of 8 bytes
+// only), resulting in libcalls.
+
+#include <stdatomic.h>
+#include <stdint.h>
+
+__int128 Ptr;
+__int128 Ret;
+__int128 Val;
+__int128 Exp;
+__int128 Des;
+
+// TODO: This test and several more below have the unnecessary use of an alloca
+// remaining. This is due to 369c9b7, which changes the behavior of the MemCpyOpt
+// pass. It seems that a 'writable' attribute should now be added to the argument
+// in order for this optimization to proceed.
+
+// CHECK-LABEL: @f1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    call void @__atomic_load(i64 noundef 16, ptr noundef nonnull @Ptr, ptr noundef nonnull [[ATOMIC_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr [[ATOMIC_TEMP]], align 8, !tbaa [[TBAA2:![0-9]+]]
+// CHECK-NEXT:    store i128 [[TMP0]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f1() {
+  return __atomic_load_n(&Ptr, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @__atomic_load(i64 noundef 16, ptr noundef nonnull @Ptr, ptr noundef nonnull @Ret, i32 noundef signext 5)
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Ret, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store i128 [[TMP0]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f2() {
+  __atomic_load(&Ptr, &Ret, memory_order_seq_cst);
+  return Ret;
+}
+
+// CHECK-LABEL: @f3(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DOTATOMICTMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store i128 [[TMP0]], ptr [[DOTATOMICTMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    call void @__atomic_store(i64 noundef 16, ptr noundef nonnull @Ptr, ptr noundef nonnull [[DOTATOMICTMP]], i32 noundef signext 5)
+// CHECK-NEXT:    ret void
+//
+void f3() {
+  __atomic_store_n(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @__atomic_store(i64 noundef 16, ptr noundef nonnull @Ptr, ptr noundef nonnull @Val, i32 noundef signext 5)
+// CHECK-NEXT:    ret void
+//
+void f4() {
+  __atomic_store(&Ptr, &Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f5(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DOTATOMICTMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[ATOMIC_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store i128 [[TMP0]], ptr [[DOTATOMICTMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    call void @__atomic_exchange(i64 noundef 16, ptr noundef nonnull @Ptr, ptr noundef nonnull [[DOTATOMICTMP]], ptr noundef nonnull [[ATOMIC_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT:    [[TMP1:%.*]] = load i128, ptr [[ATOMIC_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f5() {
+  return __atomic_exchange_n(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f6(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    tail call void @__atomic_exchange(i64 noundef 16, ptr noundef nonnull @Ptr, ptr noundef nonnull @Val, ptr noundef nonnull @Ret, i32 noundef signext 5)
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Ret, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store i128 [[TMP0]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f6() {
+  __atomic_exchange(&Ptr, &Val, &Ret, memory_order_seq_cst);
+  return Ret;
+}
+
+// CHECK-LABEL: @f7(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[DOTATOMICTMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Des, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store i128 [[TMP0]], ptr [[DOTATOMICTMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[CALL:%.*]] = call zeroext i1 @__atomic_compare_exchange(i64 noundef 16, ptr noundef nonnull @Ptr, ptr noundef nonnull @Exp, ptr noundef nonnull [[DOTATOMICTMP]], i32 noundef signext 5, i32 noundef signext 5)
+// CHECK-NEXT:    ret i1 [[CALL]]
+//
+_Bool f7() {
+  return __atomic_compare_exchange_n(&Ptr, &Exp, Des, 0,
+                                     memory_order_seq_cst, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[CALL:%.*]] = tail call zeroext i1 @__atomic_compare_exchange(i64 noundef 16, ptr noundef nonnull @Ptr, ptr noundef nonnull @Exp, ptr noundef nonnull @Des, i32 noundef signext 5, i32 noundef signext 5)
+// CHECK-NEXT:    ret i1 [[CALL]]
+//
+_Bool f8() {
+  return __atomic_compare_exchange(&Ptr, &Exp, &Des, 0,
+                                   memory_order_seq_cst, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f9(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[INDIRECT_ARG_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store i128 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    call void @__atomic_fetch_add_16(ptr nonnull sret(i128) align 8 [[TMP]], ptr noundef nonnull @Ptr, ptr noundef nonnull [[INDIRECT_ARG_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT:    [[TMP1:%.*]] = load i128, ptr [[TMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = add i128 [[TMP1]], [[TMP0]]
+// CHECK-NEXT:    store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f9() {
+  return __atomic_add_fetch(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f10(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[INDIRECT_ARG_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store i128 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    call void @__atomic_fetch_sub_16(ptr nonnull sret(i128) align 8 [[TMP]], ptr noundef nonnull @Ptr, ptr noundef nonnull [[INDIRECT_ARG_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT:    [[TMP1:%.*]] = load i128, ptr [[TMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = sub i128 [[TMP1]], [[TMP0]]
+// CHECK-NEXT:    store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f10() {
+  return __atomic_sub_fetch(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f11(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[INDIRECT_ARG_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store i128 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    call void @__atomic_fetch_and_16(ptr nonnull sret(i128) align 8 [[TMP]], ptr noundef nonnull @Ptr, ptr noundef nonnull [[INDIRECT_ARG_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT:    [[TMP1:%.*]] = load i128, ptr [[TMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = and i128 [[TMP1]], [[TMP0]]
+// CHECK-NEXT:    store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f11() {
+  return __atomic_and_fetch(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f12(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[INDIRECT_ARG_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store i128 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    call void @__atomic_fetch_xor_16(ptr nonnull sret(i128) align 8 [[TMP]], ptr noundef nonnull @Ptr, ptr noundef nonnull [[INDIRECT_ARG_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT:    [[TMP1:%.*]] = load i128, ptr [[TMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = xor i128 [[TMP1]], [[TMP0]]
+// CHECK-NEXT:    store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f12() {
+  return __atomic_xor_fetch(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f13(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[INDIRECT_ARG_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store i128 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    call void @__atomic_fetch_or_16(ptr nonnull sret(i128) align 8 [[TMP]], ptr noundef nonnull @Ptr, ptr noundef nonnull [[INDIRECT_ARG_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT:    [[TMP1:%.*]] = load i128, ptr [[TMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = or i128 [[TMP1]], [[TMP0]]
+// CHECK-NEXT:    store i128 [[TMP2]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f13() {
+  return __atomic_or_fetch(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f14(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[INDIRECT_ARG_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store i128 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    call void @__atomic_fetch_nand_16(ptr nonnull sret(i128) align 8 [[TMP]], ptr noundef nonnull @Ptr, ptr noundef nonnull [[INDIRECT_ARG_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT:    [[TMP1:%.*]] = load i128, ptr [[TMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    [[TMP2:%.*]] = and i128 [[TMP1]], [[TMP0]]
+// CHECK-NEXT:    [[TMP3:%.*]] = xor i128 [[TMP2]], -1
+// CHECK-NEXT:    store i128 [[TMP3]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f14() {
+  return __atomic_nand_fetch(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f15(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[INDIRECT_ARG_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store i128 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    call void @__atomic_fetch_add_16(ptr nonnull sret(i128) align 8 [[TMP]], ptr noundef nonnull @Ptr, ptr noundef nonnull [[INDIRECT_ARG_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT:    [[TMP1:%.*]] = load i128, ptr [[TMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f15() {
+  return __atomic_fetch_add(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[INDIRECT_ARG_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store i128 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    call void @__atomic_fetch_sub_16(ptr nonnull sret(i128) align 8 [[TMP]], ptr noundef nonnull @Ptr, ptr noundef nonnull [[INDIRECT_ARG_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT:    [[TMP1:%.*]] = load i128, ptr [[TMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f16() {
+  return __atomic_fetch_sub(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f17(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[INDIRECT_ARG_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store i128 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    call void @__atomic_fetch_and_16(ptr nonnull sret(i128) align 8 [[TMP]], ptr noundef nonnull @Ptr, ptr noundef nonnull [[INDIRECT_ARG_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT:    [[TMP1:%.*]] = load i128, ptr [[TMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f17() {
+  return __atomic_fetch_and(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f18(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[INDIRECT_ARG_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store i128 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    call void @__atomic_fetch_xor_16(ptr nonnull sret(i128) align 8 [[TMP]], ptr noundef nonnull @Ptr, ptr noundef nonnull [[INDIRECT_ARG_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT:    [[TMP1:%.*]] = load i128, ptr [[TMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f18() {
+  return __atomic_fetch_xor(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f19(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[INDIRECT_ARG_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store i128 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    call void @__atomic_fetch_or_16(ptr nonnull sret(i128) align 8 [[TMP]], ptr noundef nonnull @Ptr, ptr noundef nonnull [[INDIRECT_ARG_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT:    [[TMP1:%.*]] = load i128, ptr [[TMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f19() {
+  return __atomic_fetch_or(&Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f20(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[INDIRECT_ARG_TEMP:%.*]] = alloca i128, align 8
+// CHECK-NEXT:    [[TMP0:%.*]] = load i128, ptr @Val, align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store i128 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    call void @__atomic_fetch_nand_16(ptr nonnull sret(i128) align 8 [[TMP]], ptr noundef nonnull @Ptr, ptr noundef nonnull [[INDIRECT_ARG_TEMP]], i32 noundef signext 5)
+// CHECK-NEXT:    [[TMP1:%.*]] = load i128, ptr [[TMP]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    store i128 [[TMP1]], ptr [[AGG_RESULT:%.*]], align 8, !tbaa [[TBAA2]]
+// CHECK-NEXT:    ret void
+//
+__int128 f20() {
+  return __atomic_fetch_nand(&Ptr, Val, memory_order_seq_cst);
+}
diff --git a/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i16.c b/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i16.c
new file mode 100644
index 000000000000000..7c6a82f14197a15
--- /dev/null
+++ b/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i16.c
@@ -0,0 +1,219 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
+//
+// Test GNU atomic builtins for int16_t.
+
+#include <stdatomic.h>
+#include <stdint.h>
+
+// CHECK-LABEL: @f1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load atomic i16, ptr [[PTR:%.*]] seq_cst, align 2
+// CHECK-NEXT:    ret i16 [[TMP0]]
+//
+int16_t f1(int16_t *Ptr) {
+  return __atomic_load_n(Ptr, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load atomic i16, ptr [[PTR:%.*]] seq_cst, align 2
+// CHECK-NEXT:    store i16 [[TMP0]], ptr [[RET:%.*]], align 2
+// CHECK-NEXT:    ret i16 [[TMP0]]
+//
+int16_t f2(int16_t *Ptr, int16_t *Ret) {
+  __atomic_load(Ptr, Ret, memory_order_seq_cst);
+  return *Ret;
+}
+
+// CHECK-LABEL: @f3(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    store atomic i16 [[VAL:%.*]], ptr [[PTR:%.*]] seq_cst, align 2
+// CHECK-NEXT:    ret void
+//
+void f3(int16_t *Ptr, int16_t Val) {
+  __atomic_store_n(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr [[VAL:%.*]], align 2
+// CHECK-NEXT:    store atomic i16 [[TMP0]], ptr [[PTR:%.*]] seq_cst, align 2
+// CHECK-NEXT:    ret void
+//
+void f4(int16_t *Ptr, int16_t *Val) {
+  __atomic_store(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f5(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT:    ret i16 [[TMP0]]
+//
+int16_t f5(int16_t *Ptr, int16_t Val) {
+  return __atomic_exchange_n(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f6(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr [[VAL:%.*]], align 2
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i16 [[TMP0]] seq_cst, align 2
+// CHECK-NEXT:    store i16 [[TMP1]], ptr [[RET:%.*]], align 2
+// CHECK-NEXT:    ret i16 [[TMP1]]
+//
+int16_t f6(int16_t *Ptr, int16_t *Val, int16_t *Ret) {
+  __atomic_exchange(Ptr, Val, Ret, memory_order_seq_cst);
+  return *Ret;
+}
+
+// CHECK-LABEL: @f7(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr [[EXP:%.*]], align 2
+// CHECK-NEXT:    [[TMP1:%.*]] = cmpxchg ptr [[PTR:%.*]], i16 [[TMP0]], i16 [[DES:%.*]] seq_cst seq_cst, align 2
+// CHECK-NEXT:    [[TMP2:%.*]] = extractvalue { i16, i1 } [[TMP1]], 1
+// CHECK-NEXT:    br i1 [[TMP2]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// CHECK:       cmpxchg.store_expected:
+// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { i16, i1 } [[TMP1]], 0
+// CHECK-NEXT:    store i16 [[TMP3]], ptr [[EXP]], align 2
+// CHECK-NEXT:    br label [[CMPXCHG_CONTINUE]]
+// CHECK:       cmpxchg.continue:
+// CHECK-NEXT:    ret i1 [[TMP2]]
+//
+_Bool f7(int16_t *Ptr, int16_t *Exp, int16_t Des) {
+  return __atomic_compare_exchange_n(Ptr, Exp, Des, 0,
+                                     memory_order_seq_cst, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i16, ptr [[EXP:%.*]], align 2
+// CHECK-NEXT:    [[TMP1:%.*]] = load i16, ptr [[DES:%.*]], align 2
+// CHECK-NEXT:    [[TMP2:%.*]] = cmpxchg ptr [[PTR:%.*]], i16 [[TMP0]], i16 [[TMP1]] seq_cst seq_cst, align 2
+// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { i16, i1 } [[TMP2]], 1
+// CHECK-NEXT:    br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// CHECK:       cmpxchg.store_expected:
+// CHECK-NEXT:    [[TMP4:%.*]] = extractvalue { i16, i1 } [[TMP2]], 0
+// CHECK-NEXT:    store i16 [[TMP4]], ptr [[EXP]], align 2
+// CHECK-NEXT:    br label [[CMPXCHG_CONTINUE]]
+// CHECK:       cmpxchg.continue:
+// CHECK-NEXT:    ret i1 [[TMP3]]
+//
+_Bool f8(int16_t *Ptr, int16_t *Exp, int16_t *Des) {
+  return __atomic_compare_exchange(Ptr, Exp, Des, 0,
+                                   memory_order_seq_cst, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f9(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT:    [[TMP1:%.*]] = add i16 [[TMP0]], [[VAL]]
+// CHECK-NEXT:    ret i16 [[TMP1]]
+//
+int16_t f9(int16_t *Ptr, int16_t Val) {
+  return __atomic_add_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f10(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT:    [[TMP1:%.*]] = sub i16 [[TMP0]], [[VAL]]
+// CHECK-NEXT:    ret i16 [[TMP1]]
+//
+int16_t f10(int16_t *Ptr, int16_t Val) {
+  return __atomic_sub_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f11(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT:    [[TMP1:%.*]] = and i16 [[TMP0]], [[VAL]]
+// CHECK-NEXT:    ret i16 [[TMP1]]
+//
+int16_t f11(int16_t *Ptr, int16_t Val) {
+  return __atomic_and_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f12(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i16 [[TMP0]], [[VAL]]
+// CHECK-NEXT:    ret i16 [[TMP1]]
+//
+int16_t f12(int16_t *Ptr, int16_t Val) {
+  return __atomic_xor_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f13(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT:    [[TMP1:%.*]] = or i16 [[TMP0]], [[VAL]]
+// CHECK-NEXT:    ret i16 [[TMP1]]
+//
+int16_t f13(int16_t *Ptr, int16_t Val) {
+  return __atomic_or_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f14(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT:    [[TMP1:%.*]] = and i16 [[TMP0]], [[VAL]]
+// CHECK-NEXT:    [[TMP2:%.*]] = xor i16 [[TMP1]], -1
+// CHECK-NEXT:    ret i16 [[TMP2]]
+//
+int16_t f14(int16_t *Ptr, int16_t Val) {
+  return __atomic_nand_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f15(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT:    ret i16 [[TMP0]]
+//
+int16_t f15(int16_t *Ptr, int16_t Val) {
+  return __atomic_fetch_add(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT:    ret i16 [[TMP0]]
+//
+int16_t f16(int16_t *Ptr, int16_t Val) {
+  return __atomic_fetch_sub(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f17(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT:    ret i16 [[TMP0]]
+//
+int16_t f17(int16_t *Ptr, int16_t Val) {
+  return __atomic_fetch_and(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f18(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT:    ret i16 [[TMP0]]
+//
+int16_t f18(int16_t *Ptr, int16_t Val) {
+  return __atomic_fetch_xor(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f19(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT:    ret i16 [[TMP0]]
+//
+int16_t f19(int16_t *Ptr, int16_t Val) {
+  return __atomic_fetch_or(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f20(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i16 [[VAL:%.*]] seq_cst, align 2
+// CHECK-NEXT:    ret i16 [[TMP0]]
+//
+int16_t f20(int16_t *Ptr, int16_t Val) {
+  return __atomic_fetch_nand(Ptr, Val, memory_order_seq_cst);
+}
diff --git a/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i32.c b/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i32.c
new file mode 100644
index 000000000000000..ba630e7c952e5b2
--- /dev/null
+++ b/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i32.c
@@ -0,0 +1,219 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
+//
+// Test GNU atomic builtins for int32_t.
+
+#include <stdatomic.h>
+#include <stdint.h>
+
+// CHECK-LABEL: @f1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load atomic i32, ptr [[PTR:%.*]] seq_cst, align 4
+// CHECK-NEXT:    ret i32 [[TMP0]]
+//
+int32_t f1(int32_t *Ptr) {
+  return __atomic_load_n(Ptr, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load atomic i32, ptr [[PTR:%.*]] seq_cst, align 4
+// CHECK-NEXT:    store i32 [[TMP0]], ptr [[RET:%.*]], align 4
+// CHECK-NEXT:    ret i32 [[TMP0]]
+//
+int32_t f2(int32_t *Ptr, int32_t *Ret) {
+  __atomic_load(Ptr, Ret, memory_order_seq_cst);
+  return *Ret;
+}
+
+// CHECK-LABEL: @f3(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    store atomic i32 [[VAL:%.*]], ptr [[PTR:%.*]] seq_cst, align 4
+// CHECK-NEXT:    ret void
+//
+void f3(int32_t *Ptr, int32_t Val) {
+  __atomic_store_n(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[VAL:%.*]], align 4
+// CHECK-NEXT:    store atomic i32 [[TMP0]], ptr [[PTR:%.*]] seq_cst, align 4
+// CHECK-NEXT:    ret void
+//
+void f4(int32_t *Ptr, int32_t *Val) {
+  __atomic_store(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f5(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT:    ret i32 [[TMP0]]
+//
+int32_t f5(int32_t *Ptr, int32_t Val) {
+  return __atomic_exchange_n(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f6(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[VAL:%.*]], align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i32 [[TMP0]] seq_cst, align 4
+// CHECK-NEXT:    store i32 [[TMP1]], ptr [[RET:%.*]], align 4
+// CHECK-NEXT:    ret i32 [[TMP1]]
+//
+int32_t f6(int32_t *Ptr, int32_t *Val, int32_t *Ret) {
+  __atomic_exchange(Ptr, Val, Ret, memory_order_seq_cst);
+  return *Ret;
+}
+
+// CHECK-LABEL: @f7(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[EXP:%.*]], align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = cmpxchg ptr [[PTR:%.*]], i32 [[TMP0]], i32 [[DES:%.*]] seq_cst seq_cst, align 4
+// CHECK-NEXT:    [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
+// CHECK-NEXT:    br i1 [[TMP2]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// CHECK:       cmpxchg.store_expected:
+// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { i32, i1 } [[TMP1]], 0
+// CHECK-NEXT:    store i32 [[TMP3]], ptr [[EXP]], align 4
+// CHECK-NEXT:    br label [[CMPXCHG_CONTINUE]]
+// CHECK:       cmpxchg.continue:
+// CHECK-NEXT:    ret i1 [[TMP2]]
+//
+_Bool f7(int32_t *Ptr, int32_t *Exp, int32_t Des) {
+  return __atomic_compare_exchange_n(Ptr, Exp, Des, 0,
+                                     memory_order_seq_cst, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[EXP:%.*]], align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[DES:%.*]], align 4
+// CHECK-NEXT:    [[TMP2:%.*]] = cmpxchg ptr [[PTR:%.*]], i32 [[TMP0]], i32 [[TMP1]] seq_cst seq_cst, align 4
+// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { i32, i1 } [[TMP2]], 1
+// CHECK-NEXT:    br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// CHECK:       cmpxchg.store_expected:
+// CHECK-NEXT:    [[TMP4:%.*]] = extractvalue { i32, i1 } [[TMP2]], 0
+// CHECK-NEXT:    store i32 [[TMP4]], ptr [[EXP]], align 4
+// CHECK-NEXT:    br label [[CMPXCHG_CONTINUE]]
+// CHECK:       cmpxchg.continue:
+// CHECK-NEXT:    ret i1 [[TMP3]]
+//
+_Bool f8(int32_t *Ptr, int32_t *Exp, int32_t *Des) {
+  return __atomic_compare_exchange(Ptr, Exp, Des, 0,
+                                   memory_order_seq_cst, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f9(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = add i32 [[TMP0]], [[VAL]]
+// CHECK-NEXT:    ret i32 [[TMP1]]
+//
+int32_t f9(int32_t *Ptr, int32_t Val) {
+  return __atomic_add_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f10(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = sub i32 [[TMP0]], [[VAL]]
+// CHECK-NEXT:    ret i32 [[TMP1]]
+//
+int32_t f10(int32_t *Ptr, int32_t Val) {
+  return __atomic_sub_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f11(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[TMP0]], [[VAL]]
+// CHECK-NEXT:    ret i32 [[TMP1]]
+//
+int32_t f11(int32_t *Ptr, int32_t Val) {
+  return __atomic_and_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f12(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[TMP0]], [[VAL]]
+// CHECK-NEXT:    ret i32 [[TMP1]]
+//
+int32_t f12(int32_t *Ptr, int32_t Val) {
+  return __atomic_xor_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f13(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[TMP0]], [[VAL]]
+// CHECK-NEXT:    ret i32 [[TMP1]]
+//
+int32_t f13(int32_t *Ptr, int32_t Val) {
+  return __atomic_or_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f14(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[TMP0]], [[VAL]]
+// CHECK-NEXT:    [[TMP2:%.*]] = xor i32 [[TMP1]], -1
+// CHECK-NEXT:    ret i32 [[TMP2]]
+//
+int32_t f14(int32_t *Ptr, int32_t Val) {
+  return __atomic_nand_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f15(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT:    ret i32 [[TMP0]]
+//
+int32_t f15(int32_t *Ptr, int32_t Val) {
+  return __atomic_fetch_add(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT:    ret i32 [[TMP0]]
+//
+int32_t f16(int32_t *Ptr, int32_t Val) {
+  return __atomic_fetch_sub(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f17(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT:    ret i32 [[TMP0]]
+//
+int32_t f17(int32_t *Ptr, int32_t Val) {
+  return __atomic_fetch_and(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f18(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT:    ret i32 [[TMP0]]
+//
+int32_t f18(int32_t *Ptr, int32_t Val) {
+  return __atomic_fetch_xor(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f19(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT:    ret i32 [[TMP0]]
+//
+int32_t f19(int32_t *Ptr, int32_t Val) {
+  return __atomic_fetch_or(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f20(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i32 [[VAL:%.*]] seq_cst, align 4
+// CHECK-NEXT:    ret i32 [[TMP0]]
+//
+int32_t f20(int32_t *Ptr, int32_t Val) {
+  return __atomic_fetch_nand(Ptr, Val, memory_order_seq_cst);
+}
diff --git a/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i64.c b/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i64.c
new file mode 100644
index 000000000000000..25c69ee8c54bf53
--- /dev/null
+++ b/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i64.c
@@ -0,0 +1,219 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
+//
+// Test GNU atomic builtins for int64_t.
+
+#include <stdatomic.h>
+#include <stdint.h>
+
+// CHECK-LABEL: @f1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load atomic i64, ptr [[PTR:%.*]] seq_cst, align 8
+// CHECK-NEXT:    ret i64 [[TMP0]]
+//
+int64_t f1(int64_t *Ptr) {
+  return __atomic_load_n(Ptr, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load atomic i64, ptr [[PTR:%.*]] seq_cst, align 8
+// CHECK-NEXT:    store i64 [[TMP0]], ptr [[RET:%.*]], align 8
+// CHECK-NEXT:    ret i64 [[TMP0]]
+//
+int64_t f2(int64_t *Ptr, int64_t *Ret) {
+  __atomic_load(Ptr, Ret, memory_order_seq_cst);
+  return *Ret;
+}
+
+// CHECK-LABEL: @f3(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    store atomic i64 [[VAL:%.*]], ptr [[PTR:%.*]] seq_cst, align 8
+// CHECK-NEXT:    ret void
+//
+void f3(int64_t *Ptr, int64_t Val) {
+  __atomic_store_n(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr [[VAL:%.*]], align 8
+// CHECK-NEXT:    store atomic i64 [[TMP0]], ptr [[PTR:%.*]] seq_cst, align 8
+// CHECK-NEXT:    ret void
+//
+void f4(int64_t *Ptr, int64_t *Val) {
+  __atomic_store(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f5(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT:    ret i64 [[TMP0]]
+//
+int64_t f5(int64_t *Ptr, int64_t Val) {
+  return __atomic_exchange_n(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f6(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr [[VAL:%.*]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i64 [[TMP0]] seq_cst, align 8
+// CHECK-NEXT:    store i64 [[TMP1]], ptr [[RET:%.*]], align 8
+// CHECK-NEXT:    ret i64 [[TMP1]]
+//
+int64_t f6(int64_t *Ptr, int64_t *Val, int64_t *Ret) {
+  __atomic_exchange(Ptr, Val, Ret, memory_order_seq_cst);
+  return *Ret;
+}
+
+// CHECK-LABEL: @f7(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr [[EXP:%.*]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = cmpxchg ptr [[PTR:%.*]], i64 [[TMP0]], i64 [[DES:%.*]] seq_cst seq_cst, align 8
+// CHECK-NEXT:    [[TMP2:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
+// CHECK-NEXT:    br i1 [[TMP2]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// CHECK:       cmpxchg.store_expected:
+// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
+// CHECK-NEXT:    store i64 [[TMP3]], ptr [[EXP]], align 8
+// CHECK-NEXT:    br label [[CMPXCHG_CONTINUE]]
+// CHECK:       cmpxchg.continue:
+// CHECK-NEXT:    ret i1 [[TMP2]]
+//
+_Bool f7(int64_t *Ptr, int64_t *Exp, int64_t Des) {
+  return __atomic_compare_exchange_n(Ptr, Exp, Des, 0,
+                                     memory_order_seq_cst, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr [[EXP:%.*]], align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr [[DES:%.*]], align 8
+// CHECK-NEXT:    [[TMP2:%.*]] = cmpxchg ptr [[PTR:%.*]], i64 [[TMP0]], i64 [[TMP1]] seq_cst seq_cst, align 8
+// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
+// CHECK-NEXT:    br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// CHECK:       cmpxchg.store_expected:
+// CHECK-NEXT:    [[TMP4:%.*]] = extractvalue { i64, i1 } [[TMP2]], 0
+// CHECK-NEXT:    store i64 [[TMP4]], ptr [[EXP]], align 8
+// CHECK-NEXT:    br label [[CMPXCHG_CONTINUE]]
+// CHECK:       cmpxchg.continue:
+// CHECK-NEXT:    ret i1 [[TMP3]]
+//
+_Bool f8(int64_t *Ptr, int64_t *Exp, int64_t *Des) {
+  return __atomic_compare_exchange(Ptr, Exp, Des, 0,
+                                   memory_order_seq_cst, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f9(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = add i64 [[TMP0]], [[VAL]]
+// CHECK-NEXT:    ret i64 [[TMP1]]
+//
+int64_t f9(int64_t *Ptr, int64_t Val) {
+  return __atomic_add_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f10(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = sub i64 [[TMP0]], [[VAL]]
+// CHECK-NEXT:    ret i64 [[TMP1]]
+//
+int64_t f10(int64_t *Ptr, int64_t Val) {
+  return __atomic_sub_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f11(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[TMP0]], [[VAL]]
+// CHECK-NEXT:    ret i64 [[TMP1]]
+//
+int64_t f11(int64_t *Ptr, int64_t Val) {
+  return __atomic_and_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f12(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], [[VAL]]
+// CHECK-NEXT:    ret i64 [[TMP1]]
+//
+int64_t f12(int64_t *Ptr, int64_t Val) {
+  return __atomic_xor_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f13(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = or i64 [[TMP0]], [[VAL]]
+// CHECK-NEXT:    ret i64 [[TMP1]]
+//
+int64_t f13(int64_t *Ptr, int64_t Val) {
+  return __atomic_or_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f14(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[TMP0]], [[VAL]]
+// CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], -1
+// CHECK-NEXT:    ret i64 [[TMP2]]
+//
+int64_t f14(int64_t *Ptr, int64_t Val) {
+  return __atomic_nand_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f15(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT:    ret i64 [[TMP0]]
+//
+int64_t f15(int64_t *Ptr, int64_t Val) {
+  return __atomic_fetch_add(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT:    ret i64 [[TMP0]]
+//
+int64_t f16(int64_t *Ptr, int64_t Val) {
+  return __atomic_fetch_sub(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f17(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT:    ret i64 [[TMP0]]
+//
+int64_t f17(int64_t *Ptr, int64_t Val) {
+  return __atomic_fetch_and(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f18(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT:    ret i64 [[TMP0]]
+//
+int64_t f18(int64_t *Ptr, int64_t Val) {
+  return __atomic_fetch_xor(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f19(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT:    ret i64 [[TMP0]]
+//
+int64_t f19(int64_t *Ptr, int64_t Val) {
+  return __atomic_fetch_or(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f20(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
+// CHECK-NEXT:    ret i64 [[TMP0]]
+//
+int64_t f20(int64_t *Ptr, int64_t Val) {
+  return __atomic_fetch_nand(Ptr, Val, memory_order_seq_cst);
+}
diff --git a/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i8.c b/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i8.c
new file mode 100644
index 000000000000000..1f4b455bc02610a
--- /dev/null
+++ b/clang/test/CodeGen/SystemZ/gnu-atomic-builtins-i8.c
@@ -0,0 +1,219 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
+//
+// Test GNU atomic builtins for int8_t.
+
+#include <stdatomic.h>
+#include <stdint.h>
+
+// CHECK-LABEL: @f1(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load atomic i8, ptr [[PTR:%.*]] seq_cst, align 1
+// CHECK-NEXT:    ret i8 [[TMP0]]
+//
+int8_t f1(int8_t *Ptr) {
+  return __atomic_load_n(Ptr, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f2(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load atomic i8, ptr [[PTR:%.*]] seq_cst, align 1
+// CHECK-NEXT:    store i8 [[TMP0]], ptr [[RET:%.*]], align 1
+// CHECK-NEXT:    ret i8 [[TMP0]]
+//
+int8_t f2(int8_t *Ptr, int8_t *Ret) {
+  __atomic_load(Ptr, Ret, memory_order_seq_cst);
+  return *Ret;
+}
+
+// CHECK-LABEL: @f3(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    store atomic i8 [[VAL:%.*]], ptr [[PTR:%.*]] seq_cst, align 1
+// CHECK-NEXT:    ret void
+//
+void f3(int8_t *Ptr, int8_t Val) {
+  __atomic_store_n(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f4(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[VAL:%.*]], align 1
+// CHECK-NEXT:    store atomic i8 [[TMP0]], ptr [[PTR:%.*]] seq_cst, align 1
+// CHECK-NEXT:    ret void
+//
+void f4(int8_t *Ptr, int8_t *Val) {
+  __atomic_store(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f5(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT:    ret i8 [[TMP0]]
+//
+int8_t f5(int8_t *Ptr, int8_t Val) {
+  return __atomic_exchange_n(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f6(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[VAL:%.*]], align 1
+// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i8 [[TMP0]] seq_cst, align 1
+// CHECK-NEXT:    store i8 [[TMP1]], ptr [[RET:%.*]], align 1
+// CHECK-NEXT:    ret i8 [[TMP1]]
+//
+int8_t f6(int8_t *Ptr, int8_t *Val, int8_t *Ret) {
+  __atomic_exchange(Ptr, Val, Ret, memory_order_seq_cst);
+  return *Ret;
+}
+
+// CHECK-LABEL: @f7(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[EXP:%.*]], align 1
+// CHECK-NEXT:    [[TMP1:%.*]] = cmpxchg ptr [[PTR:%.*]], i8 [[TMP0]], i8 [[DES:%.*]] seq_cst seq_cst, align 1
+// CHECK-NEXT:    [[TMP2:%.*]] = extractvalue { i8, i1 } [[TMP1]], 1
+// CHECK-NEXT:    br i1 [[TMP2]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// CHECK:       cmpxchg.store_expected:
+// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { i8, i1 } [[TMP1]], 0
+// CHECK-NEXT:    store i8 [[TMP3]], ptr [[EXP]], align 1
+// CHECK-NEXT:    br label [[CMPXCHG_CONTINUE]]
+// CHECK:       cmpxchg.continue:
+// CHECK-NEXT:    ret i1 [[TMP2]]
+//
+_Bool f7(int8_t *Ptr, int8_t *Exp, int8_t Des) {
+  return __atomic_compare_exchange_n(Ptr, Exp, Des, 0,
+                                     memory_order_seq_cst, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f8(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = load i8, ptr [[EXP:%.*]], align 1
+// CHECK-NEXT:    [[TMP1:%.*]] = load i8, ptr [[DES:%.*]], align 1
+// CHECK-NEXT:    [[TMP2:%.*]] = cmpxchg ptr [[PTR:%.*]], i8 [[TMP0]], i8 [[TMP1]] seq_cst seq_cst, align 1
+// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { i8, i1 } [[TMP2]], 1
+// CHECK-NEXT:    br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
+// CHECK:       cmpxchg.store_expected:
+// CHECK-NEXT:    [[TMP4:%.*]] = extractvalue { i8, i1 } [[TMP2]], 0
+// CHECK-NEXT:    store i8 [[TMP4]], ptr [[EXP]], align 1
+// CHECK-NEXT:    br label [[CMPXCHG_CONTINUE]]
+// CHECK:       cmpxchg.continue:
+// CHECK-NEXT:    ret i1 [[TMP3]]
+//
+_Bool f8(int8_t *Ptr, int8_t *Exp, int8_t *Des) {
+  return __atomic_compare_exchange(Ptr, Exp, Des, 0,
+                                   memory_order_seq_cst, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f9(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT:    [[TMP1:%.*]] = add i8 [[TMP0]], [[VAL]]
+// CHECK-NEXT:    ret i8 [[TMP1]]
+//
+int8_t f9(int8_t *Ptr, int8_t Val) {
+  return __atomic_add_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f10(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT:    [[TMP1:%.*]] = sub i8 [[TMP0]], [[VAL]]
+// CHECK-NEXT:    ret i8 [[TMP1]]
+//
+int8_t f10(int8_t *Ptr, int8_t Val) {
+  return __atomic_sub_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f11(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[TMP0]], [[VAL]]
+// CHECK-NEXT:    ret i8 [[TMP1]]
+//
+int8_t f11(int8_t *Ptr, int8_t Val) {
+  return __atomic_and_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f12(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT:    [[TMP1:%.*]] = xor i8 [[TMP0]], [[VAL]]
+// CHECK-NEXT:    ret i8 [[TMP1]]
+//
+int8_t f12(int8_t *Ptr, int8_t Val) {
+  return __atomic_xor_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f13(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT:    [[TMP1:%.*]] = or i8 [[TMP0]], [[VAL]]
+// CHECK-NEXT:    ret i8 [[TMP1]]
+//
+int8_t f13(int8_t *Ptr, int8_t Val) {
+  return __atomic_or_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f14(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[TMP0]], [[VAL]]
+// CHECK-NEXT:    [[TMP2:%.*]] = xor i8 [[TMP1]], -1
+// CHECK-NEXT:    ret i8 [[TMP2]]
+//
+int8_t f14(int8_t *Ptr, int8_t Val) {
+  return __atomic_nand_fetch(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f15(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT:    ret i8 [[TMP0]]
+//
+int8_t f15(int8_t *Ptr, int8_t Val) {
+  return __atomic_fetch_add(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f16(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT:    ret i8 [[TMP0]]
+//
+int8_t f16(int8_t *Ptr, int8_t Val) {
+  return __atomic_fetch_sub(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f17(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT:    ret i8 [[TMP0]]
+//
+int8_t f17(int8_t *Ptr, int8_t Val) {
+  return __atomic_fetch_and(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f18(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT:    ret i8 [[TMP0]]
+//
+int8_t f18(int8_t *Ptr, int8_t Val) {
+  return __atomic_fetch_xor(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f19(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT:    ret i8 [[TMP0]]
+//
+int8_t f19(int8_t *Ptr, int8_t Val) {
+  return __atomic_fetch_or(Ptr, Val, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: @f20(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i8 [[VAL:%.*]] seq_cst, align 1
+// CHECK-NEXT:    ret i8 [[TMP0]]
+//
+int8_t f20(int8_t *Ptr, int8_t Val) {
+  return __atomic_fetch_nand(Ptr, Val, memory_order_seq_cst);
+}
diff --git a/clang/test/CodeGen/SystemZ/gnu-atomic_is_lock_free-i128-16Al.c b/clang/test/CodeGen/SystemZ/gnu-atomic_is_lock_free-i128-16Al.c
new file mode 100644
index 000000000000000..343bfb36ebfdccf
--- /dev/null
+++ b/clang/test/CodeGen/SystemZ/gnu-atomic_is_lock_free-i128-16Al.c
@@ -0,0 +1,54 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
+//
+// Test __atomic_is_lock_free() and __atomic_always_lock_free() for __int128
+// with 16 byte alignment.
+
+#include <stdatomic.h>
+#include <stdint.h>
+
+__int128 Int128_Al16 __attribute__((aligned(16)));
+
+// CHECK-LABEL: @fun_PtrAl16_is_lock_free(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[CALL:%.*]] = tail call zeroext i1 @__atomic_is_lock_free(i64 noundef 16, ptr noundef nonnull @Int128_Al16) #[[ATTR2]]
+// CHECK-NEXT:    ret i1 [[CALL]]
+//
+_Bool fun_PtrAl16_is_lock_free() {
+  return __atomic_is_lock_free(16, &Int128_Al16);
+}
+
+// CHECK-LABEL: @fun_PtrAl16_always_lock_free(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    ret i1 false
+//
+_Bool fun_PtrAl16_always_lock_free() {
+  return __atomic_always_lock_free(16, &Int128_Al16);
+}
+
+// Also test these with a 16 byte size and null-pointer.
+// CHECK-LABEL: @fun_noptr_is_lock_free(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    ret i1 true
+//
+_Bool fun_noptr_is_lock_free() {
+  return __atomic_is_lock_free(16, 0);
+}
+
+// CHECK-LABEL: @fun_noptr_always_lock_free(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    ret i1 true
+//
+_Bool fun_noptr_always_lock_free() {
+  return __atomic_always_lock_free(16, 0);
+}
+
+// Also test __c11_atomic_is_lock_free() with a 16 byte size.
+// CHECK-LABEL: @fun_c11_is_lock_free(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    ret i1 true
+//
+_Bool fun_c11_is_lock_free() {
+  return __c11_atomic_is_lock_free(16);
+}
+
diff --git a/clang/test/CodeGen/SystemZ/gnu-atomic_is_lock_free-i128-8Al.c b/clang/test/CodeGen/SystemZ/gnu-atomic_is_lock_free-i128-8Al.c
new file mode 100644
index 000000000000000..f8e39996ad6d51f
--- /dev/null
+++ b/clang/test/CodeGen/SystemZ/gnu-atomic_is_lock_free-i128-8Al.c
@@ -0,0 +1,28 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
+//
+// Test __atomic_is_lock_free() and __atomic_always_lock_free() for __int128
+// with (default) 8 byte alignment.
+
+#include <stdatomic.h>
+#include <stdint.h>
+
+__int128 Int128_Al8 __attribute__((aligned(8)));
+
+// CHECK-LABEL: @fun_PtrAl16_is_lock_free(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[CALL:%.*]] = tail call zeroext i1 @__atomic_is_lock_free(i64 noundef 16, ptr noundef nonnull @Int128_Al16) #[[ATTR2]]
+// CHECK-NEXT:    ret i1 [[CALL]]
+//
+_Bool fun_PtrAl8_is_lock_free() {
+  return __atomic_is_lock_free(16, &Int128_Al8);
+}
+
+// CHECK-LABEL: @fun_PtrAl16_always_lock_free(
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    ret i1 false
+//
+_Bool fun_PtrAl8_always_lock_free() {
+  return __atomic_always_lock_free(16, &Int128_Al8);
+}
+
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 4e57986206dc680..522b3dea46d81f1 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -129,6 +129,8 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
   setBooleanContents(ZeroOrOneBooleanContent);
   setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
 
+  setMaxAtomicSizeInBitsSupported(128);
+
   // Instructions are strings of 2-byte aligned 2-byte values.
   setMinFunctionAlignment(Align(2));
   // For performance reasons we prefer 16-byte alignment.
@@ -874,9 +876,11 @@ bool SystemZTargetLowering::hasInlineStackProbe(const MachineFunction &MF) const
 
 TargetLowering::AtomicExpansionKind
 SystemZTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
+  // TODO: Expand all atomicrmw operations here (via AtomicExpand) instead of in the backend.
   return (RMW->isFloatingPointOperation() ||
           RMW->getOperation() == AtomicRMWInst::UIncWrap ||
-          RMW->getOperation() == AtomicRMWInst::UDecWrap)
+          RMW->getOperation() == AtomicRMWInst::UDecWrap ||
+          RMW->getType()->isIntegerTy(128))
              ? AtomicExpansionKind::CmpXChg
              : AtomicExpansionKind::None;
 }
diff --git a/llvm/test/CodeGen/SystemZ/atomicrmw-ops-i128.ll b/llvm/test/CodeGen/SystemZ/atomicrmw-ops-i128.ll
index 1838b9297ff4c7a..0e8f04468022202 100644
--- a/llvm/test/CodeGen/SystemZ/atomicrmw-ops-i128.ll
+++ b/llvm/test/CodeGen/SystemZ/atomicrmw-ops-i128.ll
@@ -1,103 +1,511 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
 ; Test i128 atomicrmw operations.
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z15 | FileCheck %s
+;
+; Test expansion of AtomicRMW instructions, which assumes natural alignment.
+; Note that the multiple register moves inside the CDSG loops should go away
+; once the new i128 support is added.
 
 ; Check register exchange.
-define i128 @f1(i128 %dummy, ptr %src, i128 %b) {
-; CHECK-LABEL: f1:
-; CHECK: brasl %r14, __sync_lock_test_and_set_16 at PLT
-; CHECK: br %r14
+define i128 @atomicrmw_xchg(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_xchg:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    stmg %r12, %r15, 96(%r15)
+; CHECK-NEXT:    .cfi_offset %r12, -64
+; CHECK-NEXT:    .cfi_offset %r13, -56
+; CHECK-NEXT:    .cfi_offset %r14, -48
+; CHECK-NEXT:    .cfi_offset %r15, -40
+; CHECK-NEXT:    lg %r14, 8(%r4)
+; CHECK-NEXT:    lg %r0, 0(%r4)
+; CHECK-NEXT:    lg %r4, 8(%r3)
+; CHECK-NEXT:    lg %r5, 0(%r3)
+; CHECK-NEXT:    lgr %r1, %r14
+; CHECK-NEXT:  .LBB0_1: # %atomicrmw.start
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    lgr %r12, %r5
+; CHECK-NEXT:    lgr %r13, %r4
+; CHECK-NEXT:    cdsg %r12, %r0, 0(%r3)
+; CHECK-NEXT:    lgr %r4, %r13
+; CHECK-NEXT:    lgr %r5, %r12
+; CHECK-NEXT:    jl .LBB0_1
+; CHECK-NEXT:  # %bb.2: # %atomicrmw.end
+; CHECK-NEXT:    stg %r5, 0(%r2)
+; CHECK-NEXT:    stg %r4, 8(%r2)
+; CHECK-NEXT:    lmg %r12, %r15, 96(%r15)
+; CHECK-NEXT:    br %r14
   %res = atomicrmw xchg ptr %src, i128 %b seq_cst
   ret i128 %res
 }
 
 ; Check addition of a variable.
-define i128 @f2(i128 %dummy, ptr %src, i128 %b) {
-; CHECK-LABEL: f2:
-; CHECK: brasl %r14, __sync_fetch_and_add_16 at PLT
-; CHECK: br %r14
+define i128 @atomicrmw_add(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_add:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    stmg %r10, %r15, 80(%r15)
+; CHECK-NEXT:    .cfi_offset %r10, -80
+; CHECK-NEXT:    .cfi_offset %r11, -72
+; CHECK-NEXT:    .cfi_offset %r12, -64
+; CHECK-NEXT:    .cfi_offset %r13, -56
+; CHECK-NEXT:    .cfi_offset %r15, -40
+; CHECK-NEXT:    lg %r0, 8(%r4)
+; CHECK-NEXT:    lg %r1, 0(%r4)
+; CHECK-NEXT:    lg %r4, 8(%r3)
+; CHECK-NEXT:    lg %r5, 0(%r3)
+; CHECK-NEXT:  .LBB1_1: # %atomicrmw.start
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    algrk %r13, %r4, %r0
+; CHECK-NEXT:    lgr %r10, %r5
+; CHECK-NEXT:    lgr %r11, %r4
+; CHECK-NEXT:    alcgr %r5, %r1
+; CHECK-NEXT:    lgr %r12, %r5
+; CHECK-NEXT:    cdsg %r10, %r12, 0(%r3)
+; CHECK-NEXT:    lgr %r4, %r11
+; CHECK-NEXT:    lgr %r5, %r10
+; CHECK-NEXT:    jl .LBB1_1
+; CHECK-NEXT:  # %bb.2: # %atomicrmw.end
+; CHECK-NEXT:    stg %r5, 0(%r2)
+; CHECK-NEXT:    stg %r4, 8(%r2)
+; CHECK-NEXT:    lmg %r10, %r15, 80(%r15)
+; CHECK-NEXT:    br %r14
   %res = atomicrmw add ptr %src, i128 %b seq_cst
   ret i128 %res
 }
 
 ; Check subtraction of a variable.
-define i128 @f3(i128 %dummy, ptr %src, i128 %b) {
-; CHECK-LABEL: f3:
-; CHECK: brasl %r14, __sync_fetch_and_sub_16 at PLT
-; CHECK: br %r14
+define i128 @atomicrmw_sub(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_sub:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    stmg %r10, %r15, 80(%r15)
+; CHECK-NEXT:    .cfi_offset %r10, -80
+; CHECK-NEXT:    .cfi_offset %r11, -72
+; CHECK-NEXT:    .cfi_offset %r12, -64
+; CHECK-NEXT:    .cfi_offset %r13, -56
+; CHECK-NEXT:    .cfi_offset %r15, -40
+; CHECK-NEXT:    lg %r0, 8(%r4)
+; CHECK-NEXT:    lg %r1, 0(%r4)
+; CHECK-NEXT:    lg %r4, 8(%r3)
+; CHECK-NEXT:    lg %r5, 0(%r3)
+; CHECK-NEXT:  .LBB2_1: # %atomicrmw.start
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    slgrk %r13, %r4, %r0
+; CHECK-NEXT:    lgr %r10, %r5
+; CHECK-NEXT:    lgr %r11, %r4
+; CHECK-NEXT:    slbgr %r5, %r1
+; CHECK-NEXT:    lgr %r12, %r5
+; CHECK-NEXT:    cdsg %r10, %r12, 0(%r3)
+; CHECK-NEXT:    lgr %r4, %r11
+; CHECK-NEXT:    lgr %r5, %r10
+; CHECK-NEXT:    jl .LBB2_1
+; CHECK-NEXT:  # %bb.2: # %atomicrmw.end
+; CHECK-NEXT:    stg %r5, 0(%r2)
+; CHECK-NEXT:    stg %r4, 8(%r2)
+; CHECK-NEXT:    lmg %r10, %r15, 80(%r15)
+; CHECK-NEXT:    br %r14
   %res = atomicrmw sub ptr %src, i128 %b seq_cst
   ret i128 %res
 }
 
 ; Check AND of a variable.
-define i128 @f4(i128 %dummy, ptr %src, i128 %b) {
-; CHECK-LABEL: f4:
-; CHECK: brasl %r14, __sync_fetch_and_and_16 at PLT
-; CHECK: br %r14
+define i128 @atomicrmw_and(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_and:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    stmg %r10, %r15, 80(%r15)
+; CHECK-NEXT:    .cfi_offset %r10, -80
+; CHECK-NEXT:    .cfi_offset %r11, -72
+; CHECK-NEXT:    .cfi_offset %r12, -64
+; CHECK-NEXT:    .cfi_offset %r13, -56
+; CHECK-NEXT:    .cfi_offset %r15, -40
+; CHECK-NEXT:    lg %r0, 8(%r4)
+; CHECK-NEXT:    lg %r1, 0(%r4)
+; CHECK-NEXT:    lg %r4, 8(%r3)
+; CHECK-NEXT:    lg %r5, 0(%r3)
+; CHECK-NEXT:  .LBB3_1: # %atomicrmw.start
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ngrk %r12, %r5, %r1
+; CHECK-NEXT:    ngrk %r13, %r4, %r0
+; CHECK-NEXT:    lgr %r10, %r5
+; CHECK-NEXT:    lgr %r11, %r4
+; CHECK-NEXT:    cdsg %r10, %r12, 0(%r3)
+; CHECK-NEXT:    lgr %r4, %r11
+; CHECK-NEXT:    lgr %r5, %r10
+; CHECK-NEXT:    jl .LBB3_1
+; CHECK-NEXT:  # %bb.2: # %atomicrmw.end
+; CHECK-NEXT:    stg %r5, 0(%r2)
+; CHECK-NEXT:    stg %r4, 8(%r2)
+; CHECK-NEXT:    lmg %r10, %r15, 80(%r15)
+; CHECK-NEXT:    br %r14
   %res = atomicrmw and ptr %src, i128 %b seq_cst
   ret i128 %res
 }
 
 ; Check NAND of a variable.
-define i128 @f5(i128 %dummy, ptr %src, i128 %b) {
-; CHECK-LABEL: f5:
-; CHECK: brasl %r14, __sync_fetch_and_nand_16 at PLT
-; CHECK: br %r14
+define i128 @atomicrmw_nand(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_nand:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    stmg %r10, %r15, 80(%r15)
+; CHECK-NEXT:    .cfi_offset %r10, -80
+; CHECK-NEXT:    .cfi_offset %r11, -72
+; CHECK-NEXT:    .cfi_offset %r12, -64
+; CHECK-NEXT:    .cfi_offset %r13, -56
+; CHECK-NEXT:    .cfi_offset %r15, -40
+; CHECK-NEXT:    lg %r0, 8(%r4)
+; CHECK-NEXT:    lg %r1, 0(%r4)
+; CHECK-NEXT:    lg %r4, 8(%r3)
+; CHECK-NEXT:    lg %r5, 0(%r3)
+; CHECK-NEXT:  .LBB4_1: # %atomicrmw.start
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    nngrk %r12, %r5, %r1
+; CHECK-NEXT:    lgr %r10, %r5
+; CHECK-NEXT:    lgr %r11, %r4
+; CHECK-NEXT:    nngrk %r13, %r4, %r0
+; CHECK-NEXT:    cdsg %r10, %r12, 0(%r3)
+; CHECK-NEXT:    lgr %r4, %r11
+; CHECK-NEXT:    lgr %r5, %r10
+; CHECK-NEXT:    jl .LBB4_1
+; CHECK-NEXT:  # %bb.2: # %atomicrmw.end
+; CHECK-NEXT:    stg %r5, 0(%r2)
+; CHECK-NEXT:    stg %r4, 8(%r2)
+; CHECK-NEXT:    lmg %r10, %r15, 80(%r15)
+; CHECK-NEXT:    br %r14
   %res = atomicrmw nand ptr %src, i128 %b seq_cst
   ret i128 %res
 }
 
 ; Check OR of a variable.
-define i128 @f6(i128 %dummy, ptr %src, i128 %b) {
-; CHECK-LABEL: f6:
-; CHECK: brasl %r14, __sync_fetch_and_or_16 at PLT
-; CHECK: br %r14
+define i128 @atomicrmw_or(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_or:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    stmg %r10, %r15, 80(%r15)
+; CHECK-NEXT:    .cfi_offset %r10, -80
+; CHECK-NEXT:    .cfi_offset %r11, -72
+; CHECK-NEXT:    .cfi_offset %r12, -64
+; CHECK-NEXT:    .cfi_offset %r13, -56
+; CHECK-NEXT:    .cfi_offset %r15, -40
+; CHECK-NEXT:    lg %r0, 8(%r4)
+; CHECK-NEXT:    lg %r1, 0(%r4)
+; CHECK-NEXT:    lg %r4, 8(%r3)
+; CHECK-NEXT:    lg %r5, 0(%r3)
+; CHECK-NEXT:  .LBB5_1: # %atomicrmw.start
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    ogrk %r12, %r5, %r1
+; CHECK-NEXT:    ogrk %r13, %r4, %r0
+; CHECK-NEXT:    lgr %r10, %r5
+; CHECK-NEXT:    lgr %r11, %r4
+; CHECK-NEXT:    cdsg %r10, %r12, 0(%r3)
+; CHECK-NEXT:    lgr %r4, %r11
+; CHECK-NEXT:    lgr %r5, %r10
+; CHECK-NEXT:    jl .LBB5_1
+; CHECK-NEXT:  # %bb.2: # %atomicrmw.end
+; CHECK-NEXT:    stg %r5, 0(%r2)
+; CHECK-NEXT:    stg %r4, 8(%r2)
+; CHECK-NEXT:    lmg %r10, %r15, 80(%r15)
+; CHECK-NEXT:    br %r14
   %res = atomicrmw or ptr %src, i128 %b seq_cst
   ret i128 %res
 }
 
 ; Check XOR of a variable.
-define i128 @f7(i128 %dummy, ptr %src, i128 %b) {
-; CHECK-LABEL: f7:
-; CHECK: brasl %r14, __sync_fetch_and_xor_16 at PLT
-; CHECK: br %r14
+define i128 @atomicrmw_xor(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_xor:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    stmg %r10, %r15, 80(%r15)
+; CHECK-NEXT:    .cfi_offset %r10, -80
+; CHECK-NEXT:    .cfi_offset %r11, -72
+; CHECK-NEXT:    .cfi_offset %r12, -64
+; CHECK-NEXT:    .cfi_offset %r13, -56
+; CHECK-NEXT:    .cfi_offset %r15, -40
+; CHECK-NEXT:    lg %r0, 8(%r4)
+; CHECK-NEXT:    lg %r1, 0(%r4)
+; CHECK-NEXT:    lg %r4, 8(%r3)
+; CHECK-NEXT:    lg %r5, 0(%r3)
+; CHECK-NEXT:  .LBB6_1: # %atomicrmw.start
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    xgrk %r12, %r5, %r1
+; CHECK-NEXT:    xgrk %r13, %r4, %r0
+; CHECK-NEXT:    lgr %r10, %r5
+; CHECK-NEXT:    lgr %r11, %r4
+; CHECK-NEXT:    cdsg %r10, %r12, 0(%r3)
+; CHECK-NEXT:    lgr %r4, %r11
+; CHECK-NEXT:    lgr %r5, %r10
+; CHECK-NEXT:    jl .LBB6_1
+; CHECK-NEXT:  # %bb.2: # %atomicrmw.end
+; CHECK-NEXT:    stg %r5, 0(%r2)
+; CHECK-NEXT:    stg %r4, 8(%r2)
+; CHECK-NEXT:    lmg %r10, %r15, 80(%r15)
+; CHECK-NEXT:    br %r14
   %res = atomicrmw xor ptr %src, i128 %b seq_cst
   ret i128 %res
 }
 
 ; Check signed minimum.
-define i128 @f8(i128 %dummy, ptr %src, i128 %b) {
-; CHECK-LABEL: f8:
-; CHECK: brasl %r14, __sync_fetch_and_min_16 at PLT
-; CHECK: br %r14
+define i128 @atomicrmw_min(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_min:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    stmg %r10, %r15, 80(%r15)
+; CHECK-NEXT:    .cfi_offset %r10, -80
+; CHECK-NEXT:    .cfi_offset %r11, -72
+; CHECK-NEXT:    .cfi_offset %r12, -64
+; CHECK-NEXT:    .cfi_offset %r13, -56
+; CHECK-NEXT:    .cfi_offset %r14, -48
+; CHECK-NEXT:    .cfi_offset %r15, -40
+; CHECK-NEXT:    lg %r0, 8(%r4)
+; CHECK-NEXT:    lg %r1, 0(%r4)
+; CHECK-NEXT:    lg %r4, 8(%r3)
+; CHECK-NEXT:    lg %r5, 0(%r3)
+; CHECK-NEXT:  .LBB7_1: # %atomicrmw.start
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    clgr %r4, %r0
+; CHECK-NEXT:    lhi %r14, 0
+; CHECK-NEXT:    lochile %r14, 1
+; CHECK-NEXT:    cgr %r5, %r1
+; CHECK-NEXT:    lhi %r13, 0
+; CHECK-NEXT:    lochile %r13, 1
+; CHECK-NEXT:    locrlh %r14, %r13
+; CHECK-NEXT:    chi %r14, 0
+; CHECK-NEXT:    selgrlh %r13, %r4, %r0
+; CHECK-NEXT:    selgrlh %r12, %r5, %r1
+; CHECK-NEXT:    lgr %r10, %r5
+; CHECK-NEXT:    lgr %r11, %r4
+; CHECK-NEXT:    cdsg %r10, %r12, 0(%r3)
+; CHECK-NEXT:    lgr %r4, %r11
+; CHECK-NEXT:    lgr %r5, %r10
+; CHECK-NEXT:    jl .LBB7_1
+; CHECK-NEXT:  # %bb.2: # %atomicrmw.end
+; CHECK-NEXT:    stg %r5, 0(%r2)
+; CHECK-NEXT:    stg %r4, 8(%r2)
+; CHECK-NEXT:    lmg %r10, %r15, 80(%r15)
+; CHECK-NEXT:    br %r14
   %res = atomicrmw min ptr %src, i128 %b seq_cst
   ret i128 %res
 }
 
 ; Check signed maximum.
-define i128 @f9(i128 %dummy, ptr %src, i128 %b) {
-; CHECK-LABEL: f9:
-; CHECK: brasl %r14, __sync_fetch_and_max_16 at PLT
-; CHECK: br %r14
+define i128 @atomicrmw_max(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_max:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    stmg %r10, %r15, 80(%r15)
+; CHECK-NEXT:    .cfi_offset %r10, -80
+; CHECK-NEXT:    .cfi_offset %r11, -72
+; CHECK-NEXT:    .cfi_offset %r12, -64
+; CHECK-NEXT:    .cfi_offset %r13, -56
+; CHECK-NEXT:    .cfi_offset %r14, -48
+; CHECK-NEXT:    .cfi_offset %r15, -40
+; CHECK-NEXT:    lg %r0, 8(%r4)
+; CHECK-NEXT:    lg %r1, 0(%r4)
+; CHECK-NEXT:    lg %r4, 8(%r3)
+; CHECK-NEXT:    lg %r5, 0(%r3)
+; CHECK-NEXT:  .LBB8_1: # %atomicrmw.start
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    clgr %r4, %r0
+; CHECK-NEXT:    lhi %r14, 0
+; CHECK-NEXT:    lochih %r14, 1
+; CHECK-NEXT:    cgr %r5, %r1
+; CHECK-NEXT:    lhi %r13, 0
+; CHECK-NEXT:    lochih %r13, 1
+; CHECK-NEXT:    locrlh %r14, %r13
+; CHECK-NEXT:    chi %r14, 0
+; CHECK-NEXT:    selgrlh %r13, %r4, %r0
+; CHECK-NEXT:    selgrlh %r12, %r5, %r1
+; CHECK-NEXT:    lgr %r10, %r5
+; CHECK-NEXT:    lgr %r11, %r4
+; CHECK-NEXT:    cdsg %r10, %r12, 0(%r3)
+; CHECK-NEXT:    lgr %r4, %r11
+; CHECK-NEXT:    lgr %r5, %r10
+; CHECK-NEXT:    jl .LBB8_1
+; CHECK-NEXT:  # %bb.2: # %atomicrmw.end
+; CHECK-NEXT:    stg %r5, 0(%r2)
+; CHECK-NEXT:    stg %r4, 8(%r2)
+; CHECK-NEXT:    lmg %r10, %r15, 80(%r15)
+; CHECK-NEXT:    br %r14
   %res = atomicrmw max ptr %src, i128 %b seq_cst
   ret i128 %res
 }
 
 ; Check unsigned minimum.
-define i128 @f10(i128 %dummy, ptr %src, i128 %b) {
-; CHECK-LABEL: f10:
-; CHECK: brasl %r14, __sync_fetch_and_umin_16 at PLT
-; CHECK: br %r14
+define i128 @atomicrmw_umin(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_umin:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    stmg %r10, %r15, 80(%r15)
+; CHECK-NEXT:    .cfi_offset %r10, -80
+; CHECK-NEXT:    .cfi_offset %r11, -72
+; CHECK-NEXT:    .cfi_offset %r12, -64
+; CHECK-NEXT:    .cfi_offset %r13, -56
+; CHECK-NEXT:    .cfi_offset %r14, -48
+; CHECK-NEXT:    .cfi_offset %r15, -40
+; CHECK-NEXT:    lg %r0, 8(%r4)
+; CHECK-NEXT:    lg %r1, 0(%r4)
+; CHECK-NEXT:    lg %r4, 8(%r3)
+; CHECK-NEXT:    lg %r5, 0(%r3)
+; CHECK-NEXT:  .LBB9_1: # %atomicrmw.start
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    clgr %r5, %r1
+; CHECK-NEXT:    lhi %r14, 0
+; CHECK-NEXT:    lochile %r14, 1
+; CHECK-NEXT:    clgr %r4, %r0
+; CHECK-NEXT:    lhi %r13, 0
+; CHECK-NEXT:    lochile %r13, 1
+; CHECK-NEXT:    cgr %r5, %r1
+; CHECK-NEXT:    locre %r14, %r13
+; CHECK-NEXT:    chi %r14, 0
+; CHECK-NEXT:    selgrlh %r13, %r4, %r0
+; CHECK-NEXT:    selgrlh %r12, %r5, %r1
+; CHECK-NEXT:    lgr %r10, %r5
+; CHECK-NEXT:    lgr %r11, %r4
+; CHECK-NEXT:    cdsg %r10, %r12, 0(%r3)
+; CHECK-NEXT:    lgr %r4, %r11
+; CHECK-NEXT:    lgr %r5, %r10
+; CHECK-NEXT:    jl .LBB9_1
+; CHECK-NEXT:  # %bb.2: # %atomicrmw.end
+; CHECK-NEXT:    stg %r5, 0(%r2)
+; CHECK-NEXT:    stg %r4, 8(%r2)
+; CHECK-NEXT:    lmg %r10, %r15, 80(%r15)
+; CHECK-NEXT:    br %r14
   %res = atomicrmw umin ptr %src, i128 %b seq_cst
   ret i128 %res
 }
 
 ; Check unsigned maximum.
-define i128 @f11(i128 %dummy, ptr %src, i128 %b) {
-; CHECK-LABEL: f11:
-; CHECK: brasl %r14, __sync_fetch_and_umax_16 at PLT
-; CHECK: br %r14
+define i128 @atomicrmw_umax(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_umax:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    stmg %r10, %r15, 80(%r15)
+; CHECK-NEXT:    .cfi_offset %r10, -80
+; CHECK-NEXT:    .cfi_offset %r11, -72
+; CHECK-NEXT:    .cfi_offset %r12, -64
+; CHECK-NEXT:    .cfi_offset %r13, -56
+; CHECK-NEXT:    .cfi_offset %r14, -48
+; CHECK-NEXT:    .cfi_offset %r15, -40
+; CHECK-NEXT:    lg %r0, 8(%r4)
+; CHECK-NEXT:    lg %r1, 0(%r4)
+; CHECK-NEXT:    lg %r4, 8(%r3)
+; CHECK-NEXT:    lg %r5, 0(%r3)
+; CHECK-NEXT:  .LBB10_1: # %atomicrmw.start
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    clgr %r5, %r1
+; CHECK-NEXT:    lhi %r14, 0
+; CHECK-NEXT:    lochih %r14, 1
+; CHECK-NEXT:    clgr %r4, %r0
+; CHECK-NEXT:    lhi %r13, 0
+; CHECK-NEXT:    lochih %r13, 1
+; CHECK-NEXT:    cgr %r5, %r1
+; CHECK-NEXT:    locre %r14, %r13
+; CHECK-NEXT:    chi %r14, 0
+; CHECK-NEXT:    selgrlh %r13, %r4, %r0
+; CHECK-NEXT:    selgrlh %r12, %r5, %r1
+; CHECK-NEXT:    lgr %r10, %r5
+; CHECK-NEXT:    lgr %r11, %r4
+; CHECK-NEXT:    cdsg %r10, %r12, 0(%r3)
+; CHECK-NEXT:    lgr %r4, %r11
+; CHECK-NEXT:    lgr %r5, %r10
+; CHECK-NEXT:    jl .LBB10_1
+; CHECK-NEXT:  # %bb.2: # %atomicrmw.end
+; CHECK-NEXT:    stg %r5, 0(%r2)
+; CHECK-NEXT:    stg %r4, 8(%r2)
+; CHECK-NEXT:    lmg %r10, %r15, 80(%r15)
+; CHECK-NEXT:    br %r14
   %res = atomicrmw umax ptr %src, i128 %b seq_cst
   ret i128 %res
 }
 
+; Check increment with wraparound.
+define i128 @atomicrmw_uinc_wrap(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_uinc_wrap:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    stmg %r8, %r15, 64(%r15)
+; CHECK-NEXT:    .cfi_offset %r8, -96
+; CHECK-NEXT:    .cfi_offset %r9, -88
+; CHECK-NEXT:    .cfi_offset %r10, -80
+; CHECK-NEXT:    .cfi_offset %r11, -72
+; CHECK-NEXT:    .cfi_offset %r12, -64
+; CHECK-NEXT:    .cfi_offset %r13, -56
+; CHECK-NEXT:    .cfi_offset %r14, -48
+; CHECK-NEXT:    .cfi_offset %r15, -40
+; CHECK-NEXT:    lg %r0, 8(%r4)
+; CHECK-NEXT:    lg %r1, 0(%r4)
+; CHECK-NEXT:    lg %r5, 8(%r3)
+; CHECK-NEXT:    lg %r14, 0(%r3)
+; CHECK-NEXT:    lghi %r4, 0
+; CHECK-NEXT:  .LBB11_1: # %atomicrmw.start
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    alghsik %r13, %r5, 1
+; CHECK-NEXT:    lgr %r12, %r14
+; CHECK-NEXT:    lhi %r11, 0
+; CHECK-NEXT:    alcgr %r12, %r4
+; CHECK-NEXT:    clgr %r14, %r1
+; CHECK-NEXT:    lochihe %r11, 1
+; CHECK-NEXT:    clgr %r5, %r0
+; CHECK-NEXT:    lhi %r10, 0
+; CHECK-NEXT:    lochihe %r10, 1
+; CHECK-NEXT:    cgr %r14, %r1
+; CHECK-NEXT:    locre %r11, %r10
+; CHECK-NEXT:    chi %r11, 0
+; CHECK-NEXT:    locghilh %r13, 0
+; CHECK-NEXT:    locghilh %r12, 0
+; CHECK-NEXT:    lgr %r8, %r14
+; CHECK-NEXT:    lgr %r9, %r5
+; CHECK-NEXT:    cdsg %r8, %r12, 0(%r3)
+; CHECK-NEXT:    lgr %r5, %r9
+; CHECK-NEXT:    lgr %r14, %r8
+; CHECK-NEXT:    jl .LBB11_1
+; CHECK-NEXT:  # %bb.2: # %atomicrmw.end
+; CHECK-NEXT:    stg %r14, 0(%r2)
+; CHECK-NEXT:    stg %r5, 8(%r2)
+; CHECK-NEXT:    lmg %r8, %r15, 64(%r15)
+; CHECK-NEXT:    br %r14
+  %res = atomicrmw uinc_wrap ptr %src, i128 %b seq_cst
+  ret i128 %res
+}
+
+; Check decrement with wraparound.
+define i128 @atomicrmw_udec_wrap(ptr %src, i128 %b) {
+; CHECK-LABEL: atomicrmw_udec_wrap:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    stmg %r6, %r15, 48(%r15)
+; CHECK-NEXT:    .cfi_offset %r6, -112
+; CHECK-NEXT:    .cfi_offset %r7, -104
+; CHECK-NEXT:    .cfi_offset %r9, -88
+; CHECK-NEXT:    .cfi_offset %r10, -80
+; CHECK-NEXT:    .cfi_offset %r11, -72
+; CHECK-NEXT:    .cfi_offset %r12, -64
+; CHECK-NEXT:    .cfi_offset %r13, -56
+; CHECK-NEXT:    .cfi_offset %r14, -48
+; CHECK-NEXT:    .cfi_offset %r15, -40
+; CHECK-NEXT:    lg %r0, 8(%r4)
+; CHECK-NEXT:    lg %r1, 0(%r4)
+; CHECK-NEXT:    lg %r5, 8(%r3)
+; CHECK-NEXT:    lg %r14, 0(%r3)
+; CHECK-NEXT:    lghi %r4, -1
+; CHECK-NEXT:  .LBB12_1: # %atomicrmw.start
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    alghsik %r13, %r5, -1
+; CHECK-NEXT:    lgr %r12, %r14
+; CHECK-NEXT:    lhi %r10, 0
+; CHECK-NEXT:    alcgr %r12, %r4
+; CHECK-NEXT:    ogrk %r11, %r5, %r14
+; CHECK-NEXT:    lhi %r11, 0
+; CHECK-NEXT:    lochie %r11, 1
+; CHECK-NEXT:    clgr %r14, %r1
+; CHECK-NEXT:    lochih %r10, 1
+; CHECK-NEXT:    clgr %r5, %r0
+; CHECK-NEXT:    lhi %r9, 0
+; CHECK-NEXT:    lochih %r9, 1
+; CHECK-NEXT:    cgr %r14, %r1
+; CHECK-NEXT:    locre %r10, %r9
+; CHECK-NEXT:    or %r11, %r10
+; CHECK-NEXT:    selgrl %r11, %r0, %r13
+; CHECK-NEXT:    selgrl %r10, %r1, %r12
+; CHECK-NEXT:    lgr %r6, %r14
+; CHECK-NEXT:    lgr %r7, %r5
+; CHECK-NEXT:    cdsg %r6, %r10, 0(%r3)
+; CHECK-NEXT:    lgr %r5, %r7
+; CHECK-NEXT:    lgr %r14, %r6
+; CHECK-NEXT:    jl .LBB12_1
+; CHECK-NEXT:  # %bb.2: # %atomicrmw.end
+; CHECK-NEXT:    stg %r14, 0(%r2)
+; CHECK-NEXT:    stg %r5, 8(%r2)
+; CHECK-NEXT:    lmg %r6, %r15, 48(%r15)
+; CHECK-NEXT:    br %r14
+  %res = atomicrmw udec_wrap ptr %src, i128 %b seq_cst
+  ret i128 %res
+}
diff --git a/llvm/test/CodeGen/SystemZ/atomicrmw-xchg-07.ll b/llvm/test/CodeGen/SystemZ/atomicrmw-xchg-07.ll
index 80cc85158e45e6a..b9e29599af7ee78 100644
--- a/llvm/test/CodeGen/SystemZ/atomicrmw-xchg-07.ll
+++ b/llvm/test/CodeGen/SystemZ/atomicrmw-xchg-07.ll
@@ -2,23 +2,28 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
-define void @f1(ptr %ret, ptr %src, ptr %b) {
+define void @f1(ptr align 16 %ret, ptr align 16 %src, ptr align 16 %b) {
 ; CHECK-LABEL: f1:
-; CHECK: lg [[RH:%r[0-9]+]], 8(%r4)
-; CHECK: lgr [[RET:%r[0-9]+]], %r2
-; CHECK: lg [[RL:%r[0-9]+]], 0(%r4)
-; CHECK: stg [[RH]], 168(%r15)
-; CHECK: la %r2, 176(%r15)
-; CHECK: la %r4, 160(%r15)
-; CHECK: stg [[RL]], 160(%r15)
-; CHECK: brasl %r14, __sync_lock_test_and_set_16 at PLT
-; CHECK: lg [[RH2:%r[0-9]+]], 184(%r15)
-; CHECK: lg [[RL2:%r[0-9]+]], 176(%r15)
-; CHECK: stg [[RH]], 8([[RET]])
-; CHECK: stg [[RL]], 0([[RET]])
-; CHECK: br %r14
-  %val = load fp128, ptr %b, align 8
+; CHECK:       lg      %r14, 8(%r4)
+; CHECK-NEXT:  lg      %r0, 0(%r4)
+; CHECK-NEXT:  lg      %r4, 8(%r3)
+; CHECK-NEXT:  lg      %r5, 0(%r3)
+; CHECK-NEXT:  lgr     %r1, %r14
+; CHECK-NEXT:.LBB0_1:                          # %atomicrmw.start
+; CHECK-NEXT:                                  # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:  lgr     %r12, %r5
+; CHECK-NEXT:  lgr     %r13, %r4
+; CHECK-NEXT:  cdsg    %r12, %r0, 0(%r3)
+; CHECK-NEXT:  lgr     %r4, %r13
+; CHECK-NEXT:  lgr     %r5, %r12
+; CHECK-NEXT:  jl      .LBB0_1
+; CHECK-NEXT:# %bb.2:                          # %atomicrmw.end
+; CHECK-NEXT:  stg     %r5, 0(%r2)
+; CHECK-NEXT:  stg     %r4, 8(%r2)
+; CHECK-NEXT:  lmg     %r12, %r15, 96(%r15)
+; CHECK-NEXT:  br      %r14
+  %val = load fp128, ptr %b, align 16
   %res = atomicrmw xchg ptr %src, fp128 %val seq_cst
-  store fp128 %res, ptr %ret, align 8
+  store fp128 %res, ptr %ret, align 16
   ret void
 }

>From 0432ffb4f69e135cef95243771ec1e37d6bf79a0 Mon Sep 17 00:00:00 2001
From: Jonas Paulsson <paulson1 at linux.ibm.com>
Date: Fri, 24 Nov 2023 15:07:18 +0100
Subject: [PATCH 2/5] Rework testing of gnu-atomic_is_lock_free

---
 .../gnu-atomic_is_lock_free-i128-16Al.c       | 42 +++++++------------
 .../gnu-atomic_is_lock_free-i128-8Al.c        | 10 ++---
 2 files changed, 20 insertions(+), 32 deletions(-)

diff --git a/clang/test/CodeGen/SystemZ/gnu-atomic_is_lock_free-i128-16Al.c b/clang/test/CodeGen/SystemZ/gnu-atomic_is_lock_free-i128-16Al.c
index 343bfb36ebfdccf..2e1565fcb2984cf 100644
--- a/clang/test/CodeGen/SystemZ/gnu-atomic_is_lock_free-i128-16Al.c
+++ b/clang/test/CodeGen/SystemZ/gnu-atomic_is_lock_free-i128-16Al.c
@@ -1,4 +1,3 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
 //
 // Test __atomic_is_lock_free() and __atomic_always_lock_free() for __int128
@@ -9,46 +8,35 @@
 
 __int128 Int128_Al16 __attribute__((aligned(16)));
 
-// CHECK-LABEL: @fun_PtrAl16_is_lock_free(
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[CALL:%.*]] = tail call zeroext i1 @__atomic_is_lock_free(i64 noundef 16, ptr noundef nonnull @Int128_Al16) #[[ATTR2]]
-// CHECK-NEXT:    ret i1 [[CALL]]
-//
-_Bool fun_PtrAl16_is_lock_free() {
+// CHECK-LABEL: @fun0
+// CHECK:       tail call zeroext i1 @__atomic_is_lock_free
+_Bool fun0() {
   return __atomic_is_lock_free(16, &Int128_Al16);
 }
 
-// CHECK-LABEL: @fun_PtrAl16_always_lock_free(
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    ret i1 false
-//
-_Bool fun_PtrAl16_always_lock_free() {
+// CHECK-LABEL: @fun1
+// CHECK:       ret i1 false
+_Bool fun1() {
   return __atomic_always_lock_free(16, &Int128_Al16);
 }
 
 // Also test these with a 16 byte size and null-pointer.
-// CHECK-LABEL: @fun_noptr_is_lock_free(
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    ret i1 true
-//
-_Bool fun_noptr_is_lock_free() {
+// CHECK-LABEL: @fun2
+// CHECK:       ret i1 true
+_Bool fun2() {
   return __atomic_is_lock_free(16, 0);
 }
 
-// CHECK-LABEL: @fun_noptr_always_lock_free(
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    ret i1 true
-//
-_Bool fun_noptr_always_lock_free() {
+// CHECK-LABEL: @fun3
+// CHECK:       ret i1 true
+_Bool fun3() {
   return __atomic_always_lock_free(16, 0);
 }
 
 // Also test __c11_atomic_is_lock_free() with a 16 byte size.
-// CHECK-LABEL: @fun_c11_is_lock_free(
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    ret i1 true
-//
-_Bool fun_c11_is_lock_free() {
+// CHECK-LABEL: @fun4
+// CHECK:       ret i1 true
+_Bool fun4() {
   return __c11_atomic_is_lock_free(16);
 }
 
diff --git a/clang/test/CodeGen/SystemZ/gnu-atomic_is_lock_free-i128-8Al.c b/clang/test/CodeGen/SystemZ/gnu-atomic_is_lock_free-i128-8Al.c
index f8e39996ad6d51f..715924abbff0133 100644
--- a/clang/test/CodeGen/SystemZ/gnu-atomic_is_lock_free-i128-8Al.c
+++ b/clang/test/CodeGen/SystemZ/gnu-atomic_is_lock_free-i128-8Al.c
@@ -9,20 +9,20 @@
 
 __int128 Int128_Al8 __attribute__((aligned(8)));
 
-// CHECK-LABEL: @fun_PtrAl16_is_lock_free(
+// CHECK-LABEL: @fun0
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[CALL:%.*]] = tail call zeroext i1 @__atomic_is_lock_free(i64 noundef 16, ptr noundef nonnull @Int128_Al16) #[[ATTR2]]
+// CHECK-NEXT:    [[CALL:%.*]] = tail call zeroext i1 @__atomic_is_lock_free(i64 noundef 16, ptr noundef nonnull @Int128_Al8) #[[ATTR2:[0-9]+]]
 // CHECK-NEXT:    ret i1 [[CALL]]
 //
-_Bool fun_PtrAl8_is_lock_free() {
+_Bool fun0() {
   return __atomic_is_lock_free(16, &Int128_Al8);
 }
 
-// CHECK-LABEL: @fun_PtrAl16_always_lock_free(
+// CHECK-LABEL: @fun1
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    ret i1 false
 //
-_Bool fun_PtrAl8_always_lock_free() {
+_Bool fun1() {
   return __atomic_always_lock_free(16, &Int128_Al8);
 }
 

>From 41c8fc6eef4b743b82ea895ac1d5cbbc317ecfe2 Mon Sep 17 00:00:00 2001
From: Jonas Paulsson <paulson1 at linux.ibm.com>
Date: Sat, 25 Nov 2023 11:23:58 +0100
Subject: [PATCH 3/5] Rework testing again, adding the C library call as well.

---
 ...i128-16Al.c => atomic_is_lock_free-i128.c} | 36 ++++++++++++++-----
 .../gnu-atomic_is_lock_free-i128-8Al.c        | 28 ---------------
 2 files changed, 27 insertions(+), 37 deletions(-)
 rename clang/test/CodeGen/SystemZ/{gnu-atomic_is_lock_free-i128-16Al.c => atomic_is_lock_free-i128.c} (61%)
 delete mode 100644 clang/test/CodeGen/SystemZ/gnu-atomic_is_lock_free-i128-8Al.c

diff --git a/clang/test/CodeGen/SystemZ/gnu-atomic_is_lock_free-i128-16Al.c b/clang/test/CodeGen/SystemZ/atomic_is_lock_free-i128.c
similarity index 61%
rename from clang/test/CodeGen/SystemZ/gnu-atomic_is_lock_free-i128-16Al.c
rename to clang/test/CodeGen/SystemZ/atomic_is_lock_free-i128.c
index 2e1565fcb2984cf..c32b02286455aeb 100644
--- a/clang/test/CodeGen/SystemZ/gnu-atomic_is_lock_free-i128-16Al.c
+++ b/clang/test/CodeGen/SystemZ/atomic_is_lock_free-i128.c
@@ -1,7 +1,6 @@
 // RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
 //
-// Test __atomic_is_lock_free() and __atomic_always_lock_free() for __int128
-// with 16 byte alignment.
+// Test __atomic_is_lock_free() and friends.
 
 #include <stdatomic.h>
 #include <stdint.h>
@@ -20,23 +19,42 @@ _Bool fun1() {
   return __atomic_always_lock_free(16, &Int128_Al16);
 }
 
-// Also test these with a 16 byte size and null-pointer.
+__int128 Int128_Al8 __attribute__((aligned(8)));
+
 // CHECK-LABEL: @fun2
-// CHECK:       ret i1 true
+// CHECK:    call zeroext i1 @__atomic_is_lock_free
 _Bool fun2() {
-  return __atomic_is_lock_free(16, 0);
+  return __atomic_is_lock_free(16, &Int128_Al8);
 }
 
 // CHECK-LABEL: @fun3
-// CHECK:       ret i1 true
+// CHECK:    ret i1 false
 _Bool fun3() {
-  return __atomic_always_lock_free(16, 0);
+  return __atomic_always_lock_free(16, &Int128_Al8);
 }
 
-// Also test __c11_atomic_is_lock_free() with a 16 byte size.
 // CHECK-LABEL: @fun4
 // CHECK:       ret i1 true
 _Bool fun4() {
-  return __c11_atomic_is_lock_free(16);
+  return __atomic_is_lock_free(16, 0);
+}
+
+// CHECK-LABEL: @fun5
+// CHECK:       ret i1 true
+_Bool fun5() {
+  return __atomic_always_lock_free(16, 0);
 }
 
+_Atomic __int128 AtomicI128;
+
+// CHECK-LABEL: @fun6
+// CHECK:       ret i1 true
+_Bool fun6() {
+  return atomic_is_lock_free(&AtomicI128);
+}
+
+// CHECK-LABEL: @fun7
+// CHECK:       ret i1 true
+_Bool fun7() {
+  return __c11_atomic_is_lock_free(16);
+}
diff --git a/clang/test/CodeGen/SystemZ/gnu-atomic_is_lock_free-i128-8Al.c b/clang/test/CodeGen/SystemZ/gnu-atomic_is_lock_free-i128-8Al.c
deleted file mode 100644
index 715924abbff0133..000000000000000
--- a/clang/test/CodeGen/SystemZ/gnu-atomic_is_lock_free-i128-8Al.c
+++ /dev/null
@@ -1,28 +0,0 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
-// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
-//
-// Test __atomic_is_lock_free() and __atomic_always_lock_free() for __int128
-// with (default) 8 byte alignment.
-
-#include <stdatomic.h>
-#include <stdint.h>
-
-__int128 Int128_Al8 __attribute__((aligned(8)));
-
-// CHECK-LABEL: @fun0
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[CALL:%.*]] = tail call zeroext i1 @__atomic_is_lock_free(i64 noundef 16, ptr noundef nonnull @Int128_Al8) #[[ATTR2:[0-9]+]]
-// CHECK-NEXT:    ret i1 [[CALL]]
-//
-_Bool fun0() {
-  return __atomic_is_lock_free(16, &Int128_Al8);
-}
-
-// CHECK-LABEL: @fun1
-// CHECK-NEXT:  entry:
-// CHECK-NEXT:    ret i1 false
-//
-_Bool fun1() {
-  return __atomic_always_lock_free(16, &Int128_Al8);
-}
-

>From 56f321dc85137c698a42cbc9d236b9d18d3d8f29 Mon Sep 17 00:00:00 2001
From: Jonas Paulsson <paulson1 at linux.ibm.com>
Date: Mon, 27 Nov 2023 18:14:09 +0100
Subject: [PATCH 4/5] Fix testing (IP)

---
 .../CodeGen/SystemZ/atomic_is_lock_free-i128.c     | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/clang/test/CodeGen/SystemZ/atomic_is_lock_free-i128.c b/clang/test/CodeGen/SystemZ/atomic_is_lock_free-i128.c
index c32b02286455aeb..549f39e9310e3fa 100644
--- a/clang/test/CodeGen/SystemZ/atomic_is_lock_free-i128.c
+++ b/clang/test/CodeGen/SystemZ/atomic_is_lock_free-i128.c
@@ -50,11 +50,23 @@ _Atomic __int128 AtomicI128;
 // CHECK-LABEL: @fun6
 // CHECK:       ret i1 true
 _Bool fun6() {
-  return atomic_is_lock_free(&AtomicI128);
+  return __atomic_is_lock_free(16, &AtomicI128);
 }
 
 // CHECK-LABEL: @fun7
 // CHECK:       ret i1 true
 _Bool fun7() {
+  return __atomic_always_lock_free(16, &AtomicI128);
+}
+
+// CHECK-LABEL: @fun8
+// CHECK:       ret i1 true
+_Bool fun8() {
+  return atomic_is_lock_free(&AtomicI128);
+}
+
+// CHECK-LABEL: @fun9
+// CHECK:       ret i1 true
+_Bool fun9() {
   return __c11_atomic_is_lock_free(16);
 }

>From 63d4bd5f2bee145d13b2f6eb1f341cb84025ea30 Mon Sep 17 00:00:00 2001
From: Jonas Paulsson <paulson1 at linux.ibm.com>
Date: Tue, 28 Nov 2023 15:50:17 +0100
Subject: [PATCH 5/5] Rework testing again.

---
 clang/test/CodeGen/SystemZ/atomic-alignment.c | 35 -------
 .../SystemZ/atomic_is_lock_free-i128.c        | 72 --------------
 .../CodeGen/SystemZ/atomic_is_lock_free.c     | 98 +++++++++++++++++++
 3 files changed, 98 insertions(+), 107 deletions(-)
 delete mode 100644 clang/test/CodeGen/SystemZ/atomic-alignment.c
 delete mode 100644 clang/test/CodeGen/SystemZ/atomic_is_lock_free-i128.c
 create mode 100644 clang/test/CodeGen/SystemZ/atomic_is_lock_free.c

diff --git a/clang/test/CodeGen/SystemZ/atomic-alignment.c b/clang/test/CodeGen/SystemZ/atomic-alignment.c
deleted file mode 100644
index da478842ca31b2b..000000000000000
--- a/clang/test/CodeGen/SystemZ/atomic-alignment.c
+++ /dev/null
@@ -1,35 +0,0 @@
-// RUN: %clang_cc1 -triple s390x-linux-gnu -O3 -emit-llvm %s -o - | FileCheck %s
-//
-// Test alignment of 128 bit Atomic int/fp types, as well as loading
-// from memory with a simple addition. The fp128 is loaded as i128 and
-// then casted.
-
-// CHECK: @Atomic_int128 = {{.*}} i128 0, align 16
-// CHECK: @Atomic_fp128 = {{.*}} fp128 0xL00000000000000000000000000000000, align 16
-
-// CHECK-LABEL:  @f1
-// CHECK:      %atomic-load = load atomic i128, ptr @Atomic_int128 seq_cst, align 16
-// CHECK-NEXT: %add = add nsw i128 %atomic-load, 1
-// CHECK-NEXT: store i128 %add, ptr %agg.result, align 8
-// CHECK-NEXT: ret void
-
-// CHECK-LABEL:  @f2
-// CHECK:      %atomic-load = load atomic i128, ptr @Atomic_fp128 seq_cst, align 16
-// CHECK-NEXT: %0 = bitcast i128 %atomic-load to fp128
-// CHECK-NEXT: %add = fadd fp128 %0, 0xL00000000000000003FFF000000000000
-// CHECK-NEXT: store fp128 %add, ptr %agg.result, align 8
-// CHECK-NEXT: ret void
-
-
-#include <stdatomic.h>
-
-_Atomic __int128    Atomic_int128;
-_Atomic long double Atomic_fp128;
-
-__int128 f1() {
-  return Atomic_int128 + 1;
-}
-
-long double f2() {
-  return Atomic_fp128 + 1.0;
-}
diff --git a/clang/test/CodeGen/SystemZ/atomic_is_lock_free-i128.c b/clang/test/CodeGen/SystemZ/atomic_is_lock_free-i128.c
deleted file mode 100644
index 549f39e9310e3fa..000000000000000
--- a/clang/test/CodeGen/SystemZ/atomic_is_lock_free-i128.c
+++ /dev/null
@@ -1,72 +0,0 @@
-// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
-//
-// Test __atomic_is_lock_free() and friends.
-
-#include <stdatomic.h>
-#include <stdint.h>
-
-__int128 Int128_Al16 __attribute__((aligned(16)));
-
-// CHECK-LABEL: @fun0
-// CHECK:       tail call zeroext i1 @__atomic_is_lock_free
-_Bool fun0() {
-  return __atomic_is_lock_free(16, &Int128_Al16);
-}
-
-// CHECK-LABEL: @fun1
-// CHECK:       ret i1 false
-_Bool fun1() {
-  return __atomic_always_lock_free(16, &Int128_Al16);
-}
-
-__int128 Int128_Al8 __attribute__((aligned(8)));
-
-// CHECK-LABEL: @fun2
-// CHECK:    call zeroext i1 @__atomic_is_lock_free
-_Bool fun2() {
-  return __atomic_is_lock_free(16, &Int128_Al8);
-}
-
-// CHECK-LABEL: @fun3
-// CHECK:    ret i1 false
-_Bool fun3() {
-  return __atomic_always_lock_free(16, &Int128_Al8);
-}
-
-// CHECK-LABEL: @fun4
-// CHECK:       ret i1 true
-_Bool fun4() {
-  return __atomic_is_lock_free(16, 0);
-}
-
-// CHECK-LABEL: @fun5
-// CHECK:       ret i1 true
-_Bool fun5() {
-  return __atomic_always_lock_free(16, 0);
-}
-
-_Atomic __int128 AtomicI128;
-
-// CHECK-LABEL: @fun6
-// CHECK:       ret i1 true
-_Bool fun6() {
-  return __atomic_is_lock_free(16, &AtomicI128);
-}
-
-// CHECK-LABEL: @fun7
-// CHECK:       ret i1 true
-_Bool fun7() {
-  return __atomic_always_lock_free(16, &AtomicI128);
-}
-
-// CHECK-LABEL: @fun8
-// CHECK:       ret i1 true
-_Bool fun8() {
-  return atomic_is_lock_free(&AtomicI128);
-}
-
-// CHECK-LABEL: @fun9
-// CHECK:       ret i1 true
-_Bool fun9() {
-  return __c11_atomic_is_lock_free(16);
-}
diff --git a/clang/test/CodeGen/SystemZ/atomic_is_lock_free.c b/clang/test/CodeGen/SystemZ/atomic_is_lock_free.c
new file mode 100644
index 000000000000000..32c436eaf36ddad
--- /dev/null
+++ b/clang/test/CodeGen/SystemZ/atomic_is_lock_free.c
@@ -0,0 +1,98 @@
+// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
+//
+// Test __atomic_is_lock_free() and friends.
+
+#include <stdatomic.h>
+#include <stdint.h>
+
+typedef __attribute__((aligned(16))) __int128 __int128_Al16;
+
+_Atomic __int128 Int128_Atomic;
+__int128_Al16    Int128_Al16;
+__int128         Int128;
+struct { int I[3]; } _Atomic AtomicStruct;
+_Atomic long double Atomic_fp128; // Also check the alignment of this.
+
+// Check alignments of the variables. @AtomicStruct gets padded and its size
+// and alignment become 16. Only a power-of-2 size is considered, so 16 (not
+// 12) needs to be specified with the intrinsics below.
+//
+// CHECK: %struct.anon = type { [3 x i32] }
+// CHECK: @Int128 = {{.*}} i128 0, align 8
+// CHECK: @Int128_Atomic = {{.*}} i128 0, align 16
+// CHECK: @Int128_Al16 = {{.*}} i128 0, align 16
+// CHECK: @AtomicStruct = {{.*}} { %struct.anon, [4 x i8] } zeroinitializer, align 16
+// CHECK: @Atomic_fp128 = {{.*}} fp128 0xL00000000000000000000000000000000, align 16
+
+
+// CHECK-LABEL: @fun0
+// CHECK:       ret i1 true
+_Bool fun0() {
+  return __atomic_is_lock_free(16, &Int128_Atomic);
+}
+
+// CHECK-LABEL: @fun1
+// CHECK:       ret i1 true
+_Bool fun1() {
+  return __atomic_always_lock_free(16, &Int128_Atomic);
+}
+
+// CHECK-LABEL: @fun2
+// CHECK:       ret i1 true
+_Bool fun2() {
+  return __atomic_is_lock_free(16, &Int128_Al16);
+}
+
+// CHECK-LABEL: @fun3
+// CHECK:       ret i1 true
+_Bool fun3() {
+  return __atomic_always_lock_free(16, &Int128_Al16);
+}
+
+// CHECK-LABEL: @fun4
+// CHECK:    call zeroext i1 @__atomic_is_lock_free
+_Bool fun4() {
+  return __atomic_is_lock_free(16, &Int128);
+}
+
+// CHECK-LABEL: @fun5
+// CHECK:    ret i1 false
+_Bool fun5() {
+  return __atomic_always_lock_free(16, &Int128);
+}
+
+// CHECK-LABEL: @fun6
+// CHECK:       ret i1 true
+_Bool fun6() {
+  return __atomic_is_lock_free(16, 0);
+}
+
+// CHECK-LABEL: @fun7
+// CHECK:       ret i1 true
+_Bool fun7() {
+  return __atomic_always_lock_free(16, 0);
+}
+
+// CHECK-LABEL: @fun8
+// CHECK:       ret i1 true
+_Bool fun8() {
+  return __atomic_is_lock_free(16, &AtomicStruct);
+}
+
+// CHECK-LABEL: @fun9
+// CHECK:       ret i1 true
+_Bool fun9() {
+  return __atomic_always_lock_free(16, &AtomicStruct);
+}
+
+// CHECK-LABEL: @fun10
+// CHECK:       ret i1 true
+_Bool fun10() {
+  return atomic_is_lock_free(&Int128_Atomic);
+}
+
+// CHECK-LABEL: @fun11
+// CHECK:       ret i1 true
+_Bool fun11() {
+  return __c11_atomic_is_lock_free(16);
+}



More information about the cfe-commits mailing list