[clang] [llvm] [LoongArch] Support amcas[_db].{b/h/w/d} instructions. (PR #114189)

via cfe-commits cfe-commits at lists.llvm.org
Tue Nov 26 02:48:48 PST 2024


https://github.com/tangaac updated https://github.com/llvm/llvm-project/pull/114189

>From 88daebae45de7ac225629066f10e8b9765f54e9b Mon Sep 17 00:00:00 2001
From: tangaac <tangyan01 at loongson.cn>
Date: Wed, 23 Oct 2024 17:22:25 +0800
Subject: [PATCH 1/4] Support amcas[_db].{b/h/w/d} instructions.

---
 clang/include/clang/Driver/Options.td         |    4 +
 clang/lib/Basic/Targets/LoongArch.cpp         |    7 +-
 clang/lib/Basic/Targets/LoongArch.h           |    2 +
 .../lib/Driver/ToolChains/Arch/LoongArch.cpp  |    9 +
 clang/test/Driver/loongarch-march.c           |    8 +-
 clang/test/Driver/loongarch-mlamcas.c         |   30 +
 clang/test/Preprocessor/init-loongarch.c      |   25 +-
 .../TargetParser/LoongArchTargetParser.def    |    3 +-
 .../llvm/TargetParser/LoongArchTargetParser.h |    4 +
 llvm/lib/Target/LoongArch/LoongArch.td        |    9 +-
 .../LoongArch/LoongArchISelLowering.cpp       |   16 +-
 .../Target/LoongArch/LoongArchInstrInfo.td    |   24 +-
 .../TargetParser/LoongArchTargetParser.cpp    |    1 +
 .../ir-instruction/atomic-cmpxchg.ll          |   79 +-
 .../ir-instruction/atomicrmw-lamcas.ll        | 5341 +++++++++++++++++
 15 files changed, 5542 insertions(+), 20 deletions(-)
 create mode 100644 clang/test/Driver/loongarch-mlamcas.c
 create mode 100644 llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-lamcas.ll

diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td
index 40fd48761928b3..3be446e12b30ea 100644
--- a/clang/include/clang/Driver/Options.td
+++ b/clang/include/clang/Driver/Options.td
@@ -5413,6 +5413,10 @@ def mlam_bh : Flag<["-"], "mlam-bh">, Group<m_loongarch_Features_Group>,
   HelpText<"Enable amswap[_db].{b/h} and amadd[_db].{b/h}">;
 def mno_lam_bh : Flag<["-"], "mno-lam-bh">, Group<m_loongarch_Features_Group>,
   HelpText<"Disable amswap[_db].{b/h} and amadd[_db].{b/h}">;
+def mlamcas : Flag<["-"], "mlamcas">, Group<m_loongarch_Features_Group>,
+  HelpText<"Enable amcas[_db].{b/h/w/d}">;
+def mno_lamcas : Flag<["-"], "mno-lamcas">, Group<m_loongarch_Features_Group>,
+  HelpText<"Disable amcas[_db].{b/h/w/d}">;
 def mld_seq_sa : Flag<["-"], "mld-seq-sa">, Group<m_loongarch_Features_Group>,
   HelpText<"Do not generate load-load barrier instructions (dbar 0x700)">;
 def mno_ld_seq_sa : Flag<["-"], "mno-ld-seq-sa">, Group<m_loongarch_Features_Group>,
diff --git a/clang/lib/Basic/Targets/LoongArch.cpp b/clang/lib/Basic/Targets/LoongArch.cpp
index 3f2d7317532aaf..145c32bd27525a 100644
--- a/clang/lib/Basic/Targets/LoongArch.cpp
+++ b/clang/lib/Basic/Targets/LoongArch.cpp
@@ -205,7 +205,7 @@ void LoongArchTargetInfo::getTargetDefines(const LangOptions &Opts,
       // TODO: As more features of the V1.1 ISA are supported, a unified "v1.1"
       // arch feature set will be used to include all sub-features belonging to
       // the V1.1 ISA version.
-      if (HasFeatureFrecipe && HasFeatureLAM_BH && HasFeatureLD_SEQ_SA)
+      if (HasFeatureFrecipe && HasFeatureLAM_BH && HasFeatureLAMCAS && HasFeatureLD_SEQ_SA)
         Builder.defineMacro("__loongarch_arch",
                             Twine('"') + "la64v1.1" + Twine('"'));
       else
@@ -239,6 +239,9 @@ void LoongArchTargetInfo::getTargetDefines(const LangOptions &Opts,
   if (HasFeatureLAM_BH)
     Builder.defineMacro("__loongarch_lam_bh", Twine(1));
 
+  if (HasFeatureLAMCAS)
+    Builder.defineMacro("__loongarch_lamcas", Twine(1));
+
   if (HasFeatureLD_SEQ_SA)
     Builder.defineMacro("__loongarch_ld_seq_sa", Twine(1));
 
@@ -320,6 +323,8 @@ bool LoongArchTargetInfo::handleTargetFeatures(
       HasFeatureFrecipe = true;
     else if (Feature == "+lam-bh")
       HasFeatureLAM_BH = true;
+    else if (Feature == "+lamcas")
+      HasFeatureLAMCAS = true;
     else if (Feature == "+ld-seq-sa")
       HasFeatureLD_SEQ_SA = true;
   }
diff --git a/clang/lib/Basic/Targets/LoongArch.h b/clang/lib/Basic/Targets/LoongArch.h
index e5eae7a8fcf677..59e2bdeff7de89 100644
--- a/clang/lib/Basic/Targets/LoongArch.h
+++ b/clang/lib/Basic/Targets/LoongArch.h
@@ -31,6 +31,7 @@ class LLVM_LIBRARY_VISIBILITY LoongArchTargetInfo : public TargetInfo {
   bool HasFeatureLASX;
   bool HasFeatureFrecipe;
   bool HasFeatureLAM_BH;
+  bool HasFeatureLAMCAS;
   bool HasFeatureLD_SEQ_SA;
 
 public:
@@ -42,6 +43,7 @@ class LLVM_LIBRARY_VISIBILITY LoongArchTargetInfo : public TargetInfo {
     HasFeatureLASX = false;
     HasFeatureFrecipe = false;
     HasFeatureLAM_BH = false;
+    HasFeatureLAMCAS = false;
     HasFeatureLD_SEQ_SA = false;
     LongDoubleWidth = 128;
     LongDoubleAlign = 128;
diff --git a/clang/lib/Driver/ToolChains/Arch/LoongArch.cpp b/clang/lib/Driver/ToolChains/Arch/LoongArch.cpp
index 67b71a3ec623e4..caa645d0ff46be 100644
--- a/clang/lib/Driver/ToolChains/Arch/LoongArch.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/LoongArch.cpp
@@ -275,6 +275,15 @@ void loongarch::getLoongArchTargetFeatures(const Driver &D,
       Features.push_back("-lam-bh");
   }
 
+  // Select lamcas feature determined by -m[no-]lamcas.
+  if (const Arg *A =
+          Args.getLastArg(options::OPT_mlamcas, options::OPT_mno_lamcas)) {
+    if (A->getOption().matches(options::OPT_mlamcas))
+      Features.push_back("+lamcas");
+    else
+      Features.push_back("-lamcas");
+  }
+
   // Select ld-seq-sa feature determined by -m[no-]ld-seq-sa.
   if (const Arg *A = Args.getLastArg(options::OPT_mld_seq_sa,
                                      options::OPT_mno_ld_seq_sa)) {
diff --git a/clang/test/Driver/loongarch-march.c b/clang/test/Driver/loongarch-march.c
index c7091336f3bc80..b3a6557f6f231b 100644
--- a/clang/test/Driver/loongarch-march.c
+++ b/clang/test/Driver/loongarch-march.c
@@ -39,21 +39,21 @@
 
 // CC1-LA64V1P1: "-target-cpu" "loongarch64"
 // CC1-LA64V1P1-NOT: "-target-feature"
-// CC1-LA64V1P1: "-target-feature" "+64bit" "-target-feature" "+d" "-target-feature" "+lsx" "-target-feature" "+ual" "-target-feature" "+frecipe" "-target-feature" "+lam-bh" "-target-feature" "+ld-seq-sa"
+// CC1-LA64V1P1: "-target-feature" "+64bit" "-target-feature" "+d" "-target-feature" "+lsx" "-target-feature" "+ual" "-target-feature" "+frecipe" "-target-feature" "+lam-bh" "-target-feature" "+lamcas" "-target-feature" "+ld-seq-sa"
 // CC1-LA64V1P1-NOT: "-target-feature"
 // CC1-LA64V1P1: "-target-abi" "lp64d"
 
 // CC1-LA664: "-target-cpu" "la664"
 // CC1-LA664-NOT: "-target-feature"
-// CC1-LA664: "-target-feature" "+64bit" "-target-feature" "+f" "-target-feature" "+d" "-target-feature" "+lsx" "-target-feature" "+lasx" "-target-feature" "+ual" "-target-feature" "+frecipe" "-target-feature" "+lam-bh" "-target-feature" "+ld-seq-sa"
+// CC1-LA664: "-target-feature" "+64bit" "-target-feature" "+f" "-target-feature" "+d" "-target-feature" "+lsx" "-target-feature" "+lasx" "-target-feature" "+ual" "-target-feature" "+frecipe" "-target-feature" "+lam-bh" "-target-feature" "+lamcas" "-target-feature" "+ld-seq-sa"
 // CC1-LA664-NOT: "-target-feature"
 // CC1-LA664: "-target-abi" "lp64d"
 
 // IR-LOONGARCH64: attributes #[[#]] ={{.*}}"target-cpu"="loongarch64" {{.*}}"target-features"="+64bit,+d,+f,+ual"
 // IR-LA464: attributes #[[#]] ={{.*}}"target-cpu"="la464" {{.*}}"target-features"="+64bit,+d,+f,+lasx,+lsx,+ual"
 // IR-LA64V1P0: attributes #[[#]] ={{.*}}"target-cpu"="loongarch64" {{.*}}"target-features"="+64bit,+d,+lsx,+ual"
-// IR-LA64V1P1: attributes #[[#]] ={{.*}}"target-cpu"="loongarch64" {{.*}}"target-features"="+64bit,+d,+frecipe,+lam-bh,+ld-seq-sa,+lsx,+ual"
-// IR-LA664: attributes #[[#]] ={{.*}}"target-cpu"="la664" {{.*}}"target-features"="+64bit,+d,+f,+frecipe,+lam-bh,+lasx,+ld-seq-sa,+lsx,+ual"
+// IR-LA64V1P1: attributes #[[#]] ={{.*}}"target-cpu"="loongarch64" {{.*}}"target-features"="+64bit,+d,+frecipe,+lam-bh,+lamcas,+ld-seq-sa,+lsx,+ual"
+// IR-LA664: attributes #[[#]] ={{.*}}"target-cpu"="la664" {{.*}}"target-features"="+64bit,+d,+f,+frecipe,+lam-bh,+lamcas,+lasx,+ld-seq-sa,+lsx,+ual"
 
 int foo(void) {
   return 3;
diff --git a/clang/test/Driver/loongarch-mlamcas.c b/clang/test/Driver/loongarch-mlamcas.c
new file mode 100644
index 00000000000000..2185a1a8115d66
--- /dev/null
+++ b/clang/test/Driver/loongarch-mlamcas.c
@@ -0,0 +1,30 @@
+/// Test -m[no]lamcas options.
+
+// RUN: %clang --target=loongarch64 -mlamcas -fsyntax-only %s -### 2>&1 | \
+// RUN:     FileCheck %s --check-prefix=CC1-LAMCAS
+// RUN: %clang --target=loongarch64 -mno-lamcas -fsyntax-only %s -### 2>&1 | \
+// RUN:     FileCheck %s --check-prefix=CC1-NO-LAMCAS
+// RUN: %clang --target=loongarch64 -mno-lamcas -mlamcas -fsyntax-only %s -### 2>&1 | \
+// RUN:     FileCheck %s --check-prefix=CC1-LAMCAS
+// RUN: %clang --target=loongarch64  -mlamcas -mno-lamcas -fsyntax-only %s -### 2>&1 | \
+// RUN:     FileCheck %s --check-prefix=CC1-NO-LAMCAS
+
+// RUN: %clang --target=loongarch64 -mlamcas -S -emit-llvm %s -o - | \
+// RUN: FileCheck %s --check-prefix=IR-LAMCAS
+// RUN: %clang --target=loongarch64 -mno-lamcas -S -emit-llvm %s -o - | \
+// RUN: FileCheck %s --check-prefix=IR-NO-LAMCAS
+// RUN: %clang --target=loongarch64 -mno-lamcas -mlamcas -S -emit-llvm %s -o - | \
+// RUN: FileCheck %s --check-prefix=IR-LAMCAS
+// RUN: %clang --target=loongarch64 -mlamcas -mno-lamcas -S -emit-llvm %s -o - | \
+// RUN: FileCheck %s --check-prefix=IR-NO-LAMCAS
+
+
+// CC1-LAMCAS: "-target-feature" "+lamcas"
+// CC1-NO-LAMCAS: "-target-feature" "-lamcas"
+
+// IR-LAMCAS: attributes #[[#]] ={{.*}}"target-features"="{{(.*,)?}}+lamcas{{(,.*)?}}"
+// IR-NO-LAMCAS: attributes #[[#]] ={{.*}}"target-features"="{{(.*,)?}}-lamcas{{(,.*)?}}"
+
+int foo(void) {
+  return 42;
+}
diff --git a/clang/test/Preprocessor/init-loongarch.c b/clang/test/Preprocessor/init-loongarch.c
index 0e3320f01b328c..aa8b1bbc1e7aea 100644
--- a/clang/test/Preprocessor/init-loongarch.c
+++ b/clang/test/Preprocessor/init-loongarch.c
@@ -798,7 +798,7 @@
 // LA64-FPU0-LP64S-NOT: #define __loongarch_single_float
 // LA64-FPU0-LP64S: #define __loongarch_soft_float 1
 
-/// Check __loongarch_arch{_tune/_frecipe/_lam_bh/_ld_seq_sa}.
+/// Check __loongarch_arch{_tune/_frecipe/_lam_bh/_lamcas/_ld_seq_sa}.
 
 // RUN: %clang --target=loongarch64 -x c -E -dM %s -o - | \
 // RUN:   FileCheck --match-full-lines --check-prefix=ARCH-TUNE -DARCH=la64v1.0 -DTUNE=loongarch64 %s
@@ -823,11 +823,11 @@
 // RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=loongarch64 -Xclang -target-feature -Xclang +lsx | \
 // RUN:   FileCheck --match-full-lines --check-prefix=ARCH-TUNE -DARCH=la64v1.0 -DTUNE=loongarch64 %s
 // RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=la64v1.1 | \
-// RUN:   FileCheck --match-full-lines  --check-prefixes=ARCH-TUNE,FRECIPE,LAM-BH,LD-SEQ-SA -DARCH=la64v1.1 -DTUNE=loongarch64 %s
+// RUN:   FileCheck --match-full-lines  --check-prefixes=ARCH-TUNE,FRECIPE,LAM-BH,LAMCAS,LD-SEQ-SA -DARCH=la64v1.1 -DTUNE=loongarch64 %s
 // RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=la64v1.1 -Xclang -target-feature -Xclang -frecipe | \
-// RUN:   FileCheck --match-full-lines --check-prefixes=ARCH-TUNE,LAM-BH,LD-SEQ-SA -DARCH=la64v1.0 -DTUNE=loongarch64 %s
+// RUN:   FileCheck --match-full-lines --check-prefixes=ARCH-TUNE,LAM-BH,LAMCAS,LD-SEQ-SA -DARCH=la64v1.0 -DTUNE=loongarch64 %s
 // RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=la64v1.1 -Xclang -target-feature -Xclang -lsx | \
-// RUN:   FileCheck --match-full-lines --check-prefixes=ARCH-TUNE,FRECIPE,LAM-BH,LD-SEQ-SA -DARCH=loongarch64 -DTUNE=loongarch64 %s
+// RUN:   FileCheck --match-full-lines --check-prefixes=ARCH-TUNE,FRECIPE,LAM-BH,LAMCAS,LD-SEQ-SA -DARCH=loongarch64 -DTUNE=loongarch64 %s
 // RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=loongarch64 -Xclang -target-feature -Xclang +frecipe | \
 // RUN:   FileCheck --match-full-lines --check-prefixes=ARCH-TUNE,FRECIPE -DARCH=loongarch64 -DTUNE=loongarch64 %s
 // RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=loongarch64 -Xclang -target-feature -Xclang +lsx -Xclang -target-feature -Xclang +frecipe | \
@@ -835,11 +835,19 @@
 // RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=la64v1.0 -Xclang -target-feature -Xclang +lam-bh | \
 // RUN:   FileCheck --match-full-lines --check-prefixes=ARCH-TUNE,LAM-BH -DARCH=la64v1.0 -DTUNE=loongarch64 %s
 // RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=la64v1.1 -Xclang -target-feature -Xclang -lam-bh | \
-// RUN:   FileCheck --match-full-lines --check-prefixes=ARCH-TUNE,FRECIPE,LD-SEQ-SA -DARCH=la64v1.0 -DTUNE=loongarch64 %s
+// RUN:   FileCheck --match-full-lines --check-prefixes=ARCH-TUNE,FRECIPE,LAMCAS,LD-SEQ-SA -DARCH=la64v1.0 -DTUNE=loongarch64 %s
 // RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=loongarch64 -Xclang -target-feature -Xclang +lam-bh | \
 // RUN:   FileCheck --match-full-lines --check-prefixes=ARCH-TUNE,LAM-BH -DARCH=loongarch64 -DTUNE=loongarch64 %s
 // RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=loongarch64 -Xclang -target-feature -Xclang +lsx -Xclang -target-feature -Xclang +lam-bh | \
 // RUN:   FileCheck --match-full-lines --check-prefixes=ARCH-TUNE,LAM-BH -DARCH=la64v1.0 -DTUNE=loongarch64 %s
+// RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=la64v1.0 -Xclang -target-feature -Xclang +lamcas | \
+// RUN:   FileCheck --match-full-lines --check-prefixes=ARCH-TUNE,LAMCAS -DARCH=la64v1.0 -DTUNE=loongarch64 %s
+// RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=la64v1.1 -Xclang -target-feature -Xclang -lamcas | \
+// RUN:   FileCheck --match-full-lines --check-prefixes=ARCH-TUNE,FRECIPE,LAM-BH,LD-SEQ-SA -DARCH=la64v1.0 -DTUNE=loongarch64 %s
+// RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=loongarch64 -Xclang -target-feature -Xclang +lamcas | \
+// RUN:   FileCheck --match-full-lines --check-prefixes=ARCH-TUNE,LAMCAS -DARCH=loongarch64 -DTUNE=loongarch64 %s
+// RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=loongarch64 -Xclang -target-feature -Xclang +lsx -Xclang -target-feature -Xclang +lamcas | \
+// RUN:   FileCheck --match-full-lines --check-prefixes=ARCH-TUNE,LAMCAS -DARCH=la64v1.0 -DTUNE=loongarch64 %s
 // RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=la64v1.0 -Xclang -target-feature -Xclang +ld-seq-sa | \
 // RUN:   FileCheck --match-full-lines --check-prefixes=ARCH-TUNE,LD-SEQ-SA -DARCH=la64v1.0 -DTUNE=loongarch64 %s
 // RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=la64v1.1 -Xclang -target-feature -Xclang -ld-seq-sa | \
@@ -848,20 +856,21 @@
 // RUN:   FileCheck --match-full-lines --check-prefixes=ARCH-TUNE,LD-SEQ-SA -DARCH=loongarch64 -DTUNE=loongarch64 %s
 // RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=loongarch64 -Xclang -target-feature -Xclang +lsx -Xclang -target-feature -Xclang +ld-seq-sa | \
 // RUN:   FileCheck --match-full-lines --check-prefixes=ARCH-TUNE,LD-SEQ-SA -DARCH=la64v1.0 -DTUNE=loongarch64 %s
-// RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=la64v1.0 -Xclang -target-feature -Xclang +frecipe -Xclang -target-feature -Xclang +lam-bh  -Xclang -target-feature -Xclang +ld-seq-sa | \
+// RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=la64v1.0 -Xclang -target-feature -Xclang +frecipe -Xclang -target-feature -Xclang +lam-bh  -Xclang -target-feature -Xclang +lamcas -Xclang -target-feature -Xclang +ld-seq-sa | \
 // RUN:   FileCheck --match-full-lines --check-prefixes=ARCH-TUNE -DARCH=la64v1.1 -DTUNE=loongarch64 %s
 // RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=la664 | \
-// RUN:   FileCheck --match-full-lines --check-prefixes=ARCH-TUNE,FRECIPE,LAM-BH,LD-SEQ-SA -DARCH=la664 -DTUNE=la664 %s
+// RUN:   FileCheck --match-full-lines --check-prefixes=ARCH-TUNE,FRECIPE,LAM-BH,LAMCAS,LD-SEQ-SA -DARCH=la664 -DTUNE=la664 %s
 // RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -mtune=la664 | \
 // RUN:   FileCheck --match-full-lines --check-prefix=ARCH-TUNE -DARCH=la64v1.0 -DTUNE=la664 %s
 // RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=loongarch64 -mtune=la664 | \
 // RUN:   FileCheck --match-full-lines --check-prefix=ARCH-TUNE -DARCH=loongarch64 -DTUNE=la664 %s
 // RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=la664 -mtune=loongarch64 | \
-// RUN:   FileCheck --match-full-lines --check-prefixes=ARCH-TUNE,FRECIPE,LAM-BH,LD-SEQ-SA -DARCH=la664 -DTUNE=loongarch64 %s
+// RUN:   FileCheck --match-full-lines --check-prefixes=ARCH-TUNE,FRECIPE,LAM-BH,LAMCAS,LD-SEQ-SA -DARCH=la664 -DTUNE=loongarch64 %s
 
 // ARCH-TUNE: #define __loongarch_arch "[[ARCH]]"
 // FRECIPE: #define __loongarch_frecipe 1
 // LAM-BH: #define __loongarch_lam_bh 1
+// LAMCAS: #define __loongarch_lamcas 1
 // LD-SEQ-SA: #define __loongarch_ld_seq_sa 1
 // ARCH-TUNE: #define __loongarch_tune "[[TUNE]]"
 
diff --git a/llvm/include/llvm/TargetParser/LoongArchTargetParser.def b/llvm/include/llvm/TargetParser/LoongArchTargetParser.def
index 324d5c18e6dea3..78f3d0bee85e83 100644
--- a/llvm/include/llvm/TargetParser/LoongArchTargetParser.def
+++ b/llvm/include/llvm/TargetParser/LoongArchTargetParser.def
@@ -12,6 +12,7 @@ LOONGARCH_FEATURE("+lvz", FK_LVZ)
 LOONGARCH_FEATURE("+ual", FK_UAL)
 LOONGARCH_FEATURE("+frecipe", FK_FRECIPE)
 LOONGARCH_FEATURE("+lam-bh", FK_LAM_BH)
+LOONGARCH_FEATURE("+lamcas", FK_LAMCAS)
 LOONGARCH_FEATURE("+ld-seq-sa", FK_LD_SEQ_SA)
 
 #undef LOONGARCH_FEATURE
@@ -22,6 +23,6 @@ LOONGARCH_FEATURE("+ld-seq-sa", FK_LD_SEQ_SA)
 
 LOONGARCH_ARCH("loongarch64", AK_LOONGARCH64, FK_64BIT | FK_FP32 | FK_FP64 | FK_UAL)
 LOONGARCH_ARCH("la464", AK_LA464, FK_64BIT | FK_FP32 | FK_FP64 | FK_LSX | FK_LASX | FK_UAL)
-LOONGARCH_ARCH("la664", AK_LA664, FK_64BIT | FK_FP32 | FK_FP64 | FK_LSX | FK_LASX | FK_UAL | FK_FRECIPE | FK_LAM_BH | FK_LD_SEQ_SA)
+LOONGARCH_ARCH("la664", AK_LA664, FK_64BIT | FK_FP32 | FK_FP64 | FK_LSX | FK_LASX | FK_UAL | FK_FRECIPE | FK_LAM_BH | FK_LAMCAS | FK_LD_SEQ_SA)
 
 #undef LOONGARCH_ARCH
diff --git a/llvm/include/llvm/TargetParser/LoongArchTargetParser.h b/llvm/include/llvm/TargetParser/LoongArchTargetParser.h
index 00957b84ab576c..c0012c68fb7dbd 100644
--- a/llvm/include/llvm/TargetParser/LoongArchTargetParser.h
+++ b/llvm/include/llvm/TargetParser/LoongArchTargetParser.h
@@ -54,6 +54,10 @@ enum FeatureKind : uint32_t {
   // available.
   FK_LAM_BH = 1 << 10,
 
+  // Atomic memory compare and swap instructions for byte, half word, word and
+  // double word are available.
+  FK_LAMCAS = 1 << 11,
+
   // Do not generate load-load barrier instructions (dbar 0x700).
   FK_LD_SEQ_SA = 1 << 12,
 
diff --git a/llvm/lib/Target/LoongArch/LoongArch.td b/llvm/lib/Target/LoongArch/LoongArch.td
index 100bdba36c440c..e5ae61da5a6699 100644
--- a/llvm/lib/Target/LoongArch/LoongArch.td
+++ b/llvm/lib/Target/LoongArch/LoongArch.td
@@ -118,6 +118,12 @@ def FeatureLAM_BH
                         "Support amswap[_db].{b/h} and amadd[_db].{b/h} instructions.">;
 def HasLAM_BH : Predicate<"Subtarget->hasLAM_BH()">;
 
+// Atomic memory compare and swap instructions for byte, half word, word and double word
+def FeatureLAMCAS
+    : SubtargetFeature<"lamcas", "HasLAMCAS", "true",
+                        "Support amcas[_db].{b/h/w/d}.">;
+def HasLAMCAS : Predicate<"Subtarget->hasLAMCAS()">;
+
 def FeatureLD_SEQ_SA
     : SubtargetFeature<"ld-seq-sa", "HasLD_SEQ_SA", "true",
                         "Don't use load-load barrier (dbar 0x700).">;
@@ -165,7 +171,8 @@ def : ProcessorModel<"la664", NoSchedModel, [Feature64Bit,
                                              FeatureExtLVZ,
                                              FeatureExtLBT,
                                              FeatureFrecipe,
-                                             FeatureLAM_BH]>;
+                                             FeatureLAM_BH,
+                                             FeatureLAMCAS]>;
 
 //===----------------------------------------------------------------------===//
 // Define the LoongArch target.
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 1abb428175eea7..b64c6dc2165577 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -371,6 +371,10 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
   setPrefFunctionAlignment(Subtarget.getPrefFunctionAlignment());
   setPrefLoopAlignment(Subtarget.getPrefLoopAlignment());
   setMaxBytesForAlignment(Subtarget.getMaxBytesForAlignment());
+
+  // cmpxchg sizes down to 8 bits become legal if LAMCAS is available.
+  if (Subtarget.hasLAMCAS())
+    setMinCmpXchgSizeInBits(8);
 }
 
 bool LoongArchTargetLowering::isOffsetFoldingLegal(
@@ -5765,6 +5769,10 @@ LoongArchTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
   }
 
   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
+  if (Subtarget.hasLAMCAS() &&
+      (AI->getOperation() == AtomicRMWInst::Nand || Size < 32))
+    return AtomicExpansionKind::CmpXChg;
+
   if (Size == 8 || Size == 16)
     return AtomicExpansionKind::MaskedIntrinsic;
   return AtomicExpansionKind::None;
@@ -5819,6 +5827,10 @@ getIntrinsicForMaskedAtomicRMWBinOp(unsigned GRLen,
 TargetLowering::AtomicExpansionKind
 LoongArchTargetLowering::shouldExpandAtomicCmpXchgInIR(
     AtomicCmpXchgInst *CI) const {
+
+  if (Subtarget.hasLAMCAS())
+    return AtomicExpansionKind::None;
+
   unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
   if (Size == 8 || Size == 16)
     return AtomicExpansionKind::MaskedIntrinsic;
@@ -6314,8 +6326,8 @@ bool LoongArchTargetLowering::hasAndNotCompare(SDValue Y) const {
 }
 
 ISD::NodeType LoongArchTargetLowering::getExtendForAtomicCmpSwapArg() const {
-  // TODO: LAMCAS will use amcas{_DB,}.[bhwd] which does not require extension.
-  return ISD::SIGN_EXTEND;
+  // LAMCAS will use amcas[_DB].{b/h/w/d} which does not require extension.
+  return Subtarget.hasLAMCAS() ? ISD::ANY_EXTEND : ISD::SIGN_EXTEND;
 }
 
 bool LoongArchTargetLowering::shouldSignExtendTypeInLibCall(
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
index 7993f4f1326937..dd41327abc8422 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -719,9 +719,9 @@ class AM_3R<bits<32> op>
 }
 
 class AMCAS_3R<bits<32> op>
-    : Fmt3R<op, (outs GPR:$rd_wb), (ins GPR:$rd, GPR:$rk, GPRMemAtomic:$rj),
+    : Fmt3R<op, (outs GPR:$dst), (ins GPR:$rd, GPR:$rk, GPRMemAtomic:$rj),
             "$rd, $rk, $rj"> {
-  let Constraints = "@earlyclobber $rd_wb, $rd_wb = $rd";
+  let Constraints = "@earlyclobber $dst, $dst = $rd";
   let IsAMCAS = 1;
 }
 } // hasSideEffects = 0, mayLoad = 1, mayStore = 1,
@@ -2119,6 +2119,26 @@ def : Pat<(atomic_load_sub_i16 GPR:$rj, GPR:$rk),
           (AMADD__DB_H (SUB_W R0, GPR:$rk), GPR:$rj)>;
 } // Predicates = [ IsLA64, HasLAM_BH ]
 
+let Predicates = [ HasLAMCAS, IsLA64 ] in {
+
+def : Pat<(atomic_cmp_swap_i8_monotonic GPR:$addr, GPR:$cmp, GPR:$new),
+          (AMCAS_B GPR:$cmp, GPR:$new, GPR:$addr)>;
+def : Pat<(atomic_cmp_swap_i16_monotonic GPR:$addr, GPR:$cmp, GPR:$new),
+          (AMCAS_H GPR:$cmp, GPR:$new, GPR:$addr)>;
+def : Pat<(atomic_cmp_swap_i32_monotonic GPR:$addr, GPR:$cmp, GPR:$new),
+          (AMCAS_W GPR:$cmp, GPR:$new, GPR:$addr)>;
+def : Pat<(atomic_cmp_swap_i64_monotonic GPR:$addr, GPR:$cmp, GPR:$new),
+          (AMCAS_D GPR:$cmp, GPR:$new, GPR:$addr)>;
+
+def : Pat<(atomic_cmp_swap_i8 GPR:$addr, GPR:$cmp, GPR:$new),
+          (AMCAS__DB_B GPR:$cmp, GPR:$new, GPR:$addr)>;
+def : Pat<(atomic_cmp_swap_i16 GPR:$addr, GPR:$cmp, GPR:$new),
+          (AMCAS__DB_H GPR:$cmp, GPR:$new, GPR:$addr)>;
+def : Pat<(atomic_cmp_swap_i32 GPR:$addr, GPR:$cmp, GPR:$new),
+          (AMCAS__DB_W GPR:$cmp, GPR:$new, GPR:$addr)>;
+def : Pat<(atomic_cmp_swap_i64 GPR:$addr, GPR:$cmp, GPR:$new),
+          (AMCAS__DB_D GPR:$cmp, GPR:$new, GPR:$addr)>;
+}
 
 let Predicates = [IsLA64] in {
 
diff --git a/llvm/lib/TargetParser/LoongArchTargetParser.cpp b/llvm/lib/TargetParser/LoongArchTargetParser.cpp
index 9b8407a73bea3f..9b42498b3d8953 100644
--- a/llvm/lib/TargetParser/LoongArchTargetParser.cpp
+++ b/llvm/lib/TargetParser/LoongArchTargetParser.cpp
@@ -53,6 +53,7 @@ bool LoongArch::getArchFeatures(StringRef Arch,
     if (Arch == "la64v1.1") {
       Features.push_back("+frecipe");
       Features.push_back("+lam-bh");
+      Features.push_back("+lamcas");
       Features.push_back("+ld-seq-sa");
     }
     return true;
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/atomic-cmpxchg.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/atomic-cmpxchg.ll
index 4ff15f2b7e4481..bbc9052549117a 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/atomic-cmpxchg.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/atomic-cmpxchg.ll
@@ -1,6 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc --mtriple=loongarch64 -mattr=+d,-ld-seq-sa < %s | FileCheck %s --check-prefixes=LA64,NO-LD-SEQ-SA
+; RUN: llc --mtriple=loongarch64 -mattr=+d,-lamcas,-ld-seq-sa < %s | FileCheck %s --check-prefixes=LA64,NO-LD-SEQ-SA
 ; RUN: llc --mtriple=loongarch64 -mattr=+d,+ld-seq-sa < %s | FileCheck %s --check-prefixes=LA64,LD-SEQ-SA
+; RUN: llc --mtriple=loongarch64 -mattr=+d,+lamcas < %s | FileCheck %s --check-prefix=LA64-LAMCAS
 
 define void @cmpxchg_i8_acquire_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
 ; LA64-LABEL: cmpxchg_i8_acquire_acquire:
@@ -27,6 +28,11 @@ define void @cmpxchg_i8_acquire_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
 ; LA64-NEXT:    dbar 20
 ; LA64-NEXT:  .LBB0_4:
 ; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i8_acquire_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    amcas_db.b $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    ret
   %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val acquire acquire
   ret void
 }
@@ -57,6 +63,11 @@ define void @cmpxchg_i16_acquire_acquire(ptr %ptr, i16 %cmp, i16 %val) nounwind
 ; LA64-NEXT:    dbar 20
 ; LA64-NEXT:  .LBB1_4:
 ; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i16_acquire_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    amcas_db.h $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    ret
   %res = cmpxchg ptr %ptr, i16 %cmp, i16 %val acquire acquire
   ret void
 }
@@ -77,6 +88,11 @@ define void @cmpxchg_i32_acquire_acquire(ptr %ptr, i32 %cmp, i32 %val) nounwind
 ; LA64-NEXT:    dbar 20
 ; LA64-NEXT:  .LBB2_4:
 ; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i32_acquire_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    amcas_db.w $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    ret
   %res = cmpxchg ptr %ptr, i32 %cmp, i32 %val acquire acquire
   ret void
 }
@@ -96,6 +112,11 @@ define void @cmpxchg_i64_acquire_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
 ; LA64-NEXT:    dbar 20
 ; LA64-NEXT:  .LBB3_4:
 ; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i64_acquire_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    amcas_db.d $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    ret
   %res = cmpxchg ptr %ptr, i64 %cmp, i64 %val acquire acquire
   ret void
 }
@@ -302,6 +323,12 @@ define i8 @cmpxchg_i8_acquire_acquire_reti8(ptr %ptr, i8 %cmp, i8 %val) nounwind
 ; LA64-NEXT:  .LBB8_4:
 ; LA64-NEXT:    srl.w $a0, $a5, $a3
 ; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i8_acquire_acquire_reti8:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    amcas_db.b $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    move $a0, $a1
+; LA64-LAMCAS-NEXT:    ret
   %tmp = cmpxchg ptr %ptr, i8 %cmp, i8 %val acquire acquire
   %res = extractvalue { i8, i1 } %tmp, 0
   ret i8 %res
@@ -334,6 +361,12 @@ define i16 @cmpxchg_i16_acquire_acquire_reti16(ptr %ptr, i16 %cmp, i16 %val) nou
 ; LA64-NEXT:  .LBB9_4:
 ; LA64-NEXT:    srl.w $a0, $a5, $a3
 ; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i16_acquire_acquire_reti16:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    amcas_db.h $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    move $a0, $a1
+; LA64-LAMCAS-NEXT:    ret
   %tmp = cmpxchg ptr %ptr, i16 %cmp, i16 %val acquire acquire
   %res = extractvalue { i16, i1 } %tmp, 0
   ret i16 %res
@@ -356,6 +389,12 @@ define i32 @cmpxchg_i32_acquire_acquire_reti32(ptr %ptr, i32 %cmp, i32 %val) nou
 ; LA64-NEXT:  .LBB10_4:
 ; LA64-NEXT:    move $a0, $a1
 ; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i32_acquire_acquire_reti32:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    amcas_db.w $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    move $a0, $a1
+; LA64-LAMCAS-NEXT:    ret
   %tmp = cmpxchg ptr %ptr, i32 %cmp, i32 %val acquire acquire
   %res = extractvalue { i32, i1 } %tmp, 0
   ret i32 %res
@@ -377,6 +416,12 @@ define i64 @cmpxchg_i64_acquire_acquire_reti64(ptr %ptr, i64 %cmp, i64 %val) nou
 ; LA64-NEXT:  .LBB11_4:
 ; LA64-NEXT:    move $a0, $a3
 ; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i64_acquire_acquire_reti64:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    amcas_db.d $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    move $a0, $a1
+; LA64-LAMCAS-NEXT:    ret
   %tmp = cmpxchg ptr %ptr, i64 %cmp, i64 %val acquire acquire
   %res = extractvalue { i64, i1 } %tmp, 0
   ret i64 %res
@@ -410,6 +455,14 @@ define i1 @cmpxchg_i8_acquire_acquire_reti1(ptr %ptr, i8 %cmp, i8 %val) nounwind
 ; LA64-NEXT:    xor $a0, $a1, $a0
 ; LA64-NEXT:    sltui $a0, $a0, 1
 ; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i8_acquire_acquire_reti1:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    ext.w.b $a3, $a1
+; LA64-LAMCAS-NEXT:    amcas_db.b $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    xor $a0, $a1, $a3
+; LA64-LAMCAS-NEXT:    sltui $a0, $a0, 1
+; LA64-LAMCAS-NEXT:    ret
   %tmp = cmpxchg ptr %ptr, i8 %cmp, i8 %val acquire acquire
   %res = extractvalue { i8, i1 } %tmp, 1
   ret i1 %res
@@ -444,6 +497,14 @@ define i1 @cmpxchg_i16_acquire_acquire_reti1(ptr %ptr, i16 %cmp, i16 %val) nounw
 ; LA64-NEXT:    xor $a0, $a1, $a0
 ; LA64-NEXT:    sltui $a0, $a0, 1
 ; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i16_acquire_acquire_reti1:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    ext.w.h $a3, $a1
+; LA64-LAMCAS-NEXT:    amcas_db.h $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    xor $a0, $a1, $a3
+; LA64-LAMCAS-NEXT:    sltui $a0, $a0, 1
+; LA64-LAMCAS-NEXT:    ret
   %tmp = cmpxchg ptr %ptr, i16 %cmp, i16 %val acquire acquire
   %res = extractvalue { i16, i1 } %tmp, 1
   ret i1 %res
@@ -467,6 +528,14 @@ define i1 @cmpxchg_i32_acquire_acquire_reti1(ptr %ptr, i32 %cmp, i32 %val) nounw
 ; LA64-NEXT:    xor $a0, $a3, $a1
 ; LA64-NEXT:    sltui $a0, $a0, 1
 ; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i32_acquire_acquire_reti1:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    addi.w $a3, $a1, 0
+; LA64-LAMCAS-NEXT:    amcas_db.w $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    xor $a0, $a1, $a3
+; LA64-LAMCAS-NEXT:    sltui $a0, $a0, 1
+; LA64-LAMCAS-NEXT:    ret
   %tmp = cmpxchg ptr %ptr, i32 %cmp, i32 %val acquire acquire
   %res = extractvalue { i32, i1 } %tmp, 1
   ret i1 %res
@@ -489,6 +558,14 @@ define i1 @cmpxchg_i64_acquire_acquire_reti1(ptr %ptr, i64 %cmp, i64 %val) nounw
 ; LA64-NEXT:    xor $a0, $a3, $a1
 ; LA64-NEXT:    sltui $a0, $a0, 1
 ; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i64_acquire_acquire_reti1:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a3, $a1
+; LA64-LAMCAS-NEXT:    amcas_db.d $a3, $a2, $a0
+; LA64-LAMCAS-NEXT:    xor $a0, $a3, $a1
+; LA64-LAMCAS-NEXT:    sltui $a0, $a0, 1
+; LA64-LAMCAS-NEXT:    ret
   %tmp = cmpxchg ptr %ptr, i64 %cmp, i64 %val acquire acquire
   %res = extractvalue { i64, i1 } %tmp, 1
   ret i1 %res
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-lamcas.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-lamcas.ll
new file mode 100644
index 00000000000000..2f677038e1db37
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-lamcas.ll
@@ -0,0 +1,5341 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 -mattr=+d,-lamcas < %s | FileCheck %s --check-prefix=LA64
+; RUN: llc --mtriple=loongarch64 -mattr=+d,+lamcas < %s | FileCheck %s --check-prefix=LA64-LAMCAS
+
+define i8 @atomicrmw_xchg_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_xchg_i8_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB0_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    addi.w $a5, $a1, 0
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB0_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_i8_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    ld.bu $a2, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB0_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.b $a3, $a2
+; LA64-LAMCAS-NEXT:    amcas_db.b $a2, $a1, $a0
+; LA64-LAMCAS-NEXT:    bne $a2, $a3, .LBB0_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    move $a0, $a2
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 %b acquire
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_0_i8_acquire(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_0_i8_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a2, $zero, 255
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    nor $a2, $a2, $zero
+; LA64-NEXT:    amand_db.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_0_i8_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a1, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB1_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.b $a2, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $zero, $a1
+; LA64-LAMCAS-NEXT:    bne $a0, $a2, .LBB1_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 0 acquire
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_minus_1_i8_acquire(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_minus_1_i8_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a2, $zero, 255
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    amor_db.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_minus_1_i8_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a1, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    addi.w $a2, $zero, -1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB2_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.b $a3, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a2, $a1
+; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB2_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 -1 acquire
+  ret i8 %1
+}
+
+define i16 @atomicrmw_xchg_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_xchg_i16_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB3_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    addi.w $a5, $a1, 0
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB3_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_i16_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    ld.hu $a2, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB3_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.h $a3, $a2
+; LA64-LAMCAS-NEXT:    amcas_db.h $a2, $a1, $a0
+; LA64-LAMCAS-NEXT:    bne $a2, $a3, .LBB3_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    move $a0, $a2
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 %b acquire
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_0_i16_acquire(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_0_i16_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    nor $a2, $a2, $zero
+; LA64-NEXT:    amand_db.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_0_i16_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a1, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB4_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.h $a2, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $zero, $a1
+; LA64-LAMCAS-NEXT:    bne $a0, $a2, .LBB4_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 0 acquire
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_minus_1_i16_acquire(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_minus_1_i16_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    amor_db.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_minus_1_i16_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a1, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    addi.w $a2, $zero, -1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB5_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.h $a3, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a2, $a1
+; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB5_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 -1 acquire
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_add_i8_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB6_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    add.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB6_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_add_i8_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB6_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    add.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB6_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw add ptr %a, i8 %b acquire
+  ret i8 %1
+}
+
+define i16 @atomicrmw_add_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_add_i16_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB7_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    add.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB7_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_add_i16_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB7_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    add.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB7_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw add ptr %a, i16 %b acquire
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_sub_i8_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB8_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    sub.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB8_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_sub_i8_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB8_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    sub.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB8_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i8 %b acquire
+  ret i8 %1
+}
+
+define i16 @atomicrmw_sub_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_sub_i16_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB9_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    sub.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB9_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_sub_i16_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB9_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    sub.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB9_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i16 %b acquire
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i8_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB10_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a6, $a1, .LBB10_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB10_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB10_3: # in Loop: Header=BB10_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB10_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umax_i8_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    andi $a3, $a1, 255
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB10_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    andi $a4, $a0, 255
+; LA64-LAMCAS-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT:    ext.w.b $a5, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB10_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i8 %b acquire
+  ret i8 %1
+}
+
+define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i16_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB11_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a6, $a1, .LBB11_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB11_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB11_3: # in Loop: Header=BB11_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB11_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umax_i16_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB11_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT:    ext.w.h $a5, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB11_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i16 %b acquire
+  ret i16 %1
+}
+
+define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i8_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB12_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a1, $a6, .LBB12_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB12_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB12_3: # in Loop: Header=BB12_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB12_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umin_i8_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    andi $a3, $a1, 255
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB12_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    andi $a4, $a0, 255
+; LA64-LAMCAS-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT:    xori $a4, $a4, 1
+; LA64-LAMCAS-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT:    ext.w.b $a5, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB12_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i8 %b acquire
+  ret i8 %1
+}
+
+define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i16_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB13_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a1, $a6, .LBB13_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB13_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB13_3: # in Loop: Header=BB13_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB13_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umin_i16_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB13_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT:    xori $a4, $a4, 1
+; LA64-LAMCAS-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT:    ext.w.h $a5, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB13_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i16 %b acquire
+  ret i16 %1
+}
+
+define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i8_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    ori $a4, $zero, 255
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    ext.w.b $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    xori $a3, $a3, 56
+; LA64-NEXT:  .LBB14_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a7, $a1, .LBB14_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB14_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB14_3: # in Loop: Header=BB14_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB14_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_max_i8_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    ext.w.b $a3, $a1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB14_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a5, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB14_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i8 %b acquire
+  ret i8 %1
+}
+
+define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i16_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    lu12i.w $a4, 15
+; LA64-NEXT:    ori $a4, $a4, 4095
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    ext.w.h $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    ori $a5, $zero, 48
+; LA64-NEXT:    sub.d $a3, $a5, $a3
+; LA64-NEXT:  .LBB15_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a7, $a1, .LBB15_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB15_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB15_3: # in Loop: Header=BB15_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB15_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_max_i16_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    ext.w.h $a3, $a1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB15_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a5, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB15_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i16 %b acquire
+  ret i16 %1
+}
+
+define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i8_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    ori $a4, $zero, 255
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    ext.w.b $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    xori $a3, $a3, 56
+; LA64-NEXT:  .LBB16_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a1, $a7, .LBB16_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB16_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB16_3: # in Loop: Header=BB16_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB16_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_min_i8_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    ext.w.b $a3, $a1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB16_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT:    xori $a5, $a5, 1
+; LA64-LAMCAS-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a5, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB16_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i8 %b acquire
+  ret i8 %1
+}
+
+define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i16_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    lu12i.w $a4, 15
+; LA64-NEXT:    ori $a4, $a4, 4095
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    ext.w.h $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    ori $a5, $zero, 48
+; LA64-NEXT:    sub.d $a3, $a5, $a3
+; LA64-NEXT:  .LBB17_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a1, $a7, .LBB17_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB17_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB17_3: # in Loop: Header=BB17_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB17_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_min_i16_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    ext.w.h $a3, $a1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB17_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT:    xori $a5, $a5, 1
+; LA64-LAMCAS-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a5, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB17_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i16 %b acquire
+  ret i16 %1
+}
+
+define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i8_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB18_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a5, $a4, $a1
+; LA64-NEXT:    nor $a5, $a5, $zero
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB18_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i8_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB18_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    nor $a3, $a3, $zero
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB18_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i8 %b acquire
+  ret i8 %1
+}
+
+define i16 @atomicrmw_nand_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i16_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB19_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a5, $a4, $a1
+; LA64-NEXT:    nor $a5, $a5, $zero
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB19_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i16_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB19_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    nor $a3, $a3, $zero
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB19_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i16 %b acquire
+  ret i16 %1
+
+}
+
+define i32 @atomicrmw_nand_i32_acquire(ptr %a, i32 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i32_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:  .LBB20_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a2, $a0, 0
+; LA64-NEXT:    and $a3, $a2, $a1
+; LA64-NEXT:    nor $a3, $a3, $zero
+; LA64-NEXT:    sc.w $a3, $a0, 0
+; LA64-NEXT:    beqz $a3, .LBB20_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i32_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.w $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB20_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    move $a3, $a0
+; LA64-LAMCAS-NEXT:    and $a4, $a0, $a1
+; LA64-LAMCAS-NEXT:    nor $a4, $a4, $zero
+; LA64-LAMCAS-NEXT:    amcas_db.w $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB20_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i32 %b acquire
+  ret i32 %1
+}
+
+define i64 @atomicrmw_nand_i64_acquire(ptr %a, i64 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i64_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:  .LBB21_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.d $a2, $a0, 0
+; LA64-NEXT:    and $a3, $a2, $a1
+; LA64-NEXT:    nor $a3, $a3, $zero
+; LA64-NEXT:    sc.d $a3, $a0, 0
+; LA64-NEXT:    beqz $a3, .LBB21_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i64_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.d $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB21_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    move $a3, $a0
+; LA64-LAMCAS-NEXT:    and $a4, $a0, $a1
+; LA64-LAMCAS-NEXT:    nor $a4, $a4, $zero
+; LA64-LAMCAS-NEXT:    amcas_db.d $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB21_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i64 %b acquire
+  ret i64 %1
+}
+
+define i8 @atomicrmw_and_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_and_i8_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    orn $a1, $a1, $a3
+; LA64-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_and_i8_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB22_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB22_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw and ptr %a, i8 %b acquire
+  ret i8 %1
+}
+
+define i16 @atomicrmw_and_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_and_i16_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    orn $a1, $a1, $a3
+; LA64-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_and_i16_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB23_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB23_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw and ptr %a, i16 %b acquire
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_or_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_or_i8_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_or_i8_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB24_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    or $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB24_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw or ptr %a, i8 %b acquire
+  ret i8 %1
+}
+
+define i16 @atomicrmw_or_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_or_i16_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_or_i16_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB25_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    or $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB25_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw or ptr %a, i16 %b acquire
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_xor_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_xor_i8_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xor_i8_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB26_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    xor $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB26_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xor ptr %a, i8 %b acquire
+  ret i8 %1
+}
+
+define i16 @atomicrmw_xor_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_xor_i16_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xor_i16_acquire:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB27_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    xor $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB27_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xor ptr %a, i16 %b acquire
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_xchg_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_xchg_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB28_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    addi.w $a5, $a1, 0
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB28_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_i8_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    ld.bu $a2, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB28_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.b $a3, $a2
+; LA64-LAMCAS-NEXT:    amcas_db.b $a2, $a1, $a0
+; LA64-LAMCAS-NEXT:    bne $a2, $a3, .LBB28_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    move $a0, $a2
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 %b release
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_0_i8_release(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_0_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a2, $zero, 255
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    nor $a2, $a2, $zero
+; LA64-NEXT:    amand_db.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_0_i8_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a1, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB29_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.b $a2, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $zero, $a1
+; LA64-LAMCAS-NEXT:    bne $a0, $a2, .LBB29_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 0 release
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_minus_1_i8_release(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_minus_1_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a2, $zero, 255
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    amor_db.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_minus_1_i8_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a1, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    addi.w $a2, $zero, -1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB30_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.b $a3, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a2, $a1
+; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB30_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 -1 release
+  ret i8 %1
+}
+
+define i16 @atomicrmw_xchg_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_xchg_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB31_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    addi.w $a5, $a1, 0
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB31_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_i16_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    ld.hu $a2, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB31_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.h $a3, $a2
+; LA64-LAMCAS-NEXT:    amcas_db.h $a2, $a1, $a0
+; LA64-LAMCAS-NEXT:    bne $a2, $a3, .LBB31_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    move $a0, $a2
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 %b release
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_0_i16_release(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_0_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    nor $a2, $a2, $zero
+; LA64-NEXT:    amand_db.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_0_i16_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a1, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB32_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.h $a2, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $zero, $a1
+; LA64-LAMCAS-NEXT:    bne $a0, $a2, .LBB32_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 0 release
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_minus_1_i16_release(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_minus_1_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    amor_db.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_minus_1_i16_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a1, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    addi.w $a2, $zero, -1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB33_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.h $a3, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a2, $a1
+; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB33_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 -1 release
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_add_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB34_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    add.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB34_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_add_i8_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB34_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    add.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB34_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw add ptr %a, i8 %b release
+  ret i8 %1
+}
+
+define i16 @atomicrmw_add_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_add_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB35_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    add.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB35_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_add_i16_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB35_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    add.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB35_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw add ptr %a, i16 %b release
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_sub_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB36_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    sub.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB36_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_sub_i8_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB36_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    sub.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB36_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i8 %b release
+  ret i8 %1
+}
+
+define i16 @atomicrmw_sub_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_sub_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB37_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    sub.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB37_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_sub_i16_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB37_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    sub.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB37_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i16 %b release
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB38_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a6, $a1, .LBB38_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB38_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB38_3: # in Loop: Header=BB38_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB38_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umax_i8_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    andi $a3, $a1, 255
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB38_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    andi $a4, $a0, 255
+; LA64-LAMCAS-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT:    ext.w.b $a5, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB38_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i8 %b release
+  ret i8 %1
+}
+
+define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB39_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a6, $a1, .LBB39_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB39_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB39_3: # in Loop: Header=BB39_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB39_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umax_i16_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB39_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT:    ext.w.h $a5, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB39_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i16 %b release
+  ret i16 %1
+}
+
+define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB40_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a1, $a6, .LBB40_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB40_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB40_3: # in Loop: Header=BB40_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB40_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umin_i8_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    andi $a3, $a1, 255
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB40_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    andi $a4, $a0, 255
+; LA64-LAMCAS-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT:    xori $a4, $a4, 1
+; LA64-LAMCAS-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT:    ext.w.b $a5, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB40_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i8 %b release
+  ret i8 %1
+}
+
+define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB41_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a1, $a6, .LBB41_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB41_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB41_3: # in Loop: Header=BB41_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB41_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umin_i16_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB41_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT:    xori $a4, $a4, 1
+; LA64-LAMCAS-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT:    ext.w.h $a5, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB41_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i16 %b release
+  ret i16 %1
+}
+
+define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    ori $a4, $zero, 255
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    ext.w.b $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    xori $a3, $a3, 56
+; LA64-NEXT:  .LBB42_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a7, $a1, .LBB42_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB42_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB42_3: # in Loop: Header=BB42_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB42_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_max_i8_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    ext.w.b $a3, $a1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB42_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a5, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB42_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i8 %b release
+  ret i8 %1
+}
+
+define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    lu12i.w $a4, 15
+; LA64-NEXT:    ori $a4, $a4, 4095
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    ext.w.h $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    ori $a5, $zero, 48
+; LA64-NEXT:    sub.d $a3, $a5, $a3
+; LA64-NEXT:  .LBB43_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a7, $a1, .LBB43_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB43_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB43_3: # in Loop: Header=BB43_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB43_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_max_i16_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    ext.w.h $a3, $a1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB43_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a5, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB43_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i16 %b release
+  ret i16 %1
+}
+
+define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    ori $a4, $zero, 255
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    ext.w.b $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    xori $a3, $a3, 56
+; LA64-NEXT:  .LBB44_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a1, $a7, .LBB44_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB44_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB44_3: # in Loop: Header=BB44_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB44_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_min_i8_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    ext.w.b $a3, $a1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB44_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT:    xori $a5, $a5, 1
+; LA64-LAMCAS-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a5, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB44_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i8 %b release
+  ret i8 %1
+}
+
+define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    lu12i.w $a4, 15
+; LA64-NEXT:    ori $a4, $a4, 4095
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    ext.w.h $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    ori $a5, $zero, 48
+; LA64-NEXT:    sub.d $a3, $a5, $a3
+; LA64-NEXT:  .LBB45_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a1, $a7, .LBB45_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB45_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB45_3: # in Loop: Header=BB45_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB45_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_min_i16_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    ext.w.h $a3, $a1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB45_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT:    xori $a5, $a5, 1
+; LA64-LAMCAS-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a5, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB45_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i16 %b release
+  ret i16 %1
+}
+
+define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB46_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a5, $a4, $a1
+; LA64-NEXT:    nor $a5, $a5, $zero
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB46_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i8_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB46_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    nor $a3, $a3, $zero
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB46_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i8 %b release
+  ret i8 %1
+}
+
+define i16 @atomicrmw_nand_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB47_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a5, $a4, $a1
+; LA64-NEXT:    nor $a5, $a5, $zero
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB47_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i16_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB47_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    nor $a3, $a3, $zero
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB47_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i16 %b release
+  ret i16 %1
+
+}
+
+define i32 @atomicrmw_nand_i32_release(ptr %a, i32 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i32_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:  .LBB48_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a2, $a0, 0
+; LA64-NEXT:    and $a3, $a2, $a1
+; LA64-NEXT:    nor $a3, $a3, $zero
+; LA64-NEXT:    sc.w $a3, $a0, 0
+; LA64-NEXT:    beqz $a3, .LBB48_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i32_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.w $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB48_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    move $a3, $a0
+; LA64-LAMCAS-NEXT:    and $a4, $a0, $a1
+; LA64-LAMCAS-NEXT:    nor $a4, $a4, $zero
+; LA64-LAMCAS-NEXT:    amcas_db.w $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB48_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i32 %b release
+  ret i32 %1
+}
+
+define i64 @atomicrmw_nand_i64_release(ptr %a, i64 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i64_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:  .LBB49_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.d $a2, $a0, 0
+; LA64-NEXT:    and $a3, $a2, $a1
+; LA64-NEXT:    nor $a3, $a3, $zero
+; LA64-NEXT:    sc.d $a3, $a0, 0
+; LA64-NEXT:    beqz $a3, .LBB49_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i64_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.d $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB49_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    move $a3, $a0
+; LA64-LAMCAS-NEXT:    and $a4, $a0, $a1
+; LA64-LAMCAS-NEXT:    nor $a4, $a4, $zero
+; LA64-LAMCAS-NEXT:    amcas_db.d $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB49_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i64 %b release
+  ret i64 %1
+}
+
+define i8 @atomicrmw_and_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_and_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    orn $a1, $a1, $a3
+; LA64-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_and_i8_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB50_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB50_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw and ptr %a, i8 %b release
+  ret i8 %1
+}
+
+define i16 @atomicrmw_and_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_and_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    orn $a1, $a1, $a3
+; LA64-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_and_i16_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB51_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB51_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw and ptr %a, i16 %b release
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_or_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_or_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_or_i8_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB52_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    or $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB52_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw or ptr %a, i8 %b release
+  ret i8 %1
+}
+
+define i16 @atomicrmw_or_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_or_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_or_i16_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB53_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    or $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB53_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw or ptr %a, i16 %b release
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_xor_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_xor_i8_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xor_i8_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB54_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    xor $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB54_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xor ptr %a, i8 %b release
+  ret i8 %1
+}
+
+define i16 @atomicrmw_xor_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_xor_i16_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xor_i16_release:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB55_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    xor $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB55_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xor ptr %a, i16 %b release
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_xchg_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_xchg_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB56_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    addi.w $a5, $a1, 0
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB56_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_i8_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    ld.bu $a2, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB56_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.b $a3, $a2
+; LA64-LAMCAS-NEXT:    amcas_db.b $a2, $a1, $a0
+; LA64-LAMCAS-NEXT:    bne $a2, $a3, .LBB56_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    move $a0, $a2
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_0_i8_acq_rel(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_0_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a2, $zero, 255
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    nor $a2, $a2, $zero
+; LA64-NEXT:    amand_db.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_0_i8_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a1, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB57_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.b $a2, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $zero, $a1
+; LA64-LAMCAS-NEXT:    bne $a0, $a2, .LBB57_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 0 acq_rel
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_minus_1_i8_acq_rel(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_minus_1_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a2, $zero, 255
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    amor_db.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_minus_1_i8_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a1, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    addi.w $a2, $zero, -1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB58_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.b $a3, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a2, $a1
+; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB58_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 -1 acq_rel
+  ret i8 %1
+}
+
+define i16 @atomicrmw_xchg_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_xchg_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB59_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    addi.w $a5, $a1, 0
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB59_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_i16_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    ld.hu $a2, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB59_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.h $a3, $a2
+; LA64-LAMCAS-NEXT:    amcas_db.h $a2, $a1, $a0
+; LA64-LAMCAS-NEXT:    bne $a2, $a3, .LBB59_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    move $a0, $a2
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_0_i16_acq_rel(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_0_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    nor $a2, $a2, $zero
+; LA64-NEXT:    amand_db.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_0_i16_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a1, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB60_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.h $a2, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $zero, $a1
+; LA64-LAMCAS-NEXT:    bne $a0, $a2, .LBB60_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 0 acq_rel
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_minus_1_i16_acq_rel(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_minus_1_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    amor_db.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_minus_1_i16_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a1, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    addi.w $a2, $zero, -1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB61_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.h $a3, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a2, $a1
+; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB61_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 -1 acq_rel
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_add_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB62_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    add.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB62_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_add_i8_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB62_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    add.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB62_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw add ptr %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i16 @atomicrmw_add_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_add_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB63_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    add.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB63_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_add_i16_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB63_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    add.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB63_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw add ptr %a, i16 %b acq_rel
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_sub_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB64_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    sub.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB64_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_sub_i8_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB64_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    sub.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB64_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i16 @atomicrmw_sub_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_sub_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB65_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    sub.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB65_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_sub_i16_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB65_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    sub.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB65_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i16 %b acq_rel
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB66_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a6, $a1, .LBB66_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB66_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB66_3: # in Loop: Header=BB66_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB66_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umax_i8_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    andi $a3, $a1, 255
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB66_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    andi $a4, $a0, 255
+; LA64-LAMCAS-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT:    ext.w.b $a5, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB66_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB67_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a6, $a1, .LBB67_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB67_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB67_3: # in Loop: Header=BB67_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB67_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umax_i16_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB67_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT:    ext.w.h $a5, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB67_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+; umin i8, acq_rel: base LA64 emits a masked ll.w/sc.w loop (bgeu keeps the old byte when it is already <= %b); with LAMCAS the byte is updated via an amcas_db.b CAS loop, selecting the smaller value with sltu/xori + masknez/maskeqz.
+define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB68_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a1, $a6, .LBB68_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB68_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB68_3: # in Loop: Header=BB68_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB68_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umin_i8_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    andi $a3, $a1, 255
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB68_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    andi $a4, $a0, 255
+; LA64-LAMCAS-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT:    xori $a4, $a4, 1
+; LA64-LAMCAS-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT:    ext.w.b $a5, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB68_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+; umin i16, acq_rel: same shape as the i8 case but with a 0xffff mask (lu12i.w+ori) and bstrpick zero-extension; LAMCAS path uses amcas_db.h.
+define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB69_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a1, $a6, .LBB69_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB69_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB69_3: # in Loop: Header=BB69_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB69_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umin_i16_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB69_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT:    xori $a4, $a4, 1
+; LA64-LAMCAS-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT:    ext.w.h $a5, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB69_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+; max (signed) i8, acq_rel: base LA64 loop sign-extends the in-word byte with a pair of sll.w/sra.w shifts before the signed bge compare; LAMCAS path sign-extends with ext.w.b and selects the larger value via slt + masknez/maskeqz around an amcas_db.b.
+define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    ori $a4, $zero, 255
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    ext.w.b $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    xori $a3, $a3, 56
+; LA64-NEXT:  .LBB70_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a7, $a1, .LBB70_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB70_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB70_3: # in Loop: Header=BB70_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB70_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_max_i8_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    ext.w.b $a3, $a1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB70_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a5, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB70_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+; max (signed) i16, acq_rel: same as the i8 case with halfword mask and ext.w.h sign-extension; LAMCAS path uses amcas_db.h.
+define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    lu12i.w $a4, 15
+; LA64-NEXT:    ori $a4, $a4, 4095
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    ext.w.h $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    ori $a5, $zero, 48
+; LA64-NEXT:    sub.d $a3, $a5, $a3
+; LA64-NEXT:  .LBB71_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a7, $a1, .LBB71_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB71_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB71_3: # in Loop: Header=BB71_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB71_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_max_i16_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    ext.w.h $a3, $a1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB71_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a5, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB71_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+; min (signed) i8, acq_rel: mirror of the max case — operand order of bge is swapped on LA64, and the LAMCAS select inverts the slt result with xori before amcas_db.b.
+define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    ori $a4, $zero, 255
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    ext.w.b $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    xori $a3, $a3, 56
+; LA64-NEXT:  .LBB72_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a1, $a7, .LBB72_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB72_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB72_3: # in Loop: Header=BB72_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB72_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_min_i8_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    ext.w.b $a3, $a1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB72_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT:    xori $a5, $a5, 1
+; LA64-LAMCAS-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a5, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB72_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+; min (signed) i16, acq_rel: halfword variant of the signed-min lowering; LAMCAS path uses ext.w.h + inverted slt select around amcas_db.h.
+define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    lu12i.w $a4, 15
+; LA64-NEXT:    ori $a4, $a4, 4095
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    ext.w.h $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    ori $a5, $zero, 48
+; LA64-NEXT:    sub.d $a3, $a5, $a3
+; LA64-NEXT:  .LBB73_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a1, $a7, .LBB73_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB73_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB73_3: # in Loop: Header=BB73_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB73_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_min_i16_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    ext.w.h $a3, $a1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB73_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT:    xori $a5, $a5, 1
+; LA64-LAMCAS-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a5, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB73_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i16 %b acq_rel
+  ret i16 %1
+}
+
+; nand i8, acq_rel: no am* nand instruction exists, so base LA64 computes and+nor inside an ll.w/sc.w loop; LAMCAS computes and+nor then retries via an amcas_db.b CAS loop.
+define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB74_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a5, $a4, $a1
+; LA64-NEXT:    nor $a5, $a5, $zero
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB74_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i8_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB74_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    nor $a3, $a3, $zero
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB74_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+; nand i16, acq_rel: halfword variant of the nand lowering; LAMCAS retries via amcas_db.h with ext.w.h providing the sign-extended expected value.
+define i16 @atomicrmw_nand_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB75_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a5, $a4, $a1
+; LA64-NEXT:    nor $a5, $a5, $zero
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB75_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i16_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB75_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    nor $a3, $a3, $zero
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB75_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i16 %b acq_rel
+  ret i16 %1
+
+}
+
+; nand i32, acq_rel: word-sized, so no subword masking is needed — plain ll.w/sc.w loop on LA64 vs amcas_db.w CAS loop with LAMCAS.
+define i32 @atomicrmw_nand_i32_acq_rel(ptr %a, i32 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i32_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:  .LBB76_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a2, $a0, 0
+; LA64-NEXT:    and $a3, $a2, $a1
+; LA64-NEXT:    nor $a3, $a3, $zero
+; LA64-NEXT:    sc.w $a3, $a0, 0
+; LA64-NEXT:    beqz $a3, .LBB76_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i32_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.w $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB76_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    move $a3, $a0
+; LA64-LAMCAS-NEXT:    and $a4, $a0, $a1
+; LA64-LAMCAS-NEXT:    nor $a4, $a4, $zero
+; LA64-LAMCAS-NEXT:    amcas_db.w $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB76_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+    %1 = atomicrmw nand ptr %a, i32 %b acq_rel
+  ret i32 %1
+}
+
+; nand i64, acq_rel: doubleword variant — ll.d/sc.d loop on LA64 vs amcas_db.d CAS loop with LAMCAS.
+define i64 @atomicrmw_nand_i64_acq_rel(ptr %a, i64 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i64_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:  .LBB77_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.d $a2, $a0, 0
+; LA64-NEXT:    and $a3, $a2, $a1
+; LA64-NEXT:    nor $a3, $a3, $zero
+; LA64-NEXT:    sc.d $a3, $a0, 0
+; LA64-NEXT:    beqz $a3, .LBB77_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i64_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.d $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB77_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    move $a3, $a0
+; LA64-LAMCAS-NEXT:    and $a4, $a0, $a1
+; LA64-LAMCAS-NEXT:    nor $a4, $a4, $zero
+; LA64-LAMCAS-NEXT:    amcas_db.d $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB77_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i64 %b acq_rel
+  ret i64 %1
+}
+
+
+
+; and i8, acq_rel: base LA64 folds this into a single amand_db.w on the aligned word (orn widens the mask); the LAMCAS path instead runs a per-byte amcas_db.b CAS loop.
+define i8 @atomicrmw_and_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_and_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    orn $a1, $a1, $a3
+; LA64-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_and_i8_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB78_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB78_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw and ptr %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+; and i16, acq_rel: same as the i8 case with a halfword mask; LAMCAS path uses an amcas_db.h CAS loop.
+define i16 @atomicrmw_and_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_and_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    orn $a1, $a1, $a3
+; LA64-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_and_i16_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB79_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB79_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw and ptr %a, i16 %b acq_rel
+  ret i16 %1
+
+}
+
+; or i8, acq_rel: base LA64 uses a single amor_db.w on the shifted operand (no mask needed — extra bits are zero); LAMCAS path uses an amcas_db.b CAS loop.
+define i8 @atomicrmw_or_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_or_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_or_i8_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB80_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    or $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB80_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw or ptr %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+; or i16, acq_rel: halfword variant — amor_db.w on LA64 vs amcas_db.h CAS loop with LAMCAS.
+define i16 @atomicrmw_or_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_or_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_or_i16_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB81_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    or $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB81_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw or ptr %a, i16 %b acq_rel
+  ret i16 %1
+
+}
+
+; xor i8, acq_rel: base LA64 uses a single amxor_db.w on the shifted operand; LAMCAS path uses an amcas_db.b CAS loop.
+define i8 @atomicrmw_xor_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_xor_i8_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xor_i8_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB82_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    xor $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB82_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xor ptr %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+; xor i16, acq_rel: halfword variant — amxor_db.w on LA64 vs amcas_db.h CAS loop with LAMCAS.
+define i16 @atomicrmw_xor_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_xor_i16_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xor_i16_acq_rel:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB83_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    xor $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB83_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xor ptr %a, i16 %b acq_rel
+  ret i16 %1
+
+}
+
+; xchg i8, seq_cst: base LA64 uses the masked ll.w/sc.w insert loop; with LAMCAS the new value %b is passed straight to amcas_db.b (no op computation needed), with the result moved back to $a0 at the end.
+define i8 @atomicrmw_xchg_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_xchg_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB84_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    addi.w $a5, $a1, 0
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB84_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_i8_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    ld.bu $a2, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB84_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.b $a3, $a2
+; LA64-LAMCAS-NEXT:    amcas_db.b $a2, $a1, $a0
+; LA64-LAMCAS-NEXT:    bne $a2, $a3, .LBB84_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    move $a0, $a2
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+; xchg i8 with constant 0, seq_cst: base LA64 folds the swap into amand_db.w with an inverted byte mask; LAMCAS passes $zero directly as the amcas_db.b swap operand.
+define i8 @atomicrmw_xchg_0_i8_seq_cst(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_0_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a2, $zero, 255
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    nor $a2, $a2, $zero
+; LA64-NEXT:    amand_db.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_0_i8_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a1, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB85_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.b $a2, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $zero, $a1
+; LA64-LAMCAS-NEXT:    bne $a0, $a2, .LBB85_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 0 seq_cst
+  ret i8 %1
+}
+
+; xchg i8 with constant -1, seq_cst: base LA64 folds the swap into amor_db.w with the byte mask; LAMCAS materializes -1 (addi.w) and swaps it in via amcas_db.b.
+define i8 @atomicrmw_xchg_minus_1_i8_seq_cst(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_minus_1_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a2, $zero, 255
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    amor_db.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_minus_1_i8_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a1, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    addi.w $a2, $zero, -1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB86_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.b $a3, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a2, $a1
+; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB86_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 -1 seq_cst
+  ret i8 %1
+}
+
+; xchg i16, seq_cst: halfword variant of the xchg lowering — masked ll.w/sc.w loop on LA64 vs amcas_db.h CAS loop with LAMCAS.
+define i16 @atomicrmw_xchg_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_xchg_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB87_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    addi.w $a5, $a1, 0
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB87_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_i16_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    ld.hu $a2, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB87_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.h $a3, $a2
+; LA64-LAMCAS-NEXT:    amcas_db.h $a2, $a1, $a0
+; LA64-LAMCAS-NEXT:    bne $a2, $a3, .LBB87_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    move $a0, $a2
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+; xchg i16 with constant 0, seq_cst: amand_db.w with inverted halfword mask on LA64 vs amcas_db.h swapping in $zero with LAMCAS.
+define i16 @atomicrmw_xchg_0_i16_seq_cst(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_0_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    nor $a2, $a2, $zero
+; LA64-NEXT:    amand_db.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_0_i16_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a1, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB88_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.h $a2, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $zero, $a1
+; LA64-LAMCAS-NEXT:    bne $a0, $a2, .LBB88_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 0 seq_cst
+  ret i16 %1
+}
+
+; xchg i16 with constant -1, seq_cst: amor_db.w with the halfword mask on LA64 vs amcas_db.h swapping in -1 with LAMCAS.
+define i16 @atomicrmw_xchg_minus_1_i16_seq_cst(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_minus_1_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    amor_db.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_minus_1_i16_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a1, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    addi.w $a2, $zero, -1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB89_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.h $a3, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a2, $a1
+; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB89_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 -1 seq_cst
+  ret i16 %1
+
+}
+
+; add i8, seq_cst: base LA64 uses the masked ll.w/sc.w insert loop around add.w; LAMCAS computes the sum with add.d and retries via amcas_db.b.
+define i8 @atomicrmw_add_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_add_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB90_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    add.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB90_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_add_i8_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB90_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    add.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB90_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw add ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+; add i16, seq_cst: halfword variant — masked ll.w/sc.w add loop on LA64 vs add.d + amcas_db.h CAS loop with LAMCAS.
+define i16 @atomicrmw_add_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_add_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB91_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    add.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB91_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_add_i16_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB91_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    add.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB91_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw add ptr %a, i16 %b seq_cst
+  ret i16 %1
+
+}
+
+; sub i8, seq_cst: base LA64 uses the masked ll.w/sc.w insert loop around sub.w; LAMCAS computes the difference with sub.d and retries via amcas_db.b.
+define i8 @atomicrmw_sub_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_sub_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB92_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    sub.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB92_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_sub_i8_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB92_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    sub.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB92_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i16 @atomicrmw_sub_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_sub_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB93_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    sub.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB93_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_sub_i16_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB93_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    sub.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB93_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i16 %b seq_cst
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB94_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a6, $a1, .LBB94_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB94_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB94_3: # in Loop: Header=BB94_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB94_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umax_i8_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    andi $a3, $a1, 255
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB94_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    andi $a4, $a0, 255
+; LA64-LAMCAS-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT:    ext.w.b $a5, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB94_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB95_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a6, $a1, .LBB95_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB95_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB95_3: # in Loop: Header=BB95_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB95_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umax_i16_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB95_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT:    ext.w.h $a5, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB95_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB96_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a1, $a6, .LBB96_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB96_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB96_3: # in Loop: Header=BB96_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB96_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umin_i8_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    andi $a3, $a1, 255
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB96_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    andi $a4, $a0, 255
+; LA64-LAMCAS-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT:    xori $a4, $a4, 1
+; LA64-LAMCAS-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT:    ext.w.b $a5, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB96_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB97_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a1, $a6, .LBB97_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB97_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB97_3: # in Loop: Header=BB97_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB97_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umin_i16_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB97_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT:    xori $a4, $a4, 1
+; LA64-LAMCAS-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT:    ext.w.h $a5, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB97_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    ori $a4, $zero, 255
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    ext.w.b $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    xori $a3, $a3, 56
+; LA64-NEXT:  .LBB98_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a7, $a1, .LBB98_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB98_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB98_3: # in Loop: Header=BB98_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB98_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_max_i8_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    ext.w.b $a3, $a1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB98_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a5, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB98_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    lu12i.w $a4, 15
+; LA64-NEXT:    ori $a4, $a4, 4095
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    ext.w.h $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    ori $a5, $zero, 48
+; LA64-NEXT:    sub.d $a3, $a5, $a3
+; LA64-NEXT:  .LBB99_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a7, $a1, .LBB99_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB99_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB99_3: # in Loop: Header=BB99_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB99_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_max_i16_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    ext.w.h $a3, $a1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB99_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a5, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB99_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    ori $a4, $zero, 255
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    ext.w.b $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    xori $a3, $a3, 56
+; LA64-NEXT:  .LBB100_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a1, $a7, .LBB100_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB100_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB100_3: # in Loop: Header=BB100_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB100_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_min_i8_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    ext.w.b $a3, $a1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB100_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT:    xori $a5, $a5, 1
+; LA64-LAMCAS-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a5, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB100_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    lu12i.w $a4, 15
+; LA64-NEXT:    ori $a4, $a4, 4095
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    ext.w.h $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    ori $a5, $zero, 48
+; LA64-NEXT:    sub.d $a3, $a5, $a3
+; LA64-NEXT:  .LBB101_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a1, $a7, .LBB101_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB101_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB101_3: # in Loop: Header=BB101_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB101_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_min_i16_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    ext.w.h $a3, $a1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB101_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT:    xori $a5, $a5, 1
+; LA64-LAMCAS-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a5, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB101_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i16 %b seq_cst
+  ret i16 %1
+}
+
+define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB102_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a5, $a4, $a1
+; LA64-NEXT:    nor $a5, $a5, $zero
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB102_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i8_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB102_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    nor $a3, $a3, $zero
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB102_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i16 @atomicrmw_nand_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB103_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a5, $a4, $a1
+; LA64-NEXT:    nor $a5, $a5, $zero
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB103_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i16_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB103_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    nor $a3, $a3, $zero
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB103_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i16 %b seq_cst
+  ret i16 %1
+
+}
+
+define i32 @atomicrmw_nand_i32_seq_cst(ptr %a, i32 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i32_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:  .LBB104_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a2, $a0, 0
+; LA64-NEXT:    and $a3, $a2, $a1
+; LA64-NEXT:    nor $a3, $a3, $zero
+; LA64-NEXT:    sc.w $a3, $a0, 0
+; LA64-NEXT:    beqz $a3, .LBB104_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i32_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.w $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB104_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    move $a3, $a0
+; LA64-LAMCAS-NEXT:    and $a4, $a0, $a1
+; LA64-LAMCAS-NEXT:    nor $a4, $a4, $zero
+; LA64-LAMCAS-NEXT:    amcas_db.w $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB104_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+    %1 = atomicrmw nand ptr %a, i32 %b seq_cst
+  ret i32 %1
+}
+
+define i64 @atomicrmw_nand_i64_seq_cst(ptr %a, i64 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i64_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:  .LBB105_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.d $a2, $a0, 0
+; LA64-NEXT:    and $a3, $a2, $a1
+; LA64-NEXT:    nor $a3, $a3, $zero
+; LA64-NEXT:    sc.d $a3, $a0, 0
+; LA64-NEXT:    beqz $a3, .LBB105_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i64_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.d $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB105_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    move $a3, $a0
+; LA64-LAMCAS-NEXT:    and $a4, $a0, $a1
+; LA64-LAMCAS-NEXT:    nor $a4, $a4, $zero
+; LA64-LAMCAS-NEXT:    amcas_db.d $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB105_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i64 %b seq_cst
+  ret i64 %1
+}
+
+
+
+define i8 @atomicrmw_and_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_and_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    orn $a1, $a1, $a3
+; LA64-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_and_i8_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB106_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB106_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw and ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i16 @atomicrmw_and_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_and_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    orn $a1, $a1, $a3
+; LA64-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_and_i16_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB107_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB107_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw and ptr %a, i16 %b seq_cst
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_or_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_or_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_or_i8_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB108_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    or $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB108_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw or ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i16 @atomicrmw_or_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_or_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_or_i16_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB109_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    or $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB109_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw or ptr %a, i16 %b seq_cst
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_xor_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_xor_i8_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xor_i8_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB110_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    xor $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB110_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xor ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i16 @atomicrmw_xor_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_xor_i16_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xor_i16_seq_cst:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB111_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    xor $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB111_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xor ptr %a, i16 %b seq_cst
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_xchg_i8_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB112_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    addi.w $a5, $a1, 0
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB112_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_i8_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    ld.bu $a2, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB112_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.b $a3, $a2
+; LA64-LAMCAS-NEXT:    amcas.b $a2, $a1, $a0
+; LA64-LAMCAS-NEXT:    bne $a2, $a3, .LBB112_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    move $a0, $a2
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_0_i8_monotonic(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_0_i8_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a2, $zero, 255
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    nor $a2, $a2, $zero
+; LA64-NEXT:    amand.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_0_i8_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a1, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB113_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.b $a2, $a0
+; LA64-LAMCAS-NEXT:    amcas.b $a0, $zero, $a1
+; LA64-LAMCAS-NEXT:    bne $a0, $a2, .LBB113_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 0 monotonic
+  ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_minus_1_i8_monotonic(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_minus_1_i8_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a2, $zero, 255
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    amor.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_minus_1_i8_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a1, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    addi.w $a2, $zero, -1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB114_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.b $a3, $a0
+; LA64-LAMCAS-NEXT:    amcas.b $a0, $a2, $a1
+; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB114_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i8 -1 monotonic
+  ret i8 %1
+}
+
+define i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_xchg_i16_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB115_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    addi.w $a5, $a1, 0
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB115_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_i16_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    ld.hu $a2, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB115_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.h $a3, $a2
+; LA64-LAMCAS-NEXT:    amcas.h $a2, $a1, $a0
+; LA64-LAMCAS-NEXT:    bne $a2, $a3, .LBB115_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    move $a0, $a2
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 %b monotonic
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_0_i16_monotonic(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_0_i16_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    nor $a2, $a2, $zero
+; LA64-NEXT:    amand.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_0_i16_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a1, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB116_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.h $a2, $a0
+; LA64-LAMCAS-NEXT:    amcas.h $a0, $zero, $a1
+; LA64-LAMCAS-NEXT:    bne $a0, $a2, .LBB116_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 0 monotonic
+  ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_minus_1_i16_monotonic(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_minus_1_i16_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a1, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a2, 15
+; LA64-NEXT:    ori $a2, $a2, 4095
+; LA64-NEXT:    sll.w $a2, $a2, $a1
+; LA64-NEXT:    amor.w $a3, $a2, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a1
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_minus_1_i16_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a1, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    addi.w $a2, $zero, -1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB117_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.h $a3, $a0
+; LA64-LAMCAS-NEXT:    amcas.h $a0, $a2, $a1
+; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB117_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xchg ptr %a, i16 -1 monotonic
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_add_i8_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB118_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    add.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB118_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_add_i8_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB118_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    add.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB118_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw add ptr %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_add_i16_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB119_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    add.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB119_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_add_i16_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB119_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    add.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB119_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw add ptr %a, i16 %b monotonic
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_sub_i8_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB120_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    sub.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB120_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_sub_i8_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB120_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    sub.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB120_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_sub_i16_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB121_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    sub.w $a5, $a4, $a1
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB121_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_sub_i16_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB121_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    sub.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB121_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw sub ptr %a, i16 %b monotonic
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i8_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB122_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a6, $a1, .LBB122_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB122_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB122_3: # in Loop: Header=BB122_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB122_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umax_i8_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    andi $a3, $a1, 255
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB122_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    andi $a4, $a0, 255
+; LA64-LAMCAS-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT:    ext.w.b $a5, $a0
+; LA64-LAMCAS-NEXT:    amcas.b $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB122_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i16_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB123_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a6, $a1, .LBB123_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB123_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB123_3: # in Loop: Header=BB123_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB123_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umax_i16_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB123_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT:    ext.w.h $a5, $a0
+; LA64-LAMCAS-NEXT:    amcas.h $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB123_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw umax ptr %a, i16 %b monotonic
+  ret i16 %1
+}
+
+define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i8_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB124_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a1, $a6, .LBB124_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB124_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB124_3: # in Loop: Header=BB124_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB124_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umin_i8_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    andi $a3, $a1, 255
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB124_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    andi $a4, $a0, 255
+; LA64-LAMCAS-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT:    xori $a4, $a4, 1
+; LA64-LAMCAS-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT:    ext.w.b $a5, $a0
+; LA64-LAMCAS-NEXT:    amcas.b $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB124_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i16_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB125_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a6, $a4, $a3
+; LA64-NEXT:    move $a5, $a4
+; LA64-NEXT:    bgeu $a1, $a6, .LBB125_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB125_1 Depth=1
+; LA64-NEXT:    xor $a5, $a4, $a1
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:  .LBB125_3: # in Loop: Header=BB125_1 Depth=1
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB125_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umin_i16_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB125_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT:    xori $a4, $a4, 1
+; LA64-LAMCAS-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT:    ext.w.h $a5, $a0
+; LA64-LAMCAS-NEXT:    amcas.h $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB125_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw umin ptr %a, i16 %b monotonic
+  ret i16 %1
+}
+
+define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i8_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    ori $a4, $zero, 255
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    ext.w.b $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    xori $a3, $a3, 56
+; LA64-NEXT:  .LBB126_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a7, $a1, .LBB126_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB126_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB126_3: # in Loop: Header=BB126_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB126_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_max_i8_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    ext.w.b $a3, $a1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB126_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT:    amcas.b $a0, $a5, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB126_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i16_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    lu12i.w $a4, 15
+; LA64-NEXT:    ori $a4, $a4, 4095
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    ext.w.h $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    ori $a5, $zero, 48
+; LA64-NEXT:    sub.d $a3, $a5, $a3
+; LA64-NEXT:  .LBB127_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a7, $a1, .LBB127_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB127_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB127_3: # in Loop: Header=BB127_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB127_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_max_i16_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    ext.w.h $a3, $a1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB127_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT:    amcas.h $a0, $a5, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB127_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw max ptr %a, i16 %b monotonic
+  ret i16 %1
+}
+
+define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i8_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    ori $a4, $zero, 255
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    ext.w.b $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    xori $a3, $a3, 56
+; LA64-NEXT:  .LBB128_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a1, $a7, .LBB128_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB128_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB128_3: # in Loop: Header=BB128_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB128_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_min_i8_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    ext.w.b $a3, $a1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB128_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT:    xori $a5, $a5, 1
+; LA64-LAMCAS-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT:    amcas.b $a0, $a5, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB128_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i16_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a3, $a2, 24
+; LA64-NEXT:    lu12i.w $a4, 15
+; LA64-NEXT:    ori $a4, $a4, 4095
+; LA64-NEXT:    sll.w $a4, $a4, $a2
+; LA64-NEXT:    ext.w.h $a1, $a1
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    ori $a5, $zero, 48
+; LA64-NEXT:    sub.d $a3, $a5, $a3
+; LA64-NEXT:  .LBB129_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a5, $a0, 0
+; LA64-NEXT:    and $a7, $a5, $a4
+; LA64-NEXT:    move $a6, $a5
+; LA64-NEXT:    sll.w $a7, $a7, $a3
+; LA64-NEXT:    sra.w $a7, $a7, $a3
+; LA64-NEXT:    bge $a1, $a7, .LBB129_3
+; LA64-NEXT:  # %bb.2: # in Loop: Header=BB129_1 Depth=1
+; LA64-NEXT:    xor $a6, $a5, $a1
+; LA64-NEXT:    and $a6, $a6, $a4
+; LA64-NEXT:    xor $a6, $a5, $a6
+; LA64-NEXT:  .LBB129_3: # in Loop: Header=BB129_1 Depth=1
+; LA64-NEXT:    sc.w $a6, $a0, 0
+; LA64-NEXT:    beqz $a6, .LBB129_1
+; LA64-NEXT:  # %bb.4:
+; LA64-NEXT:    srl.w $a0, $a5, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_min_i16_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    ext.w.h $a3, $a1
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB129_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT:    xori $a5, $a5, 1
+; LA64-LAMCAS-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT:    amcas.h $a0, $a5, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB129_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw min ptr %a, i16 %b monotonic
+  ret i16 %1
+}
+
+
+
+define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i8_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB130_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a5, $a4, $a1
+; LA64-NEXT:    nor $a5, $a5, $zero
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB130_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i8_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB130_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    nor $a3, $a3, $zero
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB130_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i16_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:  .LBB131_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a4, $a0, 0
+; LA64-NEXT:    and $a5, $a4, $a1
+; LA64-NEXT:    nor $a5, $a5, $zero
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    and $a5, $a5, $a3
+; LA64-NEXT:    xor $a5, $a4, $a5
+; LA64-NEXT:    sc.w $a5, $a0, 0
+; LA64-NEXT:    beqz $a5, .LBB131_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    srl.w $a0, $a4, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i16_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB131_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    nor $a3, $a3, $zero
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB131_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i16 %b monotonic
+  ret i16 %1
+
+}
+
+define i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i32_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:  .LBB132_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.w $a2, $a0, 0
+; LA64-NEXT:    and $a3, $a2, $a1
+; LA64-NEXT:    nor $a3, $a3, $zero
+; LA64-NEXT:    sc.w $a3, $a0, 0
+; LA64-NEXT:    beqz $a3, .LBB132_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i32_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.w $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB132_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    move $a3, $a0
+; LA64-LAMCAS-NEXT:    and $a4, $a0, $a1
+; LA64-LAMCAS-NEXT:    nor $a4, $a4, $zero
+; LA64-LAMCAS-NEXT:    amcas.w $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB132_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+    %1 = atomicrmw nand ptr %a, i32 %b monotonic
+  ret i32 %1
+}
+
+define i64 @atomicrmw_nand_i64_monotonic(ptr %a, i64 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i64_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:  .LBB133_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT:    ll.d $a2, $a0, 0
+; LA64-NEXT:    and $a3, $a2, $a1
+; LA64-NEXT:    nor $a3, $a3, $zero
+; LA64-NEXT:    sc.d $a3, $a0, 0
+; LA64-NEXT:    beqz $a3, .LBB133_1
+; LA64-NEXT:  # %bb.2:
+; LA64-NEXT:    move $a0, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i64_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.d $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB133_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    move $a3, $a0
+; LA64-LAMCAS-NEXT:    and $a4, $a0, $a1
+; LA64-LAMCAS-NEXT:    nor $a4, $a4, $zero
+; LA64-LAMCAS-NEXT:    amcas.d $a0, $a4, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB133_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw nand ptr %a, i64 %b monotonic
+  ret i64 %1
+}
+
+
+
+define i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_and_i8_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    ori $a3, $zero, 255
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    orn $a1, $a1, $a3
+; LA64-NEXT:    amand.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_and_i8_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB134_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB134_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw and ptr %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_and_i16_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    lu12i.w $a3, 15
+; LA64-NEXT:    ori $a3, $a3, 4095
+; LA64-NEXT:    sll.w $a3, $a3, $a2
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    orn $a1, $a1, $a3
+; LA64-NEXT:    amand.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_and_i16_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB135_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB135_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw and ptr %a, i16 %b monotonic
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_or_i8_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    amor.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_or_i8_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB136_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    or $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB136_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw or ptr %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_or_i16_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    amor.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_or_i16_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB137_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    or $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB137_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw or ptr %a, i16 %b monotonic
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_xor_i8_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    andi $a1, $a1, 255
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    amxor.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xor_i8_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB138_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    xor $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB138_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xor ptr %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_xor_i16_monotonic:
+; LA64:       # %bb.0:
+; LA64-NEXT:    slli.d $a2, $a0, 3
+; LA64-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT:    sll.w $a1, $a1, $a2
+; LA64-NEXT:    amxor.w $a3, $a1, $a0
+; LA64-NEXT:    srl.w $a0, $a3, $a2
+; LA64-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xor_i16_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a2, $a0
+; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-NEXT:  .LBB139_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT:    xor $a3, $a0, $a1
+; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT:    amcas.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB139_1
+; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT:    ret
+  %1 = atomicrmw xor ptr %a, i16 %b monotonic
+  ret i16 %1
+
+}

>From 4e088ed61270de65acc53025b45ad0e4fb4c4aa5 Mon Sep 17 00:00:00 2001
From: tangaac <tangyan01 at loongson.cn>
Date: Mon, 18 Nov 2024 12:58:44 +0800
Subject: [PATCH 2/4] implement emitExpandAtomicRMW to custom expand i8/i16
 atomicrmw and/or/xor

---
 .../LoongArch/LoongArchISelLowering.cpp       |   63 +-
 .../Target/LoongArch/LoongArchISelLowering.h  |    1 +
 .../ir-instruction/atomicrmw-lamcas.ll        | 2294 ++++++++++++++---
 3 files changed, 2025 insertions(+), 333 deletions(-)

diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index b64c6dc2165577..da4b8353ca2b01 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -5748,6 +5748,58 @@ bool LoongArchTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
   }
 }
 
+void LoongArchTargetLowering::emitExpandAtomicRMW(AtomicRMWInst *AI) const {
+  AtomicRMWInst::BinOp Op = AI->getOperation();
+
+  assert((Op == AtomicRMWInst::Or || Op == AtomicRMWInst::Xor ||
+          Op == AtomicRMWInst::And) &&
+         "Unable to expand");
+  unsigned MinWordSize = 4;
+
+  IRBuilder<> Builder(AI);
+  LLVMContext &Ctx = Builder.getContext();
+  const DataLayout &DL = AI->getDataLayout();
+  Type *ValueType = AI->getType();
+  Type *WordType = Type::getIntNTy(Ctx, MinWordSize * 8);
+
+  Value *Addr = AI->getPointerOperand();
+  PointerType *PtrTy = cast<PointerType>(Addr->getType());
+  IntegerType *IntTy = DL.getIndexType(Ctx, PtrTy->getAddressSpace());
+
+  Value *AlignedAddr = Builder.CreateIntrinsic(
+      Intrinsic::ptrmask, {PtrTy, IntTy},
+      {Addr, ConstantInt::get(IntTy, ~(uint64_t)(MinWordSize - 1))}, nullptr,
+      "AlignedAddr");
+
+  Value *AddrInt = Builder.CreatePtrToInt(Addr, IntTy);
+  Value *PtrLSB = Builder.CreateAnd(AddrInt, MinWordSize - 1, "PtrLSB");
+  Value *ShiftAmt = Builder.CreateShl(PtrLSB, 3);
+  ShiftAmt = Builder.CreateTrunc(ShiftAmt, WordType, "ShiftAmt");
+  Value *Mask = Builder.CreateShl(
+      ConstantInt::get(WordType,
+                       (1 << (DL.getTypeStoreSize(ValueType) * 8)) - 1),
+      ShiftAmt, "Mask");
+  Value *Inv_Mask = Builder.CreateNot(Mask, "Inv_Mask");
+  Value *ValOperand_Shifted =
+      Builder.CreateShl(Builder.CreateZExt(AI->getValOperand(), WordType),
+                        ShiftAmt, "ValOperand_Shifted");
+  Value *NewOperand;
+  if (Op == AtomicRMWInst::And)
+    NewOperand = Builder.CreateOr(ValOperand_Shifted, Inv_Mask, "AndOperand");
+  else
+    NewOperand = ValOperand_Shifted;
+
+  AtomicRMWInst *NewAI =
+      Builder.CreateAtomicRMW(Op, AlignedAddr, NewOperand, Align(MinWordSize),
+                              AI->getOrdering(), AI->getSyncScopeID());
+
+  Value *Shift = Builder.CreateLShr(NewAI, ShiftAmt, "shifted");
+  Value *Trunc = Builder.CreateTrunc(Shift, ValueType, "extracted");
+  Value *FinalOldResult = Builder.CreateBitCast(Trunc, ValueType);
+  AI->replaceAllUsesWith(FinalOldResult);
+  AI->eraseFromParent();
+}
+
 TargetLowering::AtomicExpansionKind
 LoongArchTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
   // TODO: Add more AtomicRMWInst that needs to be extended.
@@ -5769,9 +5821,14 @@ LoongArchTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
   }
 
   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
-  if (Subtarget.hasLAMCAS() &&
-      (AI->getOperation() == AtomicRMWInst::Nand || Size < 32))
-    return AtomicExpansionKind::CmpXChg;
+  if (Subtarget.hasLAMCAS()) {
+    if (Size < 32 && (AI->getOperation() == AtomicRMWInst::And ||
+                      AI->getOperation() == AtomicRMWInst::Or ||
+                      AI->getOperation() == AtomicRMWInst::Xor))
+      return AtomicExpansionKind::Expand;
+    if ((AI->getOperation() == AtomicRMWInst::Nand || Size < 32))
+      return AtomicExpansionKind::CmpXChg;
+  }
 
   if (Size == 8 || Size == 16)
     return AtomicExpansionKind::MaskedIntrinsic;
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
index 1aa686695b49b8..da5892e1ab1252 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -192,6 +192,7 @@ class LoongArchTargetLowering : public TargetLowering {
   bool hasAndNot(SDValue Y) const override;
   TargetLowering::AtomicExpansionKind
   shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
+  void emitExpandAtomicRMW(AtomicRMWInst *AI) const override;
 
   Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI,
                                       Value *AlignedAddr, Value *Incr,
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-lamcas.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-lamcas.ll
index 2f677038e1db37..728dd778bdf240 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-lamcas.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw-lamcas.ll
@@ -1,6 +1,10 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc --mtriple=loongarch64 -mattr=+d,-lamcas < %s | FileCheck %s --check-prefix=LA64
 ; RUN: llc --mtriple=loongarch64 -mattr=+d,+lamcas < %s | FileCheck %s --check-prefix=LA64-LAMCAS
+; RUN: llc --mtriple=loongarch64 -mattr=+d,+lamcas,+lam-bh < %s | FileCheck %s --check-prefix=LA64-LAMCAS-LAM-BH
+
+; i8/i16 atomicrmw and/or/xor operations should not be expanded with amcas[_db].{b/h};
+; the lam-bh feature takes precedence over lamcas for these operations.
 
 define i8 @atomicrmw_xchg_i8_acquire(ptr %a, i8 %b) nounwind {
 ; LA64-LABEL: atomicrmw_xchg_i8_acquire:
@@ -35,6 +39,12 @@ define i8 @atomicrmw_xchg_i8_acquire(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    move $a0, $a2
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_i8_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap_db.b $a2, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i8 %b acquire
   ret i8 %1
 }
@@ -63,6 +73,12 @@ define i8 @atomicrmw_xchg_0_i8_acquire(ptr %a) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a2, .LBB1_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_0_i8_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap_db.b $a1, $zero, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i8 0 acquire
   ret i8 %1
 }
@@ -91,6 +107,13 @@ define i8 @atomicrmw_xchg_minus_1_i8_acquire(ptr %a) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB2_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_minus_1_i8_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    addi.w $a2, $zero, -1
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap_db.b $a1, $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i8 -1 acquire
   ret i8 %1
 }
@@ -129,6 +152,12 @@ define i16 @atomicrmw_xchg_i16_acquire(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    move $a0, $a2
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_i16_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap_db.h $a2, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i16 %b acquire
   ret i16 %1
 }
@@ -158,6 +187,12 @@ define i16 @atomicrmw_xchg_0_i16_acquire(ptr %a) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a2, .LBB4_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_0_i16_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap_db.h $a1, $zero, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i16 0 acquire
   ret i16 %1
 }
@@ -187,6 +222,13 @@ define i16 @atomicrmw_xchg_minus_1_i16_acquire(ptr %a) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB5_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_minus_1_i16_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    addi.w $a2, $zero, -1
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap_db.h $a1, $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i16 -1 acquire
   ret i16 %1
 
@@ -226,6 +268,12 @@ define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB6_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_add_i8_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amadd_db.b $a2, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw add ptr %a, i8 %b acquire
   ret i8 %1
 }
@@ -265,6 +313,12 @@ define i16 @atomicrmw_add_i16_acquire(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB7_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_add_i16_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amadd_db.h $a2, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw add ptr %a, i16 %b acquire
   ret i16 %1
 
@@ -304,6 +358,13 @@ define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB8_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_sub_i8_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    sub.w $a2, $zero, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    amadd_db.b $a1, $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw sub ptr %a, i8 %b acquire
   ret i8 %1
 }
@@ -343,6 +404,13 @@ define i16 @atomicrmw_sub_i16_acquire(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB9_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_sub_i16_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    sub.w $a2, $zero, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    amadd_db.h $a1, $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw sub ptr %a, i16 %b acquire
   ret i16 %1
 
@@ -391,6 +459,25 @@ define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB10_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_umax_i8_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a3, $a1, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB10_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a4, $a0, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a5, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.b $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a5, .LBB10_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw umax ptr %a, i8 %b acquire
   ret i8 %1
 }
@@ -439,6 +526,25 @@ define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB11_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_umax_i16_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB11_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a5, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.h $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a5, .LBB11_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw umax ptr %a, i16 %b acquire
   ret i16 %1
 }
@@ -487,6 +593,26 @@ define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB12_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_umin_i8_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a3, $a1, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB12_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a4, $a0, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    xori $a4, $a4, 1
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a5, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.b $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a5, .LBB12_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw umin ptr %a, i8 %b acquire
   ret i8 %1
 }
@@ -536,6 +662,26 @@ define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB13_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_umin_i16_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB13_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    xori $a4, $a4, 1
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a5, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.h $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a5, .LBB13_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw umin ptr %a, i16 %b acquire
   ret i16 %1
 }
@@ -586,6 +732,24 @@ define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB14_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_max_i8_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a3, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB14_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.b $a0, $a5, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB14_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw max ptr %a, i8 %b acquire
   ret i8 %1
 }
@@ -638,6 +802,24 @@ define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB15_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_max_i16_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a3, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB15_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.h $a0, $a5, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB15_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw max ptr %a, i16 %b acquire
   ret i16 %1
 }
@@ -689,6 +871,25 @@ define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB16_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_min_i8_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a3, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB16_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    xori $a5, $a5, 1
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.b $a0, $a5, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB16_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw min ptr %a, i8 %b acquire
   ret i8 %1
 }
@@ -742,6 +943,25 @@ define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB17_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_min_i16_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a3, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB17_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    xori $a5, $a5, 1
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.h $a0, $a5, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB17_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw min ptr %a, i16 %b acquire
   ret i16 %1
 }
@@ -782,6 +1002,21 @@ define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB18_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_nand_i8_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB18_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    nor $a3, $a3, $zero
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB18_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw nand ptr %a, i8 %b acquire
   ret i8 %1
 }
@@ -823,6 +1058,21 @@ define i16 @atomicrmw_nand_i16_acquire(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB19_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_nand_i16_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB19_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    nor $a3, $a3, $zero
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB19_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw nand ptr %a, i16 %b acquire
   ret i16 %1
 
@@ -855,6 +1105,21 @@ define i32 @atomicrmw_nand_i32_acquire(ptr %a, i32 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB20_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_nand_i32_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.w $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB20_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a3, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    and $a4, $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    nor $a4, $a4, $zero
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.w $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a3, .LBB20_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw nand ptr %a, i32 %b acquire
   ret i32 %1
 }
@@ -886,6 +1151,21 @@ define i64 @atomicrmw_nand_i64_acquire(ptr %a, i64 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB21_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_nand_i64_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.d $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB21_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a3, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    and $a4, $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    nor $a4, $a4, $zero
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.d $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a3, .LBB21_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw nand ptr %a, i64 %b acquire
   ret i64 %1
 }
@@ -906,17 +1186,29 @@ define i8 @atomicrmw_and_i8_acquire(ptr %a, i8 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_and_i8_acquire:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB22_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB22_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    ori $a3, $zero, 255
+; LA64-LAMCAS-NEXT:    sll.w $a3, $a3, $a2
+; LA64-LAMCAS-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    orn $a1, $a1, $a3
+; LA64-LAMCAS-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_and_i8_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    ori $a3, $zero, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a3, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    orn $a1, $a1, $a3
+; LA64-LAMCAS-LAM-BH-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw and ptr %a, i8 %b acquire
   ret i8 %1
 }
@@ -938,17 +1230,31 @@ define i16 @atomicrmw_and_i16_acquire(ptr %a, i16 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_and_i16_acquire:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB23_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB23_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    lu12i.w $a3, 15
+; LA64-LAMCAS-NEXT:    ori $a3, $a3, 4095
+; LA64-LAMCAS-NEXT:    sll.w $a3, $a3, $a2
+; LA64-LAMCAS-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    orn $a1, $a1, $a3
+; LA64-LAMCAS-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_and_i16_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    lu12i.w $a3, 15
+; LA64-LAMCAS-LAM-BH-NEXT:    ori $a3, $a3, 4095
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a3, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    orn $a1, $a1, $a3
+; LA64-LAMCAS-LAM-BH-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw and ptr %a, i16 %b acquire
   ret i16 %1
 
@@ -967,17 +1273,23 @@ define i8 @atomicrmw_or_i8_acquire(ptr %a, i8 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_or_i8_acquire:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB24_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    or $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB24_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_or_i8_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw or ptr %a, i8 %b acquire
   ret i8 %1
 }
@@ -995,17 +1307,23 @@ define i16 @atomicrmw_or_i16_acquire(ptr %a, i16 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_or_i16_acquire:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB25_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    or $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB25_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_or_i16_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw or ptr %a, i16 %b acquire
   ret i16 %1
 
@@ -1024,17 +1342,23 @@ define i8 @atomicrmw_xor_i8_acquire(ptr %a, i8 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_xor_i8_acquire:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB26_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    xor $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB26_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xor_i8_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xor ptr %a, i8 %b acquire
   ret i8 %1
 }
@@ -1052,17 +1376,23 @@ define i16 @atomicrmw_xor_i16_acquire(ptr %a, i16 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_xor_i16_acquire:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB27_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    xor $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB27_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xor_i16_acquire:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xor ptr %a, i16 %b acquire
   ret i16 %1
 
@@ -1101,6 +1431,12 @@ define i8 @atomicrmw_xchg_i8_release(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    move $a0, $a2
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_i8_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap_db.b $a2, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i8 %b release
   ret i8 %1
 }
@@ -1129,6 +1465,12 @@ define i8 @atomicrmw_xchg_0_i8_release(ptr %a) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a2, .LBB29_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_0_i8_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap_db.b $a1, $zero, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i8 0 release
   ret i8 %1
 }
@@ -1157,6 +1499,13 @@ define i8 @atomicrmw_xchg_minus_1_i8_release(ptr %a) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB30_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_minus_1_i8_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    addi.w $a2, $zero, -1
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap_db.b $a1, $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i8 -1 release
   ret i8 %1
 }
@@ -1195,6 +1544,12 @@ define i16 @atomicrmw_xchg_i16_release(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    move $a0, $a2
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_i16_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap_db.h $a2, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i16 %b release
   ret i16 %1
 }
@@ -1224,6 +1579,12 @@ define i16 @atomicrmw_xchg_0_i16_release(ptr %a) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a2, .LBB32_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_0_i16_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap_db.h $a1, $zero, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i16 0 release
   ret i16 %1
 }
@@ -1253,6 +1614,13 @@ define i16 @atomicrmw_xchg_minus_1_i16_release(ptr %a) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB33_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_minus_1_i16_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    addi.w $a2, $zero, -1
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap_db.h $a1, $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i16 -1 release
   ret i16 %1
 
@@ -1292,6 +1660,12 @@ define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB34_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_add_i8_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amadd_db.b $a2, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw add ptr %a, i8 %b release
   ret i8 %1
 }
@@ -1331,6 +1705,12 @@ define i16 @atomicrmw_add_i16_release(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB35_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_add_i16_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amadd_db.h $a2, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw add ptr %a, i16 %b release
   ret i16 %1
 
@@ -1370,6 +1750,13 @@ define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB36_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_sub_i8_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    sub.w $a2, $zero, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    amadd_db.b $a1, $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw sub ptr %a, i8 %b release
   ret i8 %1
 }
@@ -1409,6 +1796,13 @@ define i16 @atomicrmw_sub_i16_release(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB37_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_sub_i16_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    sub.w $a2, $zero, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    amadd_db.h $a1, $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw sub ptr %a, i16 %b release
   ret i16 %1
 
@@ -1457,6 +1851,25 @@ define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB38_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_umax_i8_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a3, $a1, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB38_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a4, $a0, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a5, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.b $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a5, .LBB38_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw umax ptr %a, i8 %b release
   ret i8 %1
 }
@@ -1505,6 +1918,25 @@ define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB39_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_umax_i16_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB39_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a5, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.h $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a5, .LBB39_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw umax ptr %a, i16 %b release
   ret i16 %1
 }
@@ -1553,6 +1985,26 @@ define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB40_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_umin_i8_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a3, $a1, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB40_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a4, $a0, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    xori $a4, $a4, 1
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a5, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.b $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a5, .LBB40_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw umin ptr %a, i8 %b release
   ret i8 %1
 }
@@ -1602,6 +2054,26 @@ define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB41_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_umin_i16_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB41_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    xori $a4, $a4, 1
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a5, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.h $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a5, .LBB41_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw umin ptr %a, i16 %b release
   ret i16 %1
 }
@@ -1652,6 +2124,24 @@ define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB42_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_max_i8_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a3, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB42_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.b $a0, $a5, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB42_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw max ptr %a, i8 %b release
   ret i8 %1
 }
@@ -1704,6 +2194,24 @@ define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB43_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_max_i16_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a3, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB43_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.h $a0, $a5, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB43_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw max ptr %a, i16 %b release
   ret i16 %1
 }
@@ -1755,6 +2263,25 @@ define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB44_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_min_i8_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a3, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB44_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    xori $a5, $a5, 1
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.b $a0, $a5, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB44_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw min ptr %a, i8 %b release
   ret i8 %1
 }
@@ -1808,6 +2335,25 @@ define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB45_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_min_i16_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a3, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB45_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    xori $a5, $a5, 1
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.h $a0, $a5, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB45_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw min ptr %a, i16 %b release
   ret i16 %1
 }
@@ -1848,6 +2394,21 @@ define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB46_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_nand_i8_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB46_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    nor $a3, $a3, $zero
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB46_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw nand ptr %a, i8 %b release
   ret i8 %1
 }
@@ -1889,6 +2450,21 @@ define i16 @atomicrmw_nand_i16_release(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB47_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_nand_i16_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB47_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    nor $a3, $a3, $zero
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB47_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw nand ptr %a, i16 %b release
   ret i16 %1
 
@@ -1921,6 +2497,21 @@ define i32 @atomicrmw_nand_i32_release(ptr %a, i32 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB48_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_nand_i32_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.w $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB48_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a3, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    and $a4, $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    nor $a4, $a4, $zero
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.w $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a3, .LBB48_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
     %1 = atomicrmw nand ptr %a, i32 %b release
   ret i32 %1
 }
@@ -1952,6 +2543,21 @@ define i64 @atomicrmw_nand_i64_release(ptr %a, i64 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB49_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_nand_i64_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.d $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB49_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a3, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    and $a4, $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    nor $a4, $a4, $zero
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.d $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a3, .LBB49_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw nand ptr %a, i64 %b release
   ret i64 %1
 }
@@ -1972,17 +2578,29 @@ define i8 @atomicrmw_and_i8_release(ptr %a, i8 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_and_i8_release:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB50_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB50_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    ori $a3, $zero, 255
+; LA64-LAMCAS-NEXT:    sll.w $a3, $a3, $a2
+; LA64-LAMCAS-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    orn $a1, $a1, $a3
+; LA64-LAMCAS-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_and_i8_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    ori $a3, $zero, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a3, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    orn $a1, $a1, $a3
+; LA64-LAMCAS-LAM-BH-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw and ptr %a, i8 %b release
   ret i8 %1
 }
@@ -2004,17 +2622,31 @@ define i16 @atomicrmw_and_i16_release(ptr %a, i16 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_and_i16_release:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB51_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB51_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    lu12i.w $a3, 15
+; LA64-LAMCAS-NEXT:    ori $a3, $a3, 4095
+; LA64-LAMCAS-NEXT:    sll.w $a3, $a3, $a2
+; LA64-LAMCAS-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    orn $a1, $a1, $a3
+; LA64-LAMCAS-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_and_i16_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    lu12i.w $a3, 15
+; LA64-LAMCAS-LAM-BH-NEXT:    ori $a3, $a3, 4095
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a3, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    orn $a1, $a1, $a3
+; LA64-LAMCAS-LAM-BH-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw and ptr %a, i16 %b release
   ret i16 %1
 
@@ -2033,17 +2665,23 @@ define i8 @atomicrmw_or_i8_release(ptr %a, i8 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_or_i8_release:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB52_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    or $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB52_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_or_i8_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw or ptr %a, i8 %b release
   ret i8 %1
 }
@@ -2061,17 +2699,23 @@ define i16 @atomicrmw_or_i16_release(ptr %a, i16 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_or_i16_release:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB53_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    or $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB53_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_or_i16_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw or ptr %a, i16 %b release
   ret i16 %1
 
@@ -2090,17 +2734,23 @@ define i8 @atomicrmw_xor_i8_release(ptr %a, i8 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_xor_i8_release:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB54_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    xor $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB54_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xor_i8_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xor ptr %a, i8 %b release
   ret i8 %1
 }
@@ -2118,17 +2768,23 @@ define i16 @atomicrmw_xor_i16_release(ptr %a, i16 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_xor_i16_release:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB55_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    xor $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB55_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xor_i16_release:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xor ptr %a, i16 %b release
   ret i16 %1
 
@@ -2167,6 +2823,12 @@ define i8 @atomicrmw_xchg_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    move $a0, $a2
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_i8_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap_db.b $a2, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -2195,6 +2857,12 @@ define i8 @atomicrmw_xchg_0_i8_acq_rel(ptr %a) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a2, .LBB57_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_0_i8_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap_db.b $a1, $zero, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i8 0 acq_rel
   ret i8 %1
 }
@@ -2223,6 +2891,13 @@ define i8 @atomicrmw_xchg_minus_1_i8_acq_rel(ptr %a) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB58_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_minus_1_i8_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    addi.w $a2, $zero, -1
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap_db.b $a1, $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i8 -1 acq_rel
   ret i8 %1
 }
@@ -2261,6 +2936,12 @@ define i16 @atomicrmw_xchg_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    move $a0, $a2
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_i16_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap_db.h $a2, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -2290,6 +2971,12 @@ define i16 @atomicrmw_xchg_0_i16_acq_rel(ptr %a) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a2, .LBB60_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_0_i16_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap_db.h $a1, $zero, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i16 0 acq_rel
   ret i16 %1
 }
@@ -2319,6 +3006,13 @@ define i16 @atomicrmw_xchg_minus_1_i16_acq_rel(ptr %a) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB61_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_minus_1_i16_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    addi.w $a2, $zero, -1
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap_db.h $a1, $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i16 -1 acq_rel
   ret i16 %1
 
@@ -2358,6 +3052,12 @@ define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB62_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_add_i8_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amadd_db.b $a2, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw add ptr %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -2397,6 +3097,12 @@ define i16 @atomicrmw_add_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB63_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_add_i16_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amadd_db.h $a2, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw add ptr %a, i16 %b acq_rel
   ret i16 %1
 
@@ -2436,6 +3142,13 @@ define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB64_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_sub_i8_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    sub.w $a2, $zero, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    amadd_db.b $a1, $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw sub ptr %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -2475,6 +3188,13 @@ define i16 @atomicrmw_sub_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB65_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_sub_i16_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    sub.w $a2, $zero, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    amadd_db.h $a1, $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw sub ptr %a, i16 %b acq_rel
   ret i16 %1
 
@@ -2523,6 +3243,25 @@ define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB66_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_umax_i8_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a3, $a1, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB66_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a4, $a0, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a5, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.b $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a5, .LBB66_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw umax ptr %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -2571,6 +3310,25 @@ define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB67_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_umax_i16_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB67_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a5, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.h $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a5, .LBB67_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw umax ptr %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -2619,6 +3377,26 @@ define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB68_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_umin_i8_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a3, $a1, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB68_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a4, $a0, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    xori $a4, $a4, 1
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a5, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.b $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a5, .LBB68_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw umin ptr %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -2668,6 +3446,26 @@ define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB69_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_umin_i16_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB69_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    xori $a4, $a4, 1
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a5, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.h $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a5, .LBB69_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw umin ptr %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -2718,6 +3516,24 @@ define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB70_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_max_i8_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a3, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB70_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.b $a0, $a5, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB70_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw max ptr %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -2770,6 +3586,24 @@ define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB71_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_max_i16_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a3, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB71_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.h $a0, $a5, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB71_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw max ptr %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -2821,6 +3655,25 @@ define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB72_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_min_i8_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a3, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB72_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    xori $a5, $a5, 1
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.b $a0, $a5, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB72_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw min ptr %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -2874,6 +3727,25 @@ define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB73_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_min_i16_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a3, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB73_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    xori $a5, $a5, 1
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.h $a0, $a5, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB73_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw min ptr %a, i16 %b acq_rel
   ret i16 %1
 }
@@ -2914,6 +3786,21 @@ define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB74_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_nand_i8_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB74_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    nor $a3, $a3, $zero
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB74_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw nand ptr %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -2955,6 +3842,21 @@ define i16 @atomicrmw_nand_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB75_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_nand_i16_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB75_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    nor $a3, $a3, $zero
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB75_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw nand ptr %a, i16 %b acq_rel
   ret i16 %1
 
@@ -2987,6 +3889,21 @@ define i32 @atomicrmw_nand_i32_acq_rel(ptr %a, i32 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB76_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_nand_i32_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.w $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB76_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a3, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    and $a4, $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    nor $a4, $a4, $zero
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.w $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a3, .LBB76_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
     %1 = atomicrmw nand ptr %a, i32 %b acq_rel
   ret i32 %1
 }
@@ -3018,6 +3935,21 @@ define i64 @atomicrmw_nand_i64_acq_rel(ptr %a, i64 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB77_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_nand_i64_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.d $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB77_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a3, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    and $a4, $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    nor $a4, $a4, $zero
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.d $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a3, .LBB77_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw nand ptr %a, i64 %b acq_rel
   ret i64 %1
 }
@@ -3040,17 +3972,29 @@ define i8 @atomicrmw_and_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_and_i8_acq_rel:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB78_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB78_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    ori $a3, $zero, 255
+; LA64-LAMCAS-NEXT:    sll.w $a3, $a3, $a2
+; LA64-LAMCAS-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    orn $a1, $a1, $a3
+; LA64-LAMCAS-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_and_i8_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    ori $a3, $zero, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a3, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    orn $a1, $a1, $a3
+; LA64-LAMCAS-LAM-BH-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw and ptr %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -3072,17 +4016,31 @@ define i16 @atomicrmw_and_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_and_i16_acq_rel:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB79_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB79_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    lu12i.w $a3, 15
+; LA64-LAMCAS-NEXT:    ori $a3, $a3, 4095
+; LA64-LAMCAS-NEXT:    sll.w $a3, $a3, $a2
+; LA64-LAMCAS-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    orn $a1, $a1, $a3
+; LA64-LAMCAS-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_and_i16_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    lu12i.w $a3, 15
+; LA64-LAMCAS-LAM-BH-NEXT:    ori $a3, $a3, 4095
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a3, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    orn $a1, $a1, $a3
+; LA64-LAMCAS-LAM-BH-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw and ptr %a, i16 %b acq_rel
   ret i16 %1
 
@@ -3101,17 +4059,23 @@ define i8 @atomicrmw_or_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_or_i8_acq_rel:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB80_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    or $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB80_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_or_i8_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw or ptr %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -3129,17 +4093,23 @@ define i16 @atomicrmw_or_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_or_i16_acq_rel:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB81_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    or $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB81_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_or_i16_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw or ptr %a, i16 %b acq_rel
   ret i16 %1
 
@@ -3158,17 +4128,23 @@ define i8 @atomicrmw_xor_i8_acq_rel(ptr %a, i8 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_xor_i8_acq_rel:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB82_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    xor $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB82_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xor_i8_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xor ptr %a, i8 %b acq_rel
   ret i8 %1
 }
@@ -3186,17 +4162,23 @@ define i16 @atomicrmw_xor_i16_acq_rel(ptr %a, i16 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_xor_i16_acq_rel:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB83_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    xor $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB83_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xor_i16_acq_rel:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xor ptr %a, i16 %b acq_rel
   ret i16 %1
 
@@ -3235,6 +4217,12 @@ define i8 @atomicrmw_xchg_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    move $a0, $a2
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_i8_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap_db.b $a2, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -3263,6 +4251,12 @@ define i8 @atomicrmw_xchg_0_i8_seq_cst(ptr %a) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a2, .LBB85_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_0_i8_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap_db.b $a1, $zero, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i8 0 seq_cst
   ret i8 %1
 }
@@ -3291,6 +4285,13 @@ define i8 @atomicrmw_xchg_minus_1_i8_seq_cst(ptr %a) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB86_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_minus_1_i8_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    addi.w $a2, $zero, -1
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap_db.b $a1, $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i8 -1 seq_cst
   ret i8 %1
 }
@@ -3329,6 +4330,12 @@ define i16 @atomicrmw_xchg_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    move $a0, $a2
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_i16_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap_db.h $a2, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -3358,6 +4365,12 @@ define i16 @atomicrmw_xchg_0_i16_seq_cst(ptr %a) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a2, .LBB88_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_0_i16_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap_db.h $a1, $zero, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i16 0 seq_cst
   ret i16 %1
 }
@@ -3387,6 +4400,13 @@ define i16 @atomicrmw_xchg_minus_1_i16_seq_cst(ptr %a) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB89_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_minus_1_i16_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    addi.w $a2, $zero, -1
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap_db.h $a1, $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i16 -1 seq_cst
   ret i16 %1
 
@@ -3426,6 +4446,12 @@ define i8 @atomicrmw_add_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB90_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_add_i8_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amadd_db.b $a2, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw add ptr %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -3465,6 +4491,12 @@ define i16 @atomicrmw_add_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB91_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_add_i16_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amadd_db.h $a2, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw add ptr %a, i16 %b seq_cst
   ret i16 %1
 
@@ -3504,6 +4536,13 @@ define i8 @atomicrmw_sub_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB92_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_sub_i8_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    sub.w $a2, $zero, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    amadd_db.b $a1, $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw sub ptr %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -3543,6 +4582,13 @@ define i16 @atomicrmw_sub_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB93_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_sub_i16_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    sub.w $a2, $zero, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    amadd_db.h $a1, $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw sub ptr %a, i16 %b seq_cst
   ret i16 %1
 
@@ -3591,6 +4637,25 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB94_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_umax_i8_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a3, $a1, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB94_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a4, $a0, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a5, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.b $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a5, .LBB94_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw umax ptr %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -3639,6 +4704,25 @@ define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB95_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_umax_i16_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB95_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a5, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.h $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a5, .LBB95_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw umax ptr %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -3687,6 +4771,26 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB96_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_umin_i8_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a3, $a1, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB96_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a4, $a0, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    xori $a4, $a4, 1
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a5, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.b $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a5, .LBB96_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw umin ptr %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -3736,6 +4840,26 @@ define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB97_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_umin_i16_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB97_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    xori $a4, $a4, 1
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a5, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.h $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a5, .LBB97_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw umin ptr %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -3786,6 +4910,24 @@ define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB98_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_max_i8_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a3, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB98_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.b $a0, $a5, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB98_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw max ptr %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -3838,6 +4980,24 @@ define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB99_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_max_i16_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a3, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB99_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.h $a0, $a5, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB99_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw max ptr %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -3889,6 +5049,25 @@ define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB100_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_min_i8_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a3, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB100_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    xori $a5, $a5, 1
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.b $a0, $a5, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB100_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw min ptr %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -3942,6 +5121,25 @@ define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB101_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_min_i16_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a3, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB101_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    xori $a5, $a5, 1
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.h $a0, $a5, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB101_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw min ptr %a, i16 %b seq_cst
   ret i16 %1
 }
@@ -3982,6 +5180,21 @@ define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB102_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_nand_i8_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB102_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    nor $a3, $a3, $zero
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB102_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw nand ptr %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -4023,6 +5236,21 @@ define i16 @atomicrmw_nand_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB103_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_nand_i16_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB103_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    nor $a3, $a3, $zero
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB103_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw nand ptr %a, i16 %b seq_cst
   ret i16 %1
 
@@ -4055,6 +5283,21 @@ define i32 @atomicrmw_nand_i32_seq_cst(ptr %a, i32 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB104_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_nand_i32_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.w $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB104_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a3, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    and $a4, $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    nor $a4, $a4, $zero
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.w $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a3, .LBB104_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
     %1 = atomicrmw nand ptr %a, i32 %b seq_cst
   ret i32 %1
 }
@@ -4086,6 +5329,21 @@ define i64 @atomicrmw_nand_i64_seq_cst(ptr %a, i64 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB105_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_nand_i64_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.d $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB105_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a3, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    and $a4, $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    nor $a4, $a4, $zero
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas_db.d $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a3, .LBB105_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw nand ptr %a, i64 %b seq_cst
   ret i64 %1
 }
@@ -4108,17 +5366,29 @@ define i8 @atomicrmw_and_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_and_i8_seq_cst:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB106_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB106_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    ori $a3, $zero, 255
+; LA64-LAMCAS-NEXT:    sll.w $a3, $a3, $a2
+; LA64-LAMCAS-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    orn $a1, $a1, $a3
+; LA64-LAMCAS-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_and_i8_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    ori $a3, $zero, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a3, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    orn $a1, $a1, $a3
+; LA64-LAMCAS-LAM-BH-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw and ptr %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -4140,17 +5410,31 @@ define i16 @atomicrmw_and_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_and_i16_seq_cst:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB107_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB107_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    lu12i.w $a3, 15
+; LA64-LAMCAS-NEXT:    ori $a3, $a3, 4095
+; LA64-LAMCAS-NEXT:    sll.w $a3, $a3, $a2
+; LA64-LAMCAS-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    orn $a1, $a1, $a3
+; LA64-LAMCAS-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_and_i16_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    lu12i.w $a3, 15
+; LA64-LAMCAS-LAM-BH-NEXT:    ori $a3, $a3, 4095
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a3, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    orn $a1, $a1, $a3
+; LA64-LAMCAS-LAM-BH-NEXT:    amand_db.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw and ptr %a, i16 %b seq_cst
   ret i16 %1
 
@@ -4169,17 +5453,23 @@ define i8 @atomicrmw_or_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_or_i8_seq_cst:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB108_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    or $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB108_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_or_i8_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw or ptr %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -4197,17 +5487,23 @@ define i16 @atomicrmw_or_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_or_i16_seq_cst:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB109_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    or $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB109_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_or_i16_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    amor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw or ptr %a, i16 %b seq_cst
   ret i16 %1
 
@@ -4226,17 +5522,23 @@ define i8 @atomicrmw_xor_i8_seq_cst(ptr %a, i8 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_xor_i8_seq_cst:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB110_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    xor $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas_db.b $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB110_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xor_i8_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xor ptr %a, i8 %b seq_cst
   ret i8 %1
 }
@@ -4254,17 +5556,23 @@ define i16 @atomicrmw_xor_i16_seq_cst(ptr %a, i16 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_xor_i16_seq_cst:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB111_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    xor $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas_db.h $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB111_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xor_i16_seq_cst:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    amxor_db.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xor ptr %a, i16 %b seq_cst
   ret i16 %1
 
@@ -4303,6 +5611,12 @@ define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    move $a0, $a2
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_i8_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap.b $a2, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i8 %b monotonic
   ret i8 %1
 }
@@ -4331,6 +5645,12 @@ define i8 @atomicrmw_xchg_0_i8_monotonic(ptr %a) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a2, .LBB113_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_0_i8_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap.b $a1, $zero, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i8 0 monotonic
   ret i8 %1
 }
@@ -4359,6 +5679,13 @@ define i8 @atomicrmw_xchg_minus_1_i8_monotonic(ptr %a) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB114_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_minus_1_i8_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    addi.w $a2, $zero, -1
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap.b $a1, $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i8 -1 monotonic
   ret i8 %1
 }
@@ -4397,6 +5724,12 @@ define i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    move $a0, $a2
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_i16_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap.h $a2, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i16 %b monotonic
   ret i16 %1
 }
@@ -4426,6 +5759,12 @@ define i16 @atomicrmw_xchg_0_i16_monotonic(ptr %a) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a2, .LBB116_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_0_i16_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap.h $a1, $zero, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i16 0 monotonic
   ret i16 %1
 }
@@ -4455,6 +5794,13 @@ define i16 @atomicrmw_xchg_minus_1_i16_monotonic(ptr %a) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB117_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xchg_minus_1_i16_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    addi.w $a2, $zero, -1
+; LA64-LAMCAS-LAM-BH-NEXT:    amswap.h $a1, $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xchg ptr %a, i16 -1 monotonic
   ret i16 %1
 
@@ -4494,6 +5840,12 @@ define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB118_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_add_i8_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amadd.b $a2, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw add ptr %a, i8 %b monotonic
   ret i8 %1
 }
@@ -4533,6 +5885,12 @@ define i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB119_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_add_i16_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    amadd.h $a2, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw add ptr %a, i16 %b monotonic
   ret i16 %1
 
@@ -4572,6 +5930,13 @@ define i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB120_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_sub_i8_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    sub.w $a2, $zero, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    amadd.b $a1, $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw sub ptr %a, i8 %b monotonic
   ret i8 %1
 }
@@ -4611,6 +5976,13 @@ define i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB121_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_sub_i16_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    sub.w $a2, $zero, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    amadd.h $a1, $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw sub ptr %a, i16 %b monotonic
   ret i16 %1
 
@@ -4659,6 +6031,25 @@ define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB122_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_umax_i8_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a3, $a1, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB122_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a4, $a0, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a5, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas.b $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a5, .LBB122_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw umax ptr %a, i8 %b monotonic
   ret i8 %1
 }
@@ -4707,6 +6098,25 @@ define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB123_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_umax_i16_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB123_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a5, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas.h $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a5, .LBB123_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw umax ptr %a, i16 %b monotonic
   ret i16 %1
 }
@@ -4755,6 +6165,26 @@ define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB124_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_umin_i8_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a3, $a1, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB124_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a4, $a0, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    xori $a4, $a4, 1
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a5, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas.b $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a5, .LBB124_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw umin ptr %a, i8 %b monotonic
   ret i8 %1
 }
@@ -4804,6 +6234,26 @@ define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a5, .LBB125_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_umin_i16_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB125_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    sltu $a4, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    xori $a4, $a4, 1
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a5, $a1, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a4, $a4, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a5, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas.h $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a5, .LBB125_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw umin ptr %a, i16 %b monotonic
   ret i16 %1
 }
@@ -4854,6 +6304,24 @@ define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB126_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_max_i8_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a3, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB126_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas.b $a0, $a5, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB126_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw max ptr %a, i8 %b monotonic
   ret i8 %1
 }
@@ -4906,6 +6374,24 @@ define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB127_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_max_i16_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a3, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB127_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas.h $a0, $a5, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB127_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw max ptr %a, i16 %b monotonic
   ret i16 %1
 }
@@ -4957,6 +6443,25 @@ define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB128_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_min_i8_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a3, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB128_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    xori $a5, $a5, 1
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas.b $a0, $a5, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB128_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw min ptr %a, i8 %b monotonic
   ret i8 %1
 }
@@ -5010,6 +6515,25 @@ define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB129_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_min_i16_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a3, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB129_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    slt $a5, $a3, $a4
+; LA64-LAMCAS-LAM-BH-NEXT:    xori $a5, $a5, 1
+; LA64-LAMCAS-LAM-BH-NEXT:    masknez $a6, $a1, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-LAM-BH-NEXT:    or $a5, $a5, $a6
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas.h $a0, $a5, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB129_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw min ptr %a, i16 %b monotonic
   ret i16 %1
 }
@@ -5052,6 +6576,21 @@ define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB130_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_nand_i8_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.bu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB130_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    nor $a3, $a3, $zero
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.b $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas.b $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB130_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw nand ptr %a, i8 %b monotonic
   ret i8 %1
 }
@@ -5093,6 +6632,21 @@ define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB131_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_nand_i16_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.hu $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB131_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    and $a3, $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    nor $a3, $a3, $zero
+; LA64-LAMCAS-LAM-BH-NEXT:    ext.w.h $a4, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas.h $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a4, .LBB131_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw nand ptr %a, i16 %b monotonic
   ret i16 %1
 
@@ -5125,6 +6679,21 @@ define i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB132_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_nand_i32_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.w $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB132_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a3, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    and $a4, $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    nor $a4, $a4, $zero
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas.w $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a3, .LBB132_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
     %1 = atomicrmw nand ptr %a, i32 %b monotonic
   ret i32 %1
 }
@@ -5156,6 +6725,21 @@ define i64 @atomicrmw_nand_i64_monotonic(ptr %a, i64 %b) nounwind {
 ; LA64-LAMCAS-NEXT:    bne $a0, $a3, .LBB133_1
 ; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
 ; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_nand_i64_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a2, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    ld.d $a0, $a0, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    .p2align 4, , 16
+; LA64-LAMCAS-LAM-BH-NEXT:  .LBB133_1: # %atomicrmw.start
+; LA64-LAMCAS-LAM-BH-NEXT:    # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-LAM-BH-NEXT:    move $a3, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    and $a4, $a0, $a1
+; LA64-LAMCAS-LAM-BH-NEXT:    nor $a4, $a4, $zero
+; LA64-LAMCAS-LAM-BH-NEXT:    amcas.d $a0, $a4, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bne $a0, $a3, .LBB133_1
+; LA64-LAMCAS-LAM-BH-NEXT:  # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw nand ptr %a, i64 %b monotonic
   ret i64 %1
 }
@@ -5178,17 +6762,29 @@ define i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_and_i8_monotonic:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB134_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas.b $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB134_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    ori $a3, $zero, 255
+; LA64-LAMCAS-NEXT:    sll.w $a3, $a3, $a2
+; LA64-LAMCAS-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    orn $a1, $a1, $a3
+; LA64-LAMCAS-NEXT:    amand.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_and_i8_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    ori $a3, $zero, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a3, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    orn $a1, $a1, $a3
+; LA64-LAMCAS-LAM-BH-NEXT:    amand.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw and ptr %a, i8 %b monotonic
   ret i8 %1
 }
@@ -5210,17 +6806,31 @@ define i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_and_i16_monotonic:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB135_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    and $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas.h $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB135_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    lu12i.w $a3, 15
+; LA64-LAMCAS-NEXT:    ori $a3, $a3, 4095
+; LA64-LAMCAS-NEXT:    sll.w $a3, $a3, $a2
+; LA64-LAMCAS-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    orn $a1, $a1, $a3
+; LA64-LAMCAS-NEXT:    amand.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_and_i16_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    lu12i.w $a3, 15
+; LA64-LAMCAS-LAM-BH-NEXT:    ori $a3, $a3, 4095
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a3, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    orn $a1, $a1, $a3
+; LA64-LAMCAS-LAM-BH-NEXT:    amand.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw and ptr %a, i16 %b monotonic
   ret i16 %1
 
@@ -5239,17 +6849,23 @@ define i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_or_i8_monotonic:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB136_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    or $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas.b $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB136_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    amor.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_or_i8_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    amor.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw or ptr %a, i8 %b monotonic
   ret i8 %1
 }
@@ -5267,17 +6883,23 @@ define i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_or_i16_monotonic:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB137_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    or $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas.h $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB137_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    amor.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_or_i16_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    amor.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw or ptr %a, i16 %b monotonic
   ret i16 %1
 
@@ -5296,17 +6918,23 @@ define i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_xor_i8_monotonic:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.bu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB138_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    xor $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.b $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas.b $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB138_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    amxor.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xor_i8_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    andi $a1, $a1, 255
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    amxor.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xor ptr %a, i8 %b monotonic
   ret i8 %1
 }
@@ -5324,17 +6952,23 @@ define i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind {
 ;
 ; LA64-LAMCAS-LABEL: atomicrmw_xor_i16_monotonic:
 ; LA64-LAMCAS:       # %bb.0:
-; LA64-LAMCAS-NEXT:    move $a2, $a0
-; LA64-LAMCAS-NEXT:    ld.hu $a0, $a0, 0
-; LA64-LAMCAS-NEXT:    .p2align 4, , 16
-; LA64-LAMCAS-NEXT:  .LBB139_1: # %atomicrmw.start
-; LA64-LAMCAS-NEXT:    # =>This Inner Loop Header: Depth=1
-; LA64-LAMCAS-NEXT:    xor $a3, $a0, $a1
-; LA64-LAMCAS-NEXT:    ext.w.h $a4, $a0
-; LA64-LAMCAS-NEXT:    amcas.h $a0, $a3, $a2
-; LA64-LAMCAS-NEXT:    bne $a0, $a4, .LBB139_1
-; LA64-LAMCAS-NEXT:  # %bb.2: # %atomicrmw.end
-; LA64-LAMCAS-NEXT:    ret
+; LA64-LAMCAS-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-NEXT:    amxor.w $a3, $a1, $a0
+; LA64-LAMCAS-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-NEXT:    ret
+;
+; LA64-LAMCAS-LAM-BH-LABEL: atomicrmw_xor_i16_monotonic:
+; LA64-LAMCAS-LAM-BH:       # %bb.0:
+; LA64-LAMCAS-LAM-BH-NEXT:    slli.d $a2, $a0, 3
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrins.d $a0, $zero, 1, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    bstrpick.d $a1, $a1, 15, 0
+; LA64-LAMCAS-LAM-BH-NEXT:    sll.w $a1, $a1, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    amxor.w $a3, $a1, $a0
+; LA64-LAMCAS-LAM-BH-NEXT:    srl.w $a0, $a3, $a2
+; LA64-LAMCAS-LAM-BH-NEXT:    ret
   %1 = atomicrmw xor ptr %a, i16 %b monotonic
   ret i16 %1
 

>From 24ba15c5f6b77de3f1bb6cf92d9fa579913a9211 Mon Sep 17 00:00:00 2001
From: tangaac <tangyan01 at loongson.cn>
Date: Tue, 26 Nov 2024 16:33:55 +0800
Subject: [PATCH 3/4] update & add extra tests, support lamcas in
 sys:getHostCPUFeatures and add some additional descriptions

---
 clang/lib/Basic/Targets/LoongArch.cpp         |   3 +-
 clang/test/Driver/loongarch-march.c           |   2 +-
 clang/test/Preprocessor/init-loongarch.c      |   2 +-
 .../LoongArch/LoongArchISelLowering.cpp       |   4 +
 llvm/lib/TargetParser/Host.cpp                |   2 +-
 .../ir-instruction/atomic-cmpxchg.ll          |  98 ++-
 .../LoongArch/atomicrmw-expand.ll             | 812 ++++++++++++++++++
 7 files changed, 918 insertions(+), 5 deletions(-)
 create mode 100644 llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-expand.ll

diff --git a/clang/lib/Basic/Targets/LoongArch.cpp b/clang/lib/Basic/Targets/LoongArch.cpp
index 145c32bd27525a..c2e2d7b8eae9e6 100644
--- a/clang/lib/Basic/Targets/LoongArch.cpp
+++ b/clang/lib/Basic/Targets/LoongArch.cpp
@@ -205,7 +205,8 @@ void LoongArchTargetInfo::getTargetDefines(const LangOptions &Opts,
       // TODO: As more features of the V1.1 ISA are supported, a unified "v1.1"
       // arch feature set will be used to include all sub-features belonging to
       // the V1.1 ISA version.
-      if (HasFeatureFrecipe && HasFeatureLAM_BH && HasFeatureLAMCAS && HasFeatureLD_SEQ_SA)
+      if (HasFeatureFrecipe && HasFeatureLAM_BH && HasFeatureLAMCAS &&
+          HasFeatureLD_SEQ_SA)
         Builder.defineMacro("__loongarch_arch",
                             Twine('"') + "la64v1.1" + Twine('"'));
       else
diff --git a/clang/test/Driver/loongarch-march.c b/clang/test/Driver/loongarch-march.c
index b3a6557f6f231b..f3d1dd342ed7e1 100644
--- a/clang/test/Driver/loongarch-march.c
+++ b/clang/test/Driver/loongarch-march.c
@@ -53,7 +53,7 @@
 // IR-LA464: attributes #[[#]] ={{.*}}"target-cpu"="la464" {{.*}}"target-features"="+64bit,+d,+f,+lasx,+lsx,+ual"
 // IR-LA64V1P0: attributes #[[#]] ={{.*}}"target-cpu"="loongarch64" {{.*}}"target-features"="+64bit,+d,+lsx,+ual"
 // IR-LA64V1P1: attributes #[[#]] ={{.*}}"target-cpu"="loongarch64" {{.*}}"target-features"="+64bit,+d,+frecipe,+lam-bh,+lamcas,+ld-seq-sa,+lsx,+ual"
-// IR-LA664: attributes #[[#]] ={{.*}}"target-cpu"="la664" {{.*}}"target-features"="+64bit,+d,+f,+frecipe,+lam-bh,+lamcas,+ld-seq-sa,+lasx,+lsx,+ual"
+// IR-LA664: attributes #[[#]] ={{.*}}"target-cpu"="la664" {{.*}}"target-features"="+64bit,+d,+f,+frecipe,+lam-bh,+lamcas,+lasx,+ld-seq-sa,+lsx,+ual"
 
 int foo(void) {
   return 3;
diff --git a/clang/test/Preprocessor/init-loongarch.c b/clang/test/Preprocessor/init-loongarch.c
index aa8b1bbc1e7aea..069a108cfb8695 100644
--- a/clang/test/Preprocessor/init-loongarch.c
+++ b/clang/test/Preprocessor/init-loongarch.c
@@ -825,7 +825,7 @@
 // RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=la64v1.1 | \
 // RUN:   FileCheck --match-full-lines  --check-prefixes=ARCH-TUNE,FRECIPE,LAM-BH,LAMCAS,LD-SEQ-SA -DARCH=la64v1.1 -DTUNE=loongarch64 %s
 // RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=la64v1.1 -Xclang -target-feature -Xclang -frecipe | \
-// RUN:   FileCheck --match-full-lines --check-prefixes=ARCH-TUNE,LAM-BH,LAMCAS,lD-SEQ-SA -DARCH=la64v1.0 -DTUNE=loongarch64 %s
+// RUN:   FileCheck --match-full-lines --check-prefixes=ARCH-TUNE,LAM-BH,LAMCAS,LD-SEQ-SA -DARCH=la64v1.0 -DTUNE=loongarch64 %s
 // RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=la64v1.1 -Xclang -target-feature -Xclang -lsx | \
 // RUN:   FileCheck --match-full-lines --check-prefixes=ARCH-TUNE,FRECIPE,LAM-BH,LAMCAS,LD-SEQ-SA -DARCH=loongarch64 -DTUNE=loongarch64 %s
 // RUN: %clang --target=loongarch64 -x c -E -dM %s -o - -march=loongarch64 -Xclang -target-feature -Xclang +frecipe | \
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index da4b8353ca2b01..7297409871edc9 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -5748,6 +5748,10 @@ bool LoongArchTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
   }
 }
 
+// When -mlamcas is enabled, MinCmpXchgSizeInBits will be set to 8,
+// atomicrmw and/or/xor operations with operands less than 32 bits cannot be
+// expanded to am{and/or/xor}[_db].w through AtomicExpandPass. To prevent
+// regression, we need to implement it manually.
 void LoongArchTargetLowering::emitExpandAtomicRMW(AtomicRMWInst *AI) const {
   AtomicRMWInst::BinOp Op = AI->getOperation();
 
diff --git a/llvm/lib/TargetParser/Host.cpp b/llvm/lib/TargetParser/Host.cpp
index 51d6b7cb9b1fd6..b9ce0fdf8ec3d5 100644
--- a/llvm/lib/TargetParser/Host.cpp
+++ b/llvm/lib/TargetParser/Host.cpp
@@ -2026,12 +2026,12 @@ const StringMap<bool> sys::getHostCPUFeatures() {
 
   Features["frecipe"] = cpucfg2 & (1U << 25); // CPUCFG.2.FRECIPE
   Features["lam-bh"] = cpucfg2 & (1U << 27);  // CPUCFG.2.LAM_BH
+  Features["lamcas"] = cpucfg2 & (1U << 28);  // CPUCFG.2.LAMCAS
 
   Features["ld-seq-sa"] = cpucfg3 & (1U << 23); // CPUCFG.3.LD_SEQ_SA
 
   // TODO: Need to complete.
   // Features["div32"] = cpucfg2 & (1U << 26);       // CPUCFG.2.DIV32
-  // Features["lamcas"] = cpucfg2 & (1U << 28);      // CPUCFG.2.LAMCAS
   // Features["llacq-screl"] = cpucfg2 & (1U << 29); // CPUCFG.2.LLACQ_SCREL
   // Features["scq"] = cpucfg2 & (1U << 30);         // CPUCFG.2.SCQ
   return Features;
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/atomic-cmpxchg.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/atomic-cmpxchg.ll
index bbc9052549117a..159ffa8c7238a9 100644
--- a/llvm/test/CodeGen/LoongArch/ir-instruction/atomic-cmpxchg.ll
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/atomic-cmpxchg.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc --mtriple=loongarch64 -mattr=+d,-lamcas,-ld-seq-sa < %s | FileCheck %s --check-prefixes=LA64,NO-LD-SEQ-SA
+; RUN: llc --mtriple=loongarch64 -mattr=+d,-ld-seq-sa < %s | FileCheck %s --check-prefixes=LA64,NO-LD-SEQ-SA
 ; RUN: llc --mtriple=loongarch64 -mattr=+d,+ld-seq-sa < %s | FileCheck %s --check-prefixes=LA64,LD-SEQ-SA
 ; RUN: llc --mtriple=loongarch64 -mattr=+d,+lamcas < %s | FileCheck %s --check-prefix=LA64-LAMCAS
 
@@ -170,6 +170,11 @@ define void @cmpxchg_i8_acquire_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
 ; LD-SEQ-SA-NEXT:  .LBB4_3:
 ; LD-SEQ-SA-NEXT:  .LBB4_4:
 ; LD-SEQ-SA-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i8_acquire_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    amcas_db.b $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    ret
   %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val acquire monotonic
   ret void
 }
@@ -225,6 +230,11 @@ define void @cmpxchg_i16_acquire_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounwin
 ; LD-SEQ-SA-NEXT:  .LBB5_3:
 ; LD-SEQ-SA-NEXT:  .LBB5_4:
 ; LD-SEQ-SA-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i16_acquire_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    amcas_db.h $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    ret
   %res = cmpxchg ptr %ptr, i16 %cmp, i16 %val acquire monotonic
   ret void
 }
@@ -260,6 +270,11 @@ define void @cmpxchg_i32_acquire_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounwin
 ; LD-SEQ-SA-NEXT:  .LBB6_3:
 ; LD-SEQ-SA-NEXT:  .LBB6_4:
 ; LD-SEQ-SA-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i32_acquire_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    amcas_db.w $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    ret
   %res = cmpxchg ptr %ptr, i32 %cmp, i32 %val acquire monotonic
   ret void
 }
@@ -293,6 +308,11 @@ define void @cmpxchg_i64_acquire_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
 ; LD-SEQ-SA-NEXT:  .LBB7_3:
 ; LD-SEQ-SA-NEXT:  .LBB7_4:
 ; LD-SEQ-SA-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i64_acquire_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    amcas_db.d $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    ret
   %res = cmpxchg ptr %ptr, i64 %cmp, i64 %val acquire monotonic
   ret void
 }
@@ -620,6 +640,11 @@ define void @cmpxchg_i8_monotonic_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind
 ; LD-SEQ-SA-NEXT:  .LBB16_3:
 ; LD-SEQ-SA-NEXT:  .LBB16_4:
 ; LD-SEQ-SA-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i8_monotonic_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    amcas.b $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    ret
   %res = cmpxchg ptr %ptr, i8 %cmp, i8 %val monotonic monotonic
   ret void
 }
@@ -675,6 +700,11 @@ define void @cmpxchg_i16_monotonic_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounw
 ; LD-SEQ-SA-NEXT:  .LBB17_3:
 ; LD-SEQ-SA-NEXT:  .LBB17_4:
 ; LD-SEQ-SA-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i16_monotonic_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    amcas.h $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    ret
   %res = cmpxchg ptr %ptr, i16 %cmp, i16 %val monotonic monotonic
   ret void
 }
@@ -710,6 +740,11 @@ define void @cmpxchg_i32_monotonic_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounw
 ; LD-SEQ-SA-NEXT:  .LBB18_3:
 ; LD-SEQ-SA-NEXT:  .LBB18_4:
 ; LD-SEQ-SA-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i32_monotonic_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    amcas.w $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    ret
   %res = cmpxchg ptr %ptr, i32 %cmp, i32 %val monotonic monotonic
   ret void
 }
@@ -743,6 +778,11 @@ define void @cmpxchg_i64_monotonic_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounw
 ; LD-SEQ-SA-NEXT:  .LBB19_3:
 ; LD-SEQ-SA-NEXT:  .LBB19_4:
 ; LD-SEQ-SA-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i64_monotonic_monotonic:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    amcas.d $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    ret
   %res = cmpxchg ptr %ptr, i64 %cmp, i64 %val monotonic monotonic
   ret void
 }
@@ -798,6 +838,12 @@ define i8 @cmpxchg_i8_monotonic_monotonic_reti8(ptr %ptr, i8 %cmp, i8 %val) noun
 ; LD-SEQ-SA-NEXT:  .LBB20_4:
 ; LD-SEQ-SA-NEXT:    srl.w $a0, $a5, $a3
 ; LD-SEQ-SA-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i8_monotonic_monotonic_reti8:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    amcas.b $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    move $a0, $a1
+; LA64-LAMCAS-NEXT:    ret
   %tmp = cmpxchg ptr %ptr, i8 %cmp, i8 %val monotonic monotonic
   %res = extractvalue { i8, i1 } %tmp, 0
   ret i8 %res
@@ -856,6 +902,12 @@ define i16 @cmpxchg_i16_monotonic_monotonic_reti16(ptr %ptr, i16 %cmp, i16 %val)
 ; LD-SEQ-SA-NEXT:  .LBB21_4:
 ; LD-SEQ-SA-NEXT:    srl.w $a0, $a5, $a3
 ; LD-SEQ-SA-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i16_monotonic_monotonic_reti16:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    amcas.h $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    move $a0, $a1
+; LA64-LAMCAS-NEXT:    ret
   %tmp = cmpxchg ptr %ptr, i16 %cmp, i16 %val monotonic monotonic
   %res = extractvalue { i16, i1 } %tmp, 0
   ret i16 %res
@@ -894,6 +946,12 @@ define i32 @cmpxchg_i32_monotonic_monotonic_reti32(ptr %ptr, i32 %cmp, i32 %val)
 ; LD-SEQ-SA-NEXT:  .LBB22_4:
 ; LD-SEQ-SA-NEXT:    move $a0, $a1
 ; LD-SEQ-SA-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i32_monotonic_monotonic_reti32:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    amcas.w $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    move $a0, $a1
+; LA64-LAMCAS-NEXT:    ret
   %tmp = cmpxchg ptr %ptr, i32 %cmp, i32 %val monotonic monotonic
   %res = extractvalue { i32, i1 } %tmp, 0
   ret i32 %res
@@ -930,6 +988,12 @@ define i64 @cmpxchg_i64_monotonic_monotonic_reti64(ptr %ptr, i64 %cmp, i64 %val)
 ; LD-SEQ-SA-NEXT:  .LBB23_4:
 ; LD-SEQ-SA-NEXT:    move $a0, $a3
 ; LD-SEQ-SA-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i64_monotonic_monotonic_reti64:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    amcas.d $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    move $a0, $a1
+; LA64-LAMCAS-NEXT:    ret
   %tmp = cmpxchg ptr %ptr, i64 %cmp, i64 %val monotonic monotonic
   %res = extractvalue { i64, i1 } %tmp, 0
   ret i64 %res
@@ -990,6 +1054,14 @@ define i1 @cmpxchg_i8_monotonic_monotonic_reti1(ptr %ptr, i8 %cmp, i8 %val) noun
 ; LD-SEQ-SA-NEXT:    xor $a0, $a1, $a0
 ; LD-SEQ-SA-NEXT:    sltui $a0, $a0, 1
 ; LD-SEQ-SA-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i8_monotonic_monotonic_reti1:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    ext.w.b $a3, $a1
+; LA64-LAMCAS-NEXT:    amcas.b $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    xor $a0, $a1, $a3
+; LA64-LAMCAS-NEXT:    sltui $a0, $a0, 1
+; LA64-LAMCAS-NEXT:    ret
   %tmp = cmpxchg ptr %ptr, i8 %cmp, i8 %val monotonic monotonic
   %res = extractvalue { i8, i1 } %tmp, 1
   ret i1 %res
@@ -1052,6 +1124,14 @@ define i1 @cmpxchg_i16_monotonic_monotonic_reti1(ptr %ptr, i16 %cmp, i16 %val) n
 ; LD-SEQ-SA-NEXT:    xor $a0, $a1, $a0
 ; LD-SEQ-SA-NEXT:    sltui $a0, $a0, 1
 ; LD-SEQ-SA-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i16_monotonic_monotonic_reti1:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    ext.w.h $a3, $a1
+; LA64-LAMCAS-NEXT:    amcas.h $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    xor $a0, $a1, $a3
+; LA64-LAMCAS-NEXT:    sltui $a0, $a0, 1
+; LA64-LAMCAS-NEXT:    ret
   %tmp = cmpxchg ptr %ptr, i16 %cmp, i16 %val monotonic monotonic
   %res = extractvalue { i16, i1 } %tmp, 1
   ret i1 %res
@@ -1092,6 +1172,14 @@ define i1 @cmpxchg_i32_monotonic_monotonic_reti1(ptr %ptr, i32 %cmp, i32 %val) n
 ; LD-SEQ-SA-NEXT:    xor $a0, $a3, $a1
 ; LD-SEQ-SA-NEXT:    sltui $a0, $a0, 1
 ; LD-SEQ-SA-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i32_monotonic_monotonic_reti1:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    addi.w $a3, $a1, 0
+; LA64-LAMCAS-NEXT:    amcas.w $a1, $a2, $a0
+; LA64-LAMCAS-NEXT:    xor $a0, $a1, $a3
+; LA64-LAMCAS-NEXT:    sltui $a0, $a0, 1
+; LA64-LAMCAS-NEXT:    ret
   %tmp = cmpxchg ptr %ptr, i32 %cmp, i32 %val monotonic monotonic
   %res = extractvalue { i32, i1 } %tmp, 1
   ret i1 %res
@@ -1130,6 +1218,14 @@ define i1 @cmpxchg_i64_monotonic_monotonic_reti1(ptr %ptr, i64 %cmp, i64 %val) n
 ; LD-SEQ-SA-NEXT:    xor $a0, $a3, $a1
 ; LD-SEQ-SA-NEXT:    sltui $a0, $a0, 1
 ; LD-SEQ-SA-NEXT:    ret
+;
+; LA64-LAMCAS-LABEL: cmpxchg_i64_monotonic_monotonic_reti1:
+; LA64-LAMCAS:       # %bb.0:
+; LA64-LAMCAS-NEXT:    move $a3, $a1
+; LA64-LAMCAS-NEXT:    amcas.d $a3, $a2, $a0
+; LA64-LAMCAS-NEXT:    xor $a0, $a3, $a1
+; LA64-LAMCAS-NEXT:    sltui $a0, $a0, 1
+; LA64-LAMCAS-NEXT:    ret
   %tmp = cmpxchg ptr %ptr, i64 %cmp, i64 %val monotonic monotonic
   %res = extractvalue { i64, i1 } %tmp, 1
   ret i1 %res
diff --git a/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-expand.ll b/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-expand.ll
new file mode 100644
index 00000000000000..7f06a926a70345
--- /dev/null
+++ b/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-expand.ll
@@ -0,0 +1,812 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -S --mtriple=loongarch64 --mattr=+d,+lamcas %s | FileCheck %s --check-prefix=NO-EXPAND
+; RUN: opt -S --mtriple=loongarch64 --passes=atomic-expand --mattr=+d,+lamcas %s | FileCheck %s --check-prefix=EXPAND
+
+; When -mlamcas is enabled, all atomicrmw and/or/xor ptr %a, i8/i16 %b (any ordering)
+; will be expanded to am{and/or/xor}[_db].w by LoongArchTargetLowering::emitExpandAtomicRMW
+
+define i8 @atomicrmw_and_i8_acquire(ptr %a, i8 %b) nounwind {
+; NO-EXPAND-LABEL: define i8 @atomicrmw_and_i8_acquire(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0:[0-9]+]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw and ptr [[A]], i8 [[B]] acquire, align 1
+; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
+;
+; EXPAND-LABEL: define i8 @atomicrmw_and_i8_acquire(
+; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0:[0-9]+]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw and ptr [[ALIGNEDADDR]], i32 [[ANDOPERAND]] acquire, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
+;
+  %1 = atomicrmw and ptr %a, i8 %b acquire
+  ret i8 %1
+}
+
+define i16 @atomicrmw_and_i16_acquire(ptr %a, i16 %b) nounwind {
+; NO-EXPAND-LABEL: define i16 @atomicrmw_and_i16_acquire(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw and ptr [[A]], i16 [[B]] acquire, align 2
+; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
+;
+; EXPAND-LABEL: define i16 @atomicrmw_and_i16_acquire(
+; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw and ptr [[ALIGNEDADDR]], i32 [[ANDOPERAND]] acquire, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
+;
+  %1 = atomicrmw and ptr %a, i16 %b acquire
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_or_i8_acquire(ptr %a, i8 %b) nounwind {
+; NO-EXPAND-LABEL: define i8 @atomicrmw_or_i8_acquire(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw or ptr [[A]], i8 [[B]] acquire, align 1
+; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
+;
+; EXPAND-LABEL: define i8 @atomicrmw_or_i8_acquire(
+; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw or ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] acquire, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
+;
+  %1 = atomicrmw or ptr %a, i8 %b acquire
+  ret i8 %1
+}
+
+define i16 @atomicrmw_or_i16_acquire(ptr %a, i16 %b) nounwind {
+; NO-EXPAND-LABEL: define i16 @atomicrmw_or_i16_acquire(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw or ptr [[A]], i16 [[B]] acquire, align 2
+; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
+;
+; EXPAND-LABEL: define i16 @atomicrmw_or_i16_acquire(
+; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw or ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] acquire, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
+;
+  %1 = atomicrmw or ptr %a, i16 %b acquire
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_xor_i8_acquire(ptr %a, i8 %b) nounwind {
+; NO-EXPAND-LABEL: define i8 @atomicrmw_xor_i8_acquire(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw xor ptr [[A]], i8 [[B]] acquire, align 1
+; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
+;
+; EXPAND-LABEL: define i8 @atomicrmw_xor_i8_acquire(
+; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw xor ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] acquire, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
+;
+  %1 = atomicrmw xor ptr %a, i8 %b acquire
+  ret i8 %1
+}
+
+define i16 @atomicrmw_xor_i16_acquire(ptr %a, i16 %b) nounwind {
+; NO-EXPAND-LABEL: define i16 @atomicrmw_xor_i16_acquire(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw xor ptr [[A]], i16 [[B]] acquire, align 2
+; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
+;
+; EXPAND-LABEL: define i16 @atomicrmw_xor_i16_acquire(
+; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw xor ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] acquire, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
+;
+  %1 = atomicrmw xor ptr %a, i16 %b acquire
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_and_i8_release(ptr %a, i8 %b) nounwind {
+; NO-EXPAND-LABEL: define i8 @atomicrmw_and_i8_release(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw and ptr [[A]], i8 [[B]] release, align 1
+; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
+;
+; EXPAND-LABEL: define i8 @atomicrmw_and_i8_release(
+; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw and ptr [[ALIGNEDADDR]], i32 [[ANDOPERAND]] release, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
+;
+  %1 = atomicrmw and ptr %a, i8 %b release
+  ret i8 %1
+}
+
+define i16 @atomicrmw_and_i16_release(ptr %a, i16 %b) nounwind {
+; NO-EXPAND-LABEL: define i16 @atomicrmw_and_i16_release(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw and ptr [[A]], i16 [[B]] release, align 2
+; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
+;
+; EXPAND-LABEL: define i16 @atomicrmw_and_i16_release(
+; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw and ptr [[ALIGNEDADDR]], i32 [[ANDOPERAND]] release, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
+;
+  %1 = atomicrmw and ptr %a, i16 %b release
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_or_i8_release(ptr %a, i8 %b) nounwind {
+; NO-EXPAND-LABEL: define i8 @atomicrmw_or_i8_release(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw or ptr [[A]], i8 [[B]] release, align 1
+; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
+;
+; EXPAND-LABEL: define i8 @atomicrmw_or_i8_release(
+; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw or ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] release, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
+;
+  %1 = atomicrmw or ptr %a, i8 %b release
+  ret i8 %1
+}
+
+define i16 @atomicrmw_or_i16_release(ptr %a, i16 %b) nounwind {
+; NO-EXPAND-LABEL: define i16 @atomicrmw_or_i16_release(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw or ptr [[A]], i16 [[B]] release, align 2
+; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
+;
+; EXPAND-LABEL: define i16 @atomicrmw_or_i16_release(
+; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw or ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] release, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
+;
+  %1 = atomicrmw or ptr %a, i16 %b release
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_xor_i8_release(ptr %a, i8 %b) nounwind {
+; NO-EXPAND-LABEL: define i8 @atomicrmw_xor_i8_release(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw xor ptr [[A]], i8 [[B]] release, align 1
+; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
+;
+; EXPAND-LABEL: define i8 @atomicrmw_xor_i8_release(
+; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw xor ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] release, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
+;
+  %1 = atomicrmw xor ptr %a, i8 %b release
+  ret i8 %1
+}
+
+define i16 @atomicrmw_xor_i16_release(ptr %a, i16 %b) nounwind {
+; NO-EXPAND-LABEL: define i16 @atomicrmw_xor_i16_release(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw xor ptr [[A]], i16 [[B]] release, align 2
+; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
+;
+; EXPAND-LABEL: define i16 @atomicrmw_xor_i16_release(
+; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw xor ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] release, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
+;
+  %1 = atomicrmw xor ptr %a, i16 %b release
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_and_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; NO-EXPAND-LABEL: define i8 @atomicrmw_and_i8_acq_rel(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw and ptr [[A]], i8 [[B]] acq_rel, align 1
+; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
+;
+; EXPAND-LABEL: define i8 @atomicrmw_and_i8_acq_rel(
+; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw and ptr [[ALIGNEDADDR]], i32 [[ANDOPERAND]] acq_rel, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
+;
+  %1 = atomicrmw and ptr %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i16 @atomicrmw_and_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; NO-EXPAND-LABEL: define i16 @atomicrmw_and_i16_acq_rel(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw and ptr [[A]], i16 [[B]] acq_rel, align 2
+; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
+;
+; EXPAND-LABEL: define i16 @atomicrmw_and_i16_acq_rel(
+; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw and ptr [[ALIGNEDADDR]], i32 [[ANDOPERAND]] acq_rel, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
+;
+  %1 = atomicrmw and ptr %a, i16 %b acq_rel
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_or_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; NO-EXPAND-LABEL: define i8 @atomicrmw_or_i8_acq_rel(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw or ptr [[A]], i8 [[B]] acq_rel, align 1
+; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
+;
+; EXPAND-LABEL: define i8 @atomicrmw_or_i8_acq_rel(
+; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw or ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] acq_rel, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
+;
+  %1 = atomicrmw or ptr %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i16 @atomicrmw_or_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; NO-EXPAND-LABEL: define i16 @atomicrmw_or_i16_acq_rel(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw or ptr [[A]], i16 [[B]] acq_rel, align 2
+; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
+;
+; EXPAND-LABEL: define i16 @atomicrmw_or_i16_acq_rel(
+; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw or ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] acq_rel, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
+;
+  %1 = atomicrmw or ptr %a, i16 %b acq_rel
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_xor_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; NO-EXPAND-LABEL: define i8 @atomicrmw_xor_i8_acq_rel(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw xor ptr [[A]], i8 [[B]] acq_rel, align 1
+; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
+;
+; EXPAND-LABEL: define i8 @atomicrmw_xor_i8_acq_rel(
+; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw xor ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] acq_rel, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
+;
+  %1 = atomicrmw xor ptr %a, i8 %b acq_rel
+  ret i8 %1
+}
+
+define i16 @atomicrmw_xor_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; NO-EXPAND-LABEL: define i16 @atomicrmw_xor_i16_acq_rel(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw xor ptr [[A]], i16 [[B]] acq_rel, align 2
+; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
+;
+; EXPAND-LABEL: define i16 @atomicrmw_xor_i16_acq_rel(
+; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw xor ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] acq_rel, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
+;
+  %1 = atomicrmw xor ptr %a, i16 %b acq_rel
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_and_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; NO-EXPAND-LABEL: define i8 @atomicrmw_and_i8_seq_cst(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw and ptr [[A]], i8 [[B]] seq_cst, align 1
+; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
+;
+; EXPAND-LABEL: define i8 @atomicrmw_and_i8_seq_cst(
+; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw and ptr [[ALIGNEDADDR]], i32 [[ANDOPERAND]] seq_cst, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
+;
+  %1 = atomicrmw and ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i16 @atomicrmw_and_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; NO-EXPAND-LABEL: define i16 @atomicrmw_and_i16_seq_cst(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw and ptr [[A]], i16 [[B]] seq_cst, align 2
+; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
+;
+; EXPAND-LABEL: define i16 @atomicrmw_and_i16_seq_cst(
+; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw and ptr [[ALIGNEDADDR]], i32 [[ANDOPERAND]] seq_cst, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
+;
+  %1 = atomicrmw and ptr %a, i16 %b seq_cst
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_or_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; NO-EXPAND-LABEL: define i8 @atomicrmw_or_i8_seq_cst(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw or ptr [[A]], i8 [[B]] seq_cst, align 1
+; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
+;
+; EXPAND-LABEL: define i8 @atomicrmw_or_i8_seq_cst(
+; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw or ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] seq_cst, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
+;
+  %1 = atomicrmw or ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i16 @atomicrmw_or_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; NO-EXPAND-LABEL: define i16 @atomicrmw_or_i16_seq_cst(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw or ptr [[A]], i16 [[B]] seq_cst, align 2
+; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
+;
+; EXPAND-LABEL: define i16 @atomicrmw_or_i16_seq_cst(
+; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw or ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] seq_cst, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
+;
+  %1 = atomicrmw or ptr %a, i16 %b seq_cst
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_xor_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; NO-EXPAND-LABEL: define i8 @atomicrmw_xor_i8_seq_cst(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw xor ptr [[A]], i8 [[B]] seq_cst, align 1
+; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
+;
+; EXPAND-LABEL: define i8 @atomicrmw_xor_i8_seq_cst(
+; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw xor ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] seq_cst, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
+;
+  %1 = atomicrmw xor ptr %a, i8 %b seq_cst
+  ret i8 %1
+}
+
+define i16 @atomicrmw_xor_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; NO-EXPAND-LABEL: define i16 @atomicrmw_xor_i16_seq_cst(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw xor ptr [[A]], i16 [[B]] seq_cst, align 2
+; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
+;
+; EXPAND-LABEL: define i16 @atomicrmw_xor_i16_seq_cst(
+; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw xor ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] seq_cst, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
+;
+  %1 = atomicrmw xor ptr %a, i16 %b seq_cst
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
+; NO-EXPAND-LABEL: define i8 @atomicrmw_and_i8_monotonic(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw and ptr [[A]], i8 [[B]] monotonic, align 1
+; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
+;
+; EXPAND-LABEL: define i8 @atomicrmw_and_i8_monotonic(
+; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw and ptr [[ALIGNEDADDR]], i32 [[ANDOPERAND]] monotonic, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
+;
+  %1 = atomicrmw and ptr %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind {
+; NO-EXPAND-LABEL: define i16 @atomicrmw_and_i16_monotonic(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw and ptr [[A]], i16 [[B]] monotonic, align 2
+; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
+;
+; EXPAND-LABEL: define i16 @atomicrmw_and_i16_monotonic(
+; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw and ptr [[ALIGNEDADDR]], i32 [[ANDOPERAND]] monotonic, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
+;
+  %1 = atomicrmw and ptr %a, i16 %b monotonic
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
+; NO-EXPAND-LABEL: define i8 @atomicrmw_or_i8_monotonic(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw or ptr [[A]], i8 [[B]] monotonic, align 1
+; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
+;
+; EXPAND-LABEL: define i8 @atomicrmw_or_i8_monotonic(
+; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw or ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] monotonic, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
+;
+  %1 = atomicrmw or ptr %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind {
+; NO-EXPAND-LABEL: define i16 @atomicrmw_or_i16_monotonic(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw or ptr [[A]], i16 [[B]] monotonic, align 2
+; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
+;
+; EXPAND-LABEL: define i16 @atomicrmw_or_i16_monotonic(
+; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw or ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] monotonic, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
+;
+  %1 = atomicrmw or ptr %a, i16 %b monotonic
+  ret i16 %1
+
+}
+
+define i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
+; NO-EXPAND-LABEL: define i8 @atomicrmw_xor_i8_monotonic(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw xor ptr [[A]], i8 [[B]] monotonic, align 1
+; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
+;
+; EXPAND-LABEL: define i8 @atomicrmw_xor_i8_monotonic(
+; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw xor ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] monotonic, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
+; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
+;
+  %1 = atomicrmw xor ptr %a, i8 %b monotonic
+  ret i8 %1
+}
+
+define i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind {
+; NO-EXPAND-LABEL: define i16 @atomicrmw_xor_i16_monotonic(
+; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw xor ptr [[A]], i16 [[B]] monotonic, align 2
+; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
+;
+; EXPAND-LABEL: define i16 @atomicrmw_xor_i16_monotonic(
+; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
+; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
+; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
+; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
+; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
+; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
+; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
+; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
+; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
+; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw xor ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] monotonic, align 4
+; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
+; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
+; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
+;
+  %1 = atomicrmw xor ptr %a, i16 %b monotonic
+  ret i16 %1
+
+}
+

>From b0708449f2e8f3698cb93e2fa0fe4953321570ca Mon Sep 17 00:00:00 2001
From: tangaac <tangyan01 at loongson.cn>
Date: Tue, 26 Nov 2024 17:44:54 +0800
Subject: [PATCH 4/4] reduce uncessary test cases

---
 .../LoongArch/atomicrmw-expand.ll             | 647 ------------------
 1 file changed, 647 deletions(-)

diff --git a/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-expand.ll b/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-expand.ll
index 7f06a926a70345..11859b27c10262 100644
--- a/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-expand.ll
+++ b/llvm/test/Transforms/AtomicExpand/LoongArch/atomicrmw-expand.ll
@@ -110,7 +110,6 @@ define i16 @atomicrmw_or_i16_acquire(ptr %a, i16 %b) nounwind {
 ;
   %1 = atomicrmw or ptr %a, i16 %b acquire
   ret i16 %1
-
 }
 
 define i8 @atomicrmw_xor_i8_acquire(ptr %a, i8 %b) nounwind {
@@ -163,650 +162,4 @@ define i16 @atomicrmw_xor_i16_acquire(ptr %a, i16 %b) nounwind {
 ;
   %1 = atomicrmw xor ptr %a, i16 %b acquire
   ret i16 %1
-
-}
-
-define i8 @atomicrmw_and_i8_release(ptr %a, i8 %b) nounwind {
-; NO-EXPAND-LABEL: define i8 @atomicrmw_and_i8_release(
-; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
-; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw and ptr [[A]], i8 [[B]] release, align 1
-; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
-;
-; EXPAND-LABEL: define i8 @atomicrmw_and_i8_release(
-; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
-; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
-; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
-; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
-; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
-; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
-; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw and ptr [[ALIGNEDADDR]], i32 [[ANDOPERAND]] release, align 4
-; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
-; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
-;
-  %1 = atomicrmw and ptr %a, i8 %b release
-  ret i8 %1
-}
-
-define i16 @atomicrmw_and_i16_release(ptr %a, i16 %b) nounwind {
-; NO-EXPAND-LABEL: define i16 @atomicrmw_and_i16_release(
-; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
-; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw and ptr [[A]], i16 [[B]] release, align 2
-; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
-;
-; EXPAND-LABEL: define i16 @atomicrmw_and_i16_release(
-; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
-; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
-; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
-; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
-; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
-; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw and ptr [[ALIGNEDADDR]], i32 [[ANDOPERAND]] release, align 4
-; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
-;
-  %1 = atomicrmw and ptr %a, i16 %b release
-  ret i16 %1
-
-}
-
-define i8 @atomicrmw_or_i8_release(ptr %a, i8 %b) nounwind {
-; NO-EXPAND-LABEL: define i8 @atomicrmw_or_i8_release(
-; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
-; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw or ptr [[A]], i8 [[B]] release, align 1
-; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
-;
-; EXPAND-LABEL: define i8 @atomicrmw_or_i8_release(
-; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
-; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
-; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
-; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
-; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
-; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw or ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] release, align 4
-; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
-; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
-;
-  %1 = atomicrmw or ptr %a, i8 %b release
-  ret i8 %1
-}
-
-define i16 @atomicrmw_or_i16_release(ptr %a, i16 %b) nounwind {
-; NO-EXPAND-LABEL: define i16 @atomicrmw_or_i16_release(
-; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
-; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw or ptr [[A]], i16 [[B]] release, align 2
-; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
-;
-; EXPAND-LABEL: define i16 @atomicrmw_or_i16_release(
-; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
-; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
-; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
-; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
-; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw or ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] release, align 4
-; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
-;
-  %1 = atomicrmw or ptr %a, i16 %b release
-  ret i16 %1
-
-}
-
-define i8 @atomicrmw_xor_i8_release(ptr %a, i8 %b) nounwind {
-; NO-EXPAND-LABEL: define i8 @atomicrmw_xor_i8_release(
-; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
-; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw xor ptr [[A]], i8 [[B]] release, align 1
-; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
-;
-; EXPAND-LABEL: define i8 @atomicrmw_xor_i8_release(
-; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
-; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
-; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
-; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
-; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
-; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw xor ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] release, align 4
-; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
-; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
-;
-  %1 = atomicrmw xor ptr %a, i8 %b release
-  ret i8 %1
-}
-
-define i16 @atomicrmw_xor_i16_release(ptr %a, i16 %b) nounwind {
-; NO-EXPAND-LABEL: define i16 @atomicrmw_xor_i16_release(
-; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
-; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw xor ptr [[A]], i16 [[B]] release, align 2
-; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
-;
-; EXPAND-LABEL: define i16 @atomicrmw_xor_i16_release(
-; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
-; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
-; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
-; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
-; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw xor ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] release, align 4
-; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
-;
-  %1 = atomicrmw xor ptr %a, i16 %b release
-  ret i16 %1
-
-}
-
-define i8 @atomicrmw_and_i8_acq_rel(ptr %a, i8 %b) nounwind {
-; NO-EXPAND-LABEL: define i8 @atomicrmw_and_i8_acq_rel(
-; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
-; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw and ptr [[A]], i8 [[B]] acq_rel, align 1
-; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
-;
-; EXPAND-LABEL: define i8 @atomicrmw_and_i8_acq_rel(
-; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
-; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
-; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
-; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
-; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
-; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
-; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw and ptr [[ALIGNEDADDR]], i32 [[ANDOPERAND]] acq_rel, align 4
-; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
-; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
-;
-  %1 = atomicrmw and ptr %a, i8 %b acq_rel
-  ret i8 %1
-}
-
-define i16 @atomicrmw_and_i16_acq_rel(ptr %a, i16 %b) nounwind {
-; NO-EXPAND-LABEL: define i16 @atomicrmw_and_i16_acq_rel(
-; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
-; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw and ptr [[A]], i16 [[B]] acq_rel, align 2
-; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
-;
-; EXPAND-LABEL: define i16 @atomicrmw_and_i16_acq_rel(
-; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
-; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
-; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
-; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
-; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
-; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw and ptr [[ALIGNEDADDR]], i32 [[ANDOPERAND]] acq_rel, align 4
-; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
-;
-  %1 = atomicrmw and ptr %a, i16 %b acq_rel
-  ret i16 %1
-
-}
-
-define i8 @atomicrmw_or_i8_acq_rel(ptr %a, i8 %b) nounwind {
-; NO-EXPAND-LABEL: define i8 @atomicrmw_or_i8_acq_rel(
-; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
-; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw or ptr [[A]], i8 [[B]] acq_rel, align 1
-; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
-;
-; EXPAND-LABEL: define i8 @atomicrmw_or_i8_acq_rel(
-; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
-; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
-; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
-; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
-; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
-; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw or ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] acq_rel, align 4
-; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
-; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
-;
-  %1 = atomicrmw or ptr %a, i8 %b acq_rel
-  ret i8 %1
-}
-
-define i16 @atomicrmw_or_i16_acq_rel(ptr %a, i16 %b) nounwind {
-; NO-EXPAND-LABEL: define i16 @atomicrmw_or_i16_acq_rel(
-; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
-; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw or ptr [[A]], i16 [[B]] acq_rel, align 2
-; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
-;
-; EXPAND-LABEL: define i16 @atomicrmw_or_i16_acq_rel(
-; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
-; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
-; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
-; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
-; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw or ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] acq_rel, align 4
-; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
-;
-  %1 = atomicrmw or ptr %a, i16 %b acq_rel
-  ret i16 %1
-
-}
-
-define i8 @atomicrmw_xor_i8_acq_rel(ptr %a, i8 %b) nounwind {
-; NO-EXPAND-LABEL: define i8 @atomicrmw_xor_i8_acq_rel(
-; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
-; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw xor ptr [[A]], i8 [[B]] acq_rel, align 1
-; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
-;
-; EXPAND-LABEL: define i8 @atomicrmw_xor_i8_acq_rel(
-; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
-; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
-; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
-; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
-; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
-; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw xor ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] acq_rel, align 4
-; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
-; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
-;
-  %1 = atomicrmw xor ptr %a, i8 %b acq_rel
-  ret i8 %1
-}
-
-define i16 @atomicrmw_xor_i16_acq_rel(ptr %a, i16 %b) nounwind {
-; NO-EXPAND-LABEL: define i16 @atomicrmw_xor_i16_acq_rel(
-; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
-; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw xor ptr [[A]], i16 [[B]] acq_rel, align 2
-; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
-;
-; EXPAND-LABEL: define i16 @atomicrmw_xor_i16_acq_rel(
-; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
-; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
-; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
-; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
-; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw xor ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] acq_rel, align 4
-; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
-;
-  %1 = atomicrmw xor ptr %a, i16 %b acq_rel
-  ret i16 %1
-
 }
-
-define i8 @atomicrmw_and_i8_seq_cst(ptr %a, i8 %b) nounwind {
-; NO-EXPAND-LABEL: define i8 @atomicrmw_and_i8_seq_cst(
-; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
-; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw and ptr [[A]], i8 [[B]] seq_cst, align 1
-; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
-;
-; EXPAND-LABEL: define i8 @atomicrmw_and_i8_seq_cst(
-; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
-; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
-; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
-; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
-; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
-; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
-; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw and ptr [[ALIGNEDADDR]], i32 [[ANDOPERAND]] seq_cst, align 4
-; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
-; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
-;
-  %1 = atomicrmw and ptr %a, i8 %b seq_cst
-  ret i8 %1
-}
-
-define i16 @atomicrmw_and_i16_seq_cst(ptr %a, i16 %b) nounwind {
-; NO-EXPAND-LABEL: define i16 @atomicrmw_and_i16_seq_cst(
-; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
-; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw and ptr [[A]], i16 [[B]] seq_cst, align 2
-; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
-;
-; EXPAND-LABEL: define i16 @atomicrmw_and_i16_seq_cst(
-; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
-; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
-; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
-; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
-; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
-; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw and ptr [[ALIGNEDADDR]], i32 [[ANDOPERAND]] seq_cst, align 4
-; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
-;
-  %1 = atomicrmw and ptr %a, i16 %b seq_cst
-  ret i16 %1
-
-}
-
-define i8 @atomicrmw_or_i8_seq_cst(ptr %a, i8 %b) nounwind {
-; NO-EXPAND-LABEL: define i8 @atomicrmw_or_i8_seq_cst(
-; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
-; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw or ptr [[A]], i8 [[B]] seq_cst, align 1
-; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
-;
-; EXPAND-LABEL: define i8 @atomicrmw_or_i8_seq_cst(
-; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
-; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
-; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
-; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
-; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
-; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw or ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] seq_cst, align 4
-; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
-; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
-;
-  %1 = atomicrmw or ptr %a, i8 %b seq_cst
-  ret i8 %1
-}
-
-define i16 @atomicrmw_or_i16_seq_cst(ptr %a, i16 %b) nounwind {
-; NO-EXPAND-LABEL: define i16 @atomicrmw_or_i16_seq_cst(
-; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
-; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw or ptr [[A]], i16 [[B]] seq_cst, align 2
-; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
-;
-; EXPAND-LABEL: define i16 @atomicrmw_or_i16_seq_cst(
-; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
-; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
-; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
-; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
-; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw or ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] seq_cst, align 4
-; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
-;
-  %1 = atomicrmw or ptr %a, i16 %b seq_cst
-  ret i16 %1
-
-}
-
-define i8 @atomicrmw_xor_i8_seq_cst(ptr %a, i8 %b) nounwind {
-; NO-EXPAND-LABEL: define i8 @atomicrmw_xor_i8_seq_cst(
-; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
-; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw xor ptr [[A]], i8 [[B]] seq_cst, align 1
-; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
-;
-; EXPAND-LABEL: define i8 @atomicrmw_xor_i8_seq_cst(
-; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
-; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
-; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
-; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
-; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
-; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw xor ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] seq_cst, align 4
-; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
-; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
-;
-  %1 = atomicrmw xor ptr %a, i8 %b seq_cst
-  ret i8 %1
-}
-
-define i16 @atomicrmw_xor_i16_seq_cst(ptr %a, i16 %b) nounwind {
-; NO-EXPAND-LABEL: define i16 @atomicrmw_xor_i16_seq_cst(
-; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
-; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw xor ptr [[A]], i16 [[B]] seq_cst, align 2
-; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
-;
-; EXPAND-LABEL: define i16 @atomicrmw_xor_i16_seq_cst(
-; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
-; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
-; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
-; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
-; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw xor ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] seq_cst, align 4
-; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
-;
-  %1 = atomicrmw xor ptr %a, i16 %b seq_cst
-  ret i16 %1
-
-}
-
-define i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
-; NO-EXPAND-LABEL: define i8 @atomicrmw_and_i8_monotonic(
-; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
-; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw and ptr [[A]], i8 [[B]] monotonic, align 1
-; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
-;
-; EXPAND-LABEL: define i8 @atomicrmw_and_i8_monotonic(
-; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
-; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
-; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
-; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
-; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
-; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
-; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw and ptr [[ALIGNEDADDR]], i32 [[ANDOPERAND]] monotonic, align 4
-; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
-; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
-;
-  %1 = atomicrmw and ptr %a, i8 %b monotonic
-  ret i8 %1
-}
-
-define i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind {
-; NO-EXPAND-LABEL: define i16 @atomicrmw_and_i16_monotonic(
-; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
-; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw and ptr [[A]], i16 [[B]] monotonic, align 2
-; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
-;
-; EXPAND-LABEL: define i16 @atomicrmw_and_i16_monotonic(
-; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
-; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
-; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
-; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
-; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[ANDOPERAND:%.*]] = or i32 [[VALOPERAND_SHIFTED]], [[INV_MASK]]
-; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw and ptr [[ALIGNEDADDR]], i32 [[ANDOPERAND]] monotonic, align 4
-; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
-;
-  %1 = atomicrmw and ptr %a, i16 %b monotonic
-  ret i16 %1
-
-}
-
-define i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
-; NO-EXPAND-LABEL: define i8 @atomicrmw_or_i8_monotonic(
-; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
-; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw or ptr [[A]], i8 [[B]] monotonic, align 1
-; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
-;
-; EXPAND-LABEL: define i8 @atomicrmw_or_i8_monotonic(
-; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
-; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
-; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
-; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
-; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
-; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw or ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] monotonic, align 4
-; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
-; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
-;
-  %1 = atomicrmw or ptr %a, i8 %b monotonic
-  ret i8 %1
-}
-
-define i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind {
-; NO-EXPAND-LABEL: define i16 @atomicrmw_or_i16_monotonic(
-; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
-; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw or ptr [[A]], i16 [[B]] monotonic, align 2
-; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
-;
-; EXPAND-LABEL: define i16 @atomicrmw_or_i16_monotonic(
-; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
-; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
-; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
-; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
-; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw or ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] monotonic, align 4
-; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
-;
-  %1 = atomicrmw or ptr %a, i16 %b monotonic
-  ret i16 %1
-
-}
-
-define i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
-; NO-EXPAND-LABEL: define i8 @atomicrmw_xor_i8_monotonic(
-; NO-EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
-; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw xor ptr [[A]], i8 [[B]] monotonic, align 1
-; NO-EXPAND-NEXT:    ret i8 [[TMP1]]
-;
-; EXPAND-LABEL: define i8 @atomicrmw_xor_i8_monotonic(
-; EXPAND-SAME: ptr [[A:%.*]], i8 [[B:%.*]]) #[[ATTR0]] {
-; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
-; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
-; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 255, [[SHIFTAMT]]
-; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; EXPAND-NEXT:    [[TMP3:%.*]] = zext i8 [[B]] to i32
-; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw xor ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] monotonic, align 4
-; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i8
-; EXPAND-NEXT:    ret i8 [[EXTRACTED]]
-;
-  %1 = atomicrmw xor ptr %a, i8 %b monotonic
-  ret i8 %1
-}
-
-define i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind {
-; NO-EXPAND-LABEL: define i16 @atomicrmw_xor_i16_monotonic(
-; NO-EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
-; NO-EXPAND-NEXT:    [[TMP1:%.*]] = atomicrmw xor ptr [[A]], i16 [[B]] monotonic, align 2
-; NO-EXPAND-NEXT:    ret i16 [[TMP1]]
-;
-; EXPAND-LABEL: define i16 @atomicrmw_xor_i16_monotonic(
-; EXPAND-SAME: ptr [[A:%.*]], i16 [[B:%.*]]) #[[ATTR0]] {
-; EXPAND-NEXT:    [[ALIGNEDADDR:%.*]] = call ptr @llvm.ptrmask.p0.i64(ptr [[A]], i64 -4)
-; EXPAND-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[A]] to i64
-; EXPAND-NEXT:    [[PTRLSB:%.*]] = and i64 [[TMP1]], 3
-; EXPAND-NEXT:    [[TMP2:%.*]] = shl i64 [[PTRLSB]], 3
-; EXPAND-NEXT:    [[SHIFTAMT:%.*]] = trunc i64 [[TMP2]] to i32
-; EXPAND-NEXT:    [[MASK:%.*]] = shl i32 65535, [[SHIFTAMT]]
-; EXPAND-NEXT:    [[INV_MASK:%.*]] = xor i32 [[MASK]], -1
-; EXPAND-NEXT:    [[TMP3:%.*]] = zext i16 [[B]] to i32
-; EXPAND-NEXT:    [[VALOPERAND_SHIFTED:%.*]] = shl i32 [[TMP3]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[TMP4:%.*]] = atomicrmw xor ptr [[ALIGNEDADDR]], i32 [[VALOPERAND_SHIFTED]] monotonic, align 4
-; EXPAND-NEXT:    [[SHIFTED:%.*]] = lshr i32 [[TMP4]], [[SHIFTAMT]]
-; EXPAND-NEXT:    [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
-; EXPAND-NEXT:    ret i16 [[EXTRACTED]]
-;
-  %1 = atomicrmw xor ptr %a, i16 %b monotonic
-  ret i16 %1
-
-}
-



More information about the cfe-commits mailing list