[clang] [llvm] [LoongArch] Support amcas[_db].{b/h/w/d} instructions. (PR #114189)
via cfe-commits
cfe-commits at lists.llvm.org
Wed Oct 30 00:25:50 PDT 2024
================
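The diff below is the autogenerated llc test for the new LAMCAS lowering. With -lamcas, sub-word atomicrmw operations keep the existing masked ll.w/sc.w loops; with +lamcas they are instead expanded to a plain load followed by a loop that computes the new value, sign-extends the expected old value (the ext.w.b/ext.w.h before each bne, matching the sign-extended value the amcas result register holds), issues amcas_db.{b/h}, and retries while the value read back differs from the expected one.

As a rough C sketch of the semantics the +lamcas loop implements (illustrative only; the function and variable names here are not from the patch, and __atomic_compare_exchange_n merely stands in for amcas_db.b):

#include <stdint.h>

/* Hypothetical helper: an i8 atomicrmw add lowered as a CAS loop.
 * __atomic_compare_exchange_n plays the role of amcas_db.b: it
 * compares *p against `old`, stores `desired` on a match, and
 * otherwise updates `old` with the value it actually observed. */
static uint8_t atomicrmw_add_i8_cas_loop(uint8_t *p, uint8_t v) {
    uint8_t old = *p;                  /* plain load of the current value */
    uint8_t desired;
    do {
        desired = (uint8_t)(old + v);  /* compute the new value */
    } while (!__atomic_compare_exchange_n(p, &old, desired,
                                          /*weak=*/1,
                                          __ATOMIC_ACQUIRE,
                                          __ATOMIC_ACQUIRE));
    return old;                        /* atomicrmw yields the prior value */
}

Note that atomicrmw returns the value memory held before the update, which is why the LA64-LAMCAS sequences return the value observed by amcas rather than the freshly computed one.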
@@ -0,0 +1,5025 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch64 -mattr=+d,-lamcas < %s | FileCheck %s --check-prefix=LA64
+; RUN: llc --mtriple=loongarch64 -mattr=+d,+lamcas < %s | FileCheck %s --check-prefix=LA64-LAMCAS
+
+define i8 @atomicrmw_xchg_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_xchg_i8_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: addi.w $a5, $a1, 0
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB0_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_i8_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: ld.bu $a2, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB0_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.b $a3, $a2
+; LA64-LAMCAS-NEXT: amcas_db.b $a2, $a1, $a0
+; LA64-LAMCAS-NEXT: bne $a2, $a3, .LBB0_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: move $a0, $a2
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i8 %b acquire
+ ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_0_i8_acquire(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_0_i8_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a1, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a2, $zero, 255
+; LA64-NEXT: sll.w $a2, $a2, $a1
+; LA64-NEXT: nor $a2, $a2, $zero
+; LA64-NEXT: amand_db.w $a3, $a2, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a1
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_0_i8_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a1, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB1_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.b $a2, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $zero, $a1
+; LA64-LAMCAS-NEXT: bne $a0, $a2, .LBB1_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i8 0 acquire
+ ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_minus_1_i8_acquire(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_minus_1_i8_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a1, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a2, $zero, 255
+; LA64-NEXT: sll.w $a2, $a2, $a1
+; LA64-NEXT: amor_db.w $a3, $a2, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a1
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_minus_1_i8_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a1, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: addi.w $a2, $zero, -1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB2_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.b $a3, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a2, $a1
+; LA64-LAMCAS-NEXT: bne $a0, $a3, .LBB2_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i8 -1 acquire
+ ret i8 %1
+}
+
+define i16 @atomicrmw_xchg_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_xchg_i16_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB3_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: addi.w $a5, $a1, 0
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB3_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_i16_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: ld.hu $a2, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB3_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.h $a3, $a2
+; LA64-LAMCAS-NEXT: amcas_db.h $a2, $a1, $a0
+; LA64-LAMCAS-NEXT: bne $a2, $a3, .LBB3_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: move $a0, $a2
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i16 %b acquire
+ ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_0_i16_acquire(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_0_i16_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a1, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a2, 15
+; LA64-NEXT: ori $a2, $a2, 4095
+; LA64-NEXT: sll.w $a2, $a2, $a1
+; LA64-NEXT: nor $a2, $a2, $zero
+; LA64-NEXT: amand_db.w $a3, $a2, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a1
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_0_i16_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a1, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB4_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.h $a2, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $zero, $a1
+; LA64-LAMCAS-NEXT: bne $a0, $a2, .LBB4_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i16 0 acquire
+ ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_minus_1_i16_acquire(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_minus_1_i16_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a1, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a2, 15
+; LA64-NEXT: ori $a2, $a2, 4095
+; LA64-NEXT: sll.w $a2, $a2, $a1
+; LA64-NEXT: amor_db.w $a3, $a2, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a1
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_minus_1_i16_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a1, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: addi.w $a2, $zero, -1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB5_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.h $a3, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a2, $a1
+; LA64-LAMCAS-NEXT: bne $a0, $a3, .LBB5_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i16 -1 acquire
+ ret i16 %1
+
+}
+
+define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_add_i8_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: add.w $a5, $a4, $a1
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB6_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_add_i8_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB6_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: add.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB6_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw add ptr %a, i8 %b acquire
+ ret i8 %1
+}
+
+define i16 @atomicrmw_add_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_add_i16_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB7_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: add.w $a5, $a4, $a1
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB7_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_add_i16_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB7_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: add.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB7_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw add ptr %a, i16 %b acquire
+ ret i16 %1
+
+}
+
+define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_sub_i8_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: sub.w $a5, $a4, $a1
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB8_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_sub_i8_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB8_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: sub.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB8_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw sub ptr %a, i8 %b acquire
+ ret i8 %1
+}
+
+define i16 @atomicrmw_sub_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_sub_i16_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: sub.w $a5, $a4, $a1
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB9_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_sub_i16_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB9_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: sub.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB9_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw sub ptr %a, i16 %b acquire
+ ret i16 %1
+
+}
+
+define i8 @atomicrmw_umax_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i8_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a6, $a4, $a3
+; LA64-NEXT: move $a5, $a4
+; LA64-NEXT: bgeu $a6, $a1, .LBB10_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB10_1 Depth=1
+; LA64-NEXT: xor $a5, $a4, $a1
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: .LBB10_3: # in Loop: Header=BB10_1 Depth=1
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB10_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umax_i8_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: andi $a3, $a1, 255
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB10_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: andi $a4, $a0, 255
+; LA64-LAMCAS-NEXT: sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT: masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT: maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT: or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT: ext.w.b $a5, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a4, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a5, .LBB10_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw umax ptr %a, i8 %b acquire
+ ret i8 %1
+}
+
+define i16 @atomicrmw_umax_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i16_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a6, $a4, $a3
+; LA64-NEXT: move $a5, $a4
+; LA64-NEXT: bgeu $a6, $a1, .LBB11_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB11_1 Depth=1
+; LA64-NEXT: xor $a5, $a4, $a1
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: .LBB11_3: # in Loop: Header=BB11_1 Depth=1
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB11_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umax_i16_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB11_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-NEXT: sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT: masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT: maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT: or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT: ext.w.h $a5, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a4, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a5, .LBB11_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw umax ptr %a, i16 %b acquire
+ ret i16 %1
+}
+
+define i8 @atomicrmw_umin_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i8_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a6, $a4, $a3
+; LA64-NEXT: move $a5, $a4
+; LA64-NEXT: bgeu $a1, $a6, .LBB12_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB12_1 Depth=1
+; LA64-NEXT: xor $a5, $a4, $a1
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: .LBB12_3: # in Loop: Header=BB12_1 Depth=1
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB12_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umin_i8_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: andi $a3, $a1, 255
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB12_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: andi $a4, $a0, 255
+; LA64-LAMCAS-NEXT: sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT: xori $a4, $a4, 1
+; LA64-LAMCAS-NEXT: masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT: maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT: or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT: ext.w.b $a5, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a4, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a5, .LBB12_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw umin ptr %a, i8 %b acquire
+ ret i8 %1
+}
+
+define i16 @atomicrmw_umin_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i16_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB13_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a6, $a4, $a3
+; LA64-NEXT: move $a5, $a4
+; LA64-NEXT: bgeu $a1, $a6, .LBB13_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB13_1 Depth=1
+; LA64-NEXT: xor $a5, $a4, $a1
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: .LBB13_3: # in Loop: Header=BB13_1 Depth=1
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB13_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umin_i16_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB13_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-NEXT: sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT: xori $a4, $a4, 1
+; LA64-LAMCAS-NEXT: masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT: maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT: or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT: ext.w.h $a5, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a4, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a5, .LBB13_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw umin ptr %a, i16 %b acquire
+ ret i16 %1
+}
+
+define i8 @atomicrmw_max_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i8_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a3, $a2, 24
+; LA64-NEXT: ori $a4, $zero, 255
+; LA64-NEXT: sll.w $a4, $a4, $a2
+; LA64-NEXT: ext.w.b $a1, $a1
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: xori $a3, $a3, 56
+; LA64-NEXT: .LBB14_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a5, $a0, 0
+; LA64-NEXT: and $a7, $a5, $a4
+; LA64-NEXT: move $a6, $a5
+; LA64-NEXT: sll.w $a7, $a7, $a3
+; LA64-NEXT: sra.w $a7, $a7, $a3
+; LA64-NEXT: bge $a7, $a1, .LBB14_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB14_1 Depth=1
+; LA64-NEXT: xor $a6, $a5, $a1
+; LA64-NEXT: and $a6, $a6, $a4
+; LA64-NEXT: xor $a6, $a5, $a6
+; LA64-NEXT: .LBB14_3: # in Loop: Header=BB14_1 Depth=1
+; LA64-NEXT: sc.w $a6, $a0, 0
+; LA64-NEXT: beqz $a6, .LBB14_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a5, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_max_i8_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: ext.w.b $a3, $a1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB14_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT: masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT: maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT: or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a5, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB14_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw max ptr %a, i8 %b acquire
+ ret i8 %1
+}
+
+define i16 @atomicrmw_max_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i16_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a3, $a2, 24
+; LA64-NEXT: lu12i.w $a4, 15
+; LA64-NEXT: ori $a4, $a4, 4095
+; LA64-NEXT: sll.w $a4, $a4, $a2
+; LA64-NEXT: ext.w.h $a1, $a1
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: ori $a5, $zero, 48
+; LA64-NEXT: sub.d $a3, $a5, $a3
+; LA64-NEXT: .LBB15_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a5, $a0, 0
+; LA64-NEXT: and $a7, $a5, $a4
+; LA64-NEXT: move $a6, $a5
+; LA64-NEXT: sll.w $a7, $a7, $a3
+; LA64-NEXT: sra.w $a7, $a7, $a3
+; LA64-NEXT: bge $a7, $a1, .LBB15_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB15_1 Depth=1
+; LA64-NEXT: xor $a6, $a5, $a1
+; LA64-NEXT: and $a6, $a6, $a4
+; LA64-NEXT: xor $a6, $a5, $a6
+; LA64-NEXT: .LBB15_3: # in Loop: Header=BB15_1 Depth=1
+; LA64-NEXT: sc.w $a6, $a0, 0
+; LA64-NEXT: beqz $a6, .LBB15_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a5, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_max_i16_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: ext.w.h $a3, $a1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB15_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT: masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT: maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT: or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a5, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB15_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw max ptr %a, i16 %b acquire
+ ret i16 %1
+}
+
+define i8 @atomicrmw_min_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i8_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a3, $a2, 24
+; LA64-NEXT: ori $a4, $zero, 255
+; LA64-NEXT: sll.w $a4, $a4, $a2
+; LA64-NEXT: ext.w.b $a1, $a1
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: xori $a3, $a3, 56
+; LA64-NEXT: .LBB16_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a5, $a0, 0
+; LA64-NEXT: and $a7, $a5, $a4
+; LA64-NEXT: move $a6, $a5
+; LA64-NEXT: sll.w $a7, $a7, $a3
+; LA64-NEXT: sra.w $a7, $a7, $a3
+; LA64-NEXT: bge $a1, $a7, .LBB16_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB16_1 Depth=1
+; LA64-NEXT: xor $a6, $a5, $a1
+; LA64-NEXT: and $a6, $a6, $a4
+; LA64-NEXT: xor $a6, $a5, $a6
+; LA64-NEXT: .LBB16_3: # in Loop: Header=BB16_1 Depth=1
+; LA64-NEXT: sc.w $a6, $a0, 0
+; LA64-NEXT: beqz $a6, .LBB16_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a5, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_min_i8_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: ext.w.b $a3, $a1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB16_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT: xori $a5, $a5, 1
+; LA64-LAMCAS-NEXT: masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT: maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT: or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a5, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB16_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw min ptr %a, i8 %b acquire
+ ret i8 %1
+}
+
+define i16 @atomicrmw_min_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i16_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a3, $a2, 24
+; LA64-NEXT: lu12i.w $a4, 15
+; LA64-NEXT: ori $a4, $a4, 4095
+; LA64-NEXT: sll.w $a4, $a4, $a2
+; LA64-NEXT: ext.w.h $a1, $a1
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: ori $a5, $zero, 48
+; LA64-NEXT: sub.d $a3, $a5, $a3
+; LA64-NEXT: .LBB17_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a5, $a0, 0
+; LA64-NEXT: and $a7, $a5, $a4
+; LA64-NEXT: move $a6, $a5
+; LA64-NEXT: sll.w $a7, $a7, $a3
+; LA64-NEXT: sra.w $a7, $a7, $a3
+; LA64-NEXT: bge $a1, $a7, .LBB17_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB17_1 Depth=1
+; LA64-NEXT: xor $a6, $a5, $a1
+; LA64-NEXT: and $a6, $a6, $a4
+; LA64-NEXT: xor $a6, $a5, $a6
+; LA64-NEXT: .LBB17_3: # in Loop: Header=BB17_1 Depth=1
+; LA64-NEXT: sc.w $a6, $a0, 0
+; LA64-NEXT: beqz $a6, .LBB17_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a5, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_min_i16_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: ext.w.h $a3, $a1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB17_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT: xori $a5, $a5, 1
+; LA64-LAMCAS-NEXT: masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT: maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT: or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a5, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB17_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw min ptr %a, i16 %b acquire
+ ret i16 %1
+}
+
+define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i8_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB18_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a5, $a4, $a1
+; LA64-NEXT: nor $a5, $a5, $zero
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB18_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i8_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB18_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: nor $a3, $a3, $zero
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB18_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw nand ptr %a, i8 %b acquire
+ ret i8 %1
+}
+
+define i16 @atomicrmw_nand_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i16_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB19_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a5, $a4, $a1
+; LA64-NEXT: nor $a5, $a5, $zero
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB19_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i16_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB19_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: nor $a3, $a3, $zero
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB19_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw nand ptr %a, i16 %b acquire
+ ret i16 %1
+
+}
+
+define i8 @atomicrmw_and_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_and_i8_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: orn $a1, $a1, $a3
+; LA64-NEXT: amand_db.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_and_i8_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB20_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB20_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw and ptr %a, i8 %b acquire
+ ret i8 %1
+}
+
+define i16 @atomicrmw_and_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_and_i16_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: orn $a1, $a1, $a3
+; LA64-NEXT: amand_db.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_and_i16_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB21_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB21_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw and ptr %a, i16 %b acquire
+ ret i16 %1
+
+}
+
+define i8 @atomicrmw_or_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_or_i8_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: amor_db.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_or_i8_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB22_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: or $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB22_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw or ptr %a, i8 %b acquire
+ ret i8 %1
+}
+
+define i16 @atomicrmw_or_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_or_i16_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: amor_db.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_or_i16_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB23_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: or $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB23_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw or ptr %a, i16 %b acquire
+ ret i16 %1
+
+}
+
+define i8 @atomicrmw_xor_i8_acquire(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_xor_i8_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: amxor_db.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xor_i8_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB24_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: xor $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB24_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xor ptr %a, i8 %b acquire
+ ret i8 %1
+}
+
+define i16 @atomicrmw_xor_i16_acquire(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_xor_i16_acquire:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: amxor_db.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xor_i16_acquire:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB25_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: xor $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB25_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xor ptr %a, i16 %b acquire
+ ret i16 %1
+
+}
+
+define i8 @atomicrmw_xchg_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_xchg_i8_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB26_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: addi.w $a5, $a1, 0
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB26_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_i8_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: ld.bu $a2, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB26_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.b $a3, $a2
+; LA64-LAMCAS-NEXT: amcas_db.b $a2, $a1, $a0
+; LA64-LAMCAS-NEXT: bne $a2, $a3, .LBB26_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: move $a0, $a2
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i8 %b release
+ ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_0_i8_release(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_0_i8_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a1, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a2, $zero, 255
+; LA64-NEXT: sll.w $a2, $a2, $a1
+; LA64-NEXT: nor $a2, $a2, $zero
+; LA64-NEXT: amand_db.w $a3, $a2, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a1
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_0_i8_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a1, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB27_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.b $a2, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $zero, $a1
+; LA64-LAMCAS-NEXT: bne $a0, $a2, .LBB27_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i8 0 release
+ ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_minus_1_i8_release(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_minus_1_i8_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a1, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a2, $zero, 255
+; LA64-NEXT: sll.w $a2, $a2, $a1
+; LA64-NEXT: amor_db.w $a3, $a2, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a1
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_minus_1_i8_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a1, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: addi.w $a2, $zero, -1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB28_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.b $a3, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a2, $a1
+; LA64-LAMCAS-NEXT: bne $a0, $a3, .LBB28_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i8 -1 release
+ ret i8 %1
+}
+
+define i16 @atomicrmw_xchg_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_xchg_i16_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB29_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: addi.w $a5, $a1, 0
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB29_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_i16_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: ld.hu $a2, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB29_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.h $a3, $a2
+; LA64-LAMCAS-NEXT: amcas_db.h $a2, $a1, $a0
+; LA64-LAMCAS-NEXT: bne $a2, $a3, .LBB29_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: move $a0, $a2
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i16 %b release
+ ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_0_i16_release(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_0_i16_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a1, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a2, 15
+; LA64-NEXT: ori $a2, $a2, 4095
+; LA64-NEXT: sll.w $a2, $a2, $a1
+; LA64-NEXT: nor $a2, $a2, $zero
+; LA64-NEXT: amand_db.w $a3, $a2, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a1
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_0_i16_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a1, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB30_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.h $a2, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $zero, $a1
+; LA64-LAMCAS-NEXT: bne $a0, $a2, .LBB30_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i16 0 release
+ ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_minus_1_i16_release(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_minus_1_i16_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a1, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a2, 15
+; LA64-NEXT: ori $a2, $a2, 4095
+; LA64-NEXT: sll.w $a2, $a2, $a1
+; LA64-NEXT: amor_db.w $a3, $a2, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a1
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_minus_1_i16_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a1, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: addi.w $a2, $zero, -1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB31_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.h $a3, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a2, $a1
+; LA64-LAMCAS-NEXT: bne $a0, $a3, .LBB31_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i16 -1 release
+ ret i16 %1
+
+}
+
+define i8 @atomicrmw_add_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_add_i8_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB32_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: add.w $a5, $a4, $a1
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB32_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_add_i8_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB32_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: add.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB32_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw add ptr %a, i8 %b release
+ ret i8 %1
+}
+
+define i16 @atomicrmw_add_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_add_i16_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB33_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: add.w $a5, $a4, $a1
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB33_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_add_i16_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB33_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: add.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB33_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw add ptr %a, i16 %b release
+ ret i16 %1
+
+}
+
+define i8 @atomicrmw_sub_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_sub_i8_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB34_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: sub.w $a5, $a4, $a1
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB34_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_sub_i8_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB34_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: sub.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB34_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw sub ptr %a, i8 %b release
+ ret i8 %1
+}
+
+define i16 @atomicrmw_sub_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_sub_i16_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB35_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: sub.w $a5, $a4, $a1
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB35_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_sub_i16_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB35_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: sub.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB35_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw sub ptr %a, i16 %b release
+ ret i16 %1
+
+}
+
+define i8 @atomicrmw_umax_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i8_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB36_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a6, $a4, $a3
+; LA64-NEXT: move $a5, $a4
+; LA64-NEXT: bgeu $a6, $a1, .LBB36_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB36_1 Depth=1
+; LA64-NEXT: xor $a5, $a4, $a1
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: .LBB36_3: # in Loop: Header=BB36_1 Depth=1
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB36_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umax_i8_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: andi $a3, $a1, 255
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB36_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: andi $a4, $a0, 255
+; LA64-LAMCAS-NEXT: sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT: masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT: maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT: or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT: ext.w.b $a5, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a4, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a5, .LBB36_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw umax ptr %a, i8 %b release
+ ret i8 %1
+}
+
+define i16 @atomicrmw_umax_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i16_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB37_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a6, $a4, $a3
+; LA64-NEXT: move $a5, $a4
+; LA64-NEXT: bgeu $a6, $a1, .LBB37_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB37_1 Depth=1
+; LA64-NEXT: xor $a5, $a4, $a1
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: .LBB37_3: # in Loop: Header=BB37_1 Depth=1
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB37_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umax_i16_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB37_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-NEXT: sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT: masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT: maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT: or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT: ext.w.h $a5, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a4, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a5, .LBB37_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw umax ptr %a, i16 %b release
+ ret i16 %1
+}
+
+define i8 @atomicrmw_umin_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i8_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB38_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a6, $a4, $a3
+; LA64-NEXT: move $a5, $a4
+; LA64-NEXT: bgeu $a1, $a6, .LBB38_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB38_1 Depth=1
+; LA64-NEXT: xor $a5, $a4, $a1
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: .LBB38_3: # in Loop: Header=BB38_1 Depth=1
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB38_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umin_i8_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: andi $a3, $a1, 255
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB38_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: andi $a4, $a0, 255
+; LA64-LAMCAS-NEXT: sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT: xori $a4, $a4, 1
+; LA64-LAMCAS-NEXT: masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT: maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT: or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT: ext.w.b $a5, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a4, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a5, .LBB38_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw umin ptr %a, i8 %b release
+ ret i8 %1
+}
+
+define i16 @atomicrmw_umin_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i16_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB39_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a6, $a4, $a3
+; LA64-NEXT: move $a5, $a4
+; LA64-NEXT: bgeu $a1, $a6, .LBB39_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB39_1 Depth=1
+; LA64-NEXT: xor $a5, $a4, $a1
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: .LBB39_3: # in Loop: Header=BB39_1 Depth=1
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB39_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umin_i16_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB39_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-NEXT: sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT: xori $a4, $a4, 1
+; LA64-LAMCAS-NEXT: masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT: maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT: or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT: ext.w.h $a5, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a4, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a5, .LBB39_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw umin ptr %a, i16 %b release
+ ret i16 %1
+}
+
+define i8 @atomicrmw_max_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i8_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a3, $a2, 24
+; LA64-NEXT: ori $a4, $zero, 255
+; LA64-NEXT: sll.w $a4, $a4, $a2
+; LA64-NEXT: ext.w.b $a1, $a1
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: xori $a3, $a3, 56
+; LA64-NEXT: .LBB40_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a5, $a0, 0
+; LA64-NEXT: and $a7, $a5, $a4
+; LA64-NEXT: move $a6, $a5
+; LA64-NEXT: sll.w $a7, $a7, $a3
+; LA64-NEXT: sra.w $a7, $a7, $a3
+; LA64-NEXT: bge $a7, $a1, .LBB40_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB40_1 Depth=1
+; LA64-NEXT: xor $a6, $a5, $a1
+; LA64-NEXT: and $a6, $a6, $a4
+; LA64-NEXT: xor $a6, $a5, $a6
+; LA64-NEXT: .LBB40_3: # in Loop: Header=BB40_1 Depth=1
+; LA64-NEXT: sc.w $a6, $a0, 0
+; LA64-NEXT: beqz $a6, .LBB40_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a5, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_max_i8_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: ext.w.b $a3, $a1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB40_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT: masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT: maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT: or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a5, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB40_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw max ptr %a, i8 %b release
+ ret i8 %1
+}
+
+define i16 @atomicrmw_max_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i16_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a3, $a2, 24
+; LA64-NEXT: lu12i.w $a4, 15
+; LA64-NEXT: ori $a4, $a4, 4095
+; LA64-NEXT: sll.w $a4, $a4, $a2
+; LA64-NEXT: ext.w.h $a1, $a1
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: ori $a5, $zero, 48
+; LA64-NEXT: sub.d $a3, $a5, $a3
+; LA64-NEXT: .LBB41_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a5, $a0, 0
+; LA64-NEXT: and $a7, $a5, $a4
+; LA64-NEXT: move $a6, $a5
+; LA64-NEXT: sll.w $a7, $a7, $a3
+; LA64-NEXT: sra.w $a7, $a7, $a3
+; LA64-NEXT: bge $a7, $a1, .LBB41_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB41_1 Depth=1
+; LA64-NEXT: xor $a6, $a5, $a1
+; LA64-NEXT: and $a6, $a6, $a4
+; LA64-NEXT: xor $a6, $a5, $a6
+; LA64-NEXT: .LBB41_3: # in Loop: Header=BB41_1 Depth=1
+; LA64-NEXT: sc.w $a6, $a0, 0
+; LA64-NEXT: beqz $a6, .LBB41_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a5, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_max_i16_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: ext.w.h $a3, $a1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB41_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT: masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT: maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT: or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a5, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB41_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw max ptr %a, i16 %b release
+ ret i16 %1
+}
+
+define i8 @atomicrmw_min_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i8_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a3, $a2, 24
+; LA64-NEXT: ori $a4, $zero, 255
+; LA64-NEXT: sll.w $a4, $a4, $a2
+; LA64-NEXT: ext.w.b $a1, $a1
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: xori $a3, $a3, 56
+; LA64-NEXT: .LBB42_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a5, $a0, 0
+; LA64-NEXT: and $a7, $a5, $a4
+; LA64-NEXT: move $a6, $a5
+; LA64-NEXT: sll.w $a7, $a7, $a3
+; LA64-NEXT: sra.w $a7, $a7, $a3
+; LA64-NEXT: bge $a1, $a7, .LBB42_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB42_1 Depth=1
+; LA64-NEXT: xor $a6, $a5, $a1
+; LA64-NEXT: and $a6, $a6, $a4
+; LA64-NEXT: xor $a6, $a5, $a6
+; LA64-NEXT: .LBB42_3: # in Loop: Header=BB42_1 Depth=1
+; LA64-NEXT: sc.w $a6, $a0, 0
+; LA64-NEXT: beqz $a6, .LBB42_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a5, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_min_i8_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: ext.w.b $a3, $a1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB42_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT: xori $a5, $a5, 1
+; LA64-LAMCAS-NEXT: masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT: maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT: or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a5, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB42_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw min ptr %a, i8 %b release
+ ret i8 %1
+}
+
+define i16 @atomicrmw_min_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i16_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a3, $a2, 24
+; LA64-NEXT: lu12i.w $a4, 15
+; LA64-NEXT: ori $a4, $a4, 4095
+; LA64-NEXT: sll.w $a4, $a4, $a2
+; LA64-NEXT: ext.w.h $a1, $a1
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: ori $a5, $zero, 48
+; LA64-NEXT: sub.d $a3, $a5, $a3
+; LA64-NEXT: .LBB43_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a5, $a0, 0
+; LA64-NEXT: and $a7, $a5, $a4
+; LA64-NEXT: move $a6, $a5
+; LA64-NEXT: sll.w $a7, $a7, $a3
+; LA64-NEXT: sra.w $a7, $a7, $a3
+; LA64-NEXT: bge $a1, $a7, .LBB43_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB43_1 Depth=1
+; LA64-NEXT: xor $a6, $a5, $a1
+; LA64-NEXT: and $a6, $a6, $a4
+; LA64-NEXT: xor $a6, $a5, $a6
+; LA64-NEXT: .LBB43_3: # in Loop: Header=BB43_1 Depth=1
+; LA64-NEXT: sc.w $a6, $a0, 0
+; LA64-NEXT: beqz $a6, .LBB43_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a5, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_min_i16_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: ext.w.h $a3, $a1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB43_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT: xori $a5, $a5, 1
+; LA64-LAMCAS-NEXT: masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT: maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT: or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a5, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB43_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw min ptr %a, i16 %b release
+ ret i16 %1
+}
+
+define i8 @atomicrmw_nand_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i8_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB44_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a5, $a4, $a1
+; LA64-NEXT: nor $a5, $a5, $zero
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB44_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i8_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB44_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: nor $a3, $a3, $zero
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB44_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw nand ptr %a, i8 %b release
+ ret i8 %1
+}
+
+define i16 @atomicrmw_nand_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i16_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB45_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a5, $a4, $a1
+; LA64-NEXT: nor $a5, $a5, $zero
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB45_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i16_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB45_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: nor $a3, $a3, $zero
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB45_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw nand ptr %a, i16 %b release
+ ret i16 %1
+}
+
+define i8 @atomicrmw_and_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_and_i8_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: orn $a1, $a1, $a3
+; LA64-NEXT: amand_db.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_and_i8_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB46_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB46_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw and ptr %a, i8 %b release
+ ret i8 %1
+}
+
+define i16 @atomicrmw_and_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_and_i16_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: orn $a1, $a1, $a3
+; LA64-NEXT: amand_db.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_and_i16_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB47_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB47_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw and ptr %a, i16 %b release
+ ret i16 %1
+}
+
+define i8 @atomicrmw_or_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_or_i8_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: amor_db.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_or_i8_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB48_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: or $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB48_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw or ptr %a, i8 %b release
+ ret i8 %1
+}
+
+define i16 @atomicrmw_or_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_or_i16_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: amor_db.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_or_i16_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB49_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: or $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB49_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw or ptr %a, i16 %b release
+ ret i16 %1
+}
+
+define i8 @atomicrmw_xor_i8_release(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_xor_i8_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: amxor_db.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xor_i8_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB50_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: xor $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB50_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xor ptr %a, i8 %b release
+ ret i8 %1
+}
+
+define i16 @atomicrmw_xor_i16_release(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_xor_i16_release:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: amxor_db.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xor_i16_release:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB51_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: xor $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB51_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xor ptr %a, i16 %b release
+ ret i16 %1
+}
+
+define i8 @atomicrmw_xchg_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_xchg_i8_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB52_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: addi.w $a5, $a1, 0
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB52_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_i8_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: ld.bu $a2, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB52_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.b $a3, $a2
+; LA64-LAMCAS-NEXT: amcas_db.b $a2, $a1, $a0
+; LA64-LAMCAS-NEXT: bne $a2, $a3, .LBB52_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: move $a0, $a2
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i8 %b acq_rel
+ ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_0_i8_acq_rel(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_0_i8_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a1, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a2, $zero, 255
+; LA64-NEXT: sll.w $a2, $a2, $a1
+; LA64-NEXT: nor $a2, $a2, $zero
+; LA64-NEXT: amand_db.w $a3, $a2, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a1
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_0_i8_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a1, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB53_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.b $a2, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $zero, $a1
+; LA64-LAMCAS-NEXT: bne $a0, $a2, .LBB53_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i8 0 acq_rel
+ ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_minus_1_i8_acq_rel(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_minus_1_i8_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a1, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a2, $zero, 255
+; LA64-NEXT: sll.w $a2, $a2, $a1
+; LA64-NEXT: amor_db.w $a3, $a2, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a1
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_minus_1_i8_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a1, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: addi.w $a2, $zero, -1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB54_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.b $a3, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a2, $a1
+; LA64-LAMCAS-NEXT: bne $a0, $a3, .LBB54_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i8 -1 acq_rel
+ ret i8 %1
+}
+
+define i16 @atomicrmw_xchg_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_xchg_i16_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB55_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: addi.w $a5, $a1, 0
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB55_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_i16_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: ld.hu $a2, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB55_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.h $a3, $a2
+; LA64-LAMCAS-NEXT: amcas_db.h $a2, $a1, $a0
+; LA64-LAMCAS-NEXT: bne $a2, $a3, .LBB55_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: move $a0, $a2
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i16 %b acq_rel
+ ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_0_i16_acq_rel(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_0_i16_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a1, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a2, 15
+; LA64-NEXT: ori $a2, $a2, 4095
+; LA64-NEXT: sll.w $a2, $a2, $a1
+; LA64-NEXT: nor $a2, $a2, $zero
+; LA64-NEXT: amand_db.w $a3, $a2, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a1
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_0_i16_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a1, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB56_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.h $a2, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $zero, $a1
+; LA64-LAMCAS-NEXT: bne $a0, $a2, .LBB56_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i16 0 acq_rel
+ ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_minus_1_i16_acq_rel(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_minus_1_i16_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a1, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a2, 15
+; LA64-NEXT: ori $a2, $a2, 4095
+; LA64-NEXT: sll.w $a2, $a2, $a1
+; LA64-NEXT: amor_db.w $a3, $a2, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a1
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_minus_1_i16_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a1, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: addi.w $a2, $zero, -1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB57_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.h $a3, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a2, $a1
+; LA64-LAMCAS-NEXT: bne $a0, $a3, .LBB57_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i16 -1 acq_rel
+ ret i16 %1
+}
+
+define i8 @atomicrmw_add_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_add_i8_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB58_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: add.w $a5, $a4, $a1
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB58_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_add_i8_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB58_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: add.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB58_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw add ptr %a, i8 %b acq_rel
+ ret i8 %1
+}
+
+define i16 @atomicrmw_add_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_add_i16_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB59_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: add.w $a5, $a4, $a1
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB59_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_add_i16_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB59_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: add.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB59_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw add ptr %a, i16 %b acq_rel
+ ret i16 %1
+}
+
+define i8 @atomicrmw_sub_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_sub_i8_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB60_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: sub.w $a5, $a4, $a1
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB60_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_sub_i8_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB60_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: sub.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB60_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw sub ptr %a, i8 %b acq_rel
+ ret i8 %1
+}
+
+define i16 @atomicrmw_sub_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_sub_i16_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB61_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: sub.w $a5, $a4, $a1
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB61_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_sub_i16_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB61_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: sub.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB61_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw sub ptr %a, i16 %b acq_rel
+ ret i16 %1
+}
+
+define i8 @atomicrmw_umax_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i8_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB62_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a6, $a4, $a3
+; LA64-NEXT: move $a5, $a4
+; LA64-NEXT: bgeu $a6, $a1, .LBB62_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB62_1 Depth=1
+; LA64-NEXT: xor $a5, $a4, $a1
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: .LBB62_3: # in Loop: Header=BB62_1 Depth=1
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB62_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umax_i8_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: andi $a3, $a1, 255
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB62_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: andi $a4, $a0, 255
+; LA64-LAMCAS-NEXT: sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT: masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT: maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT: or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT: ext.w.b $a5, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a4, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a5, .LBB62_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw umax ptr %a, i8 %b acq_rel
+ ret i8 %1
+}
+
+define i16 @atomicrmw_umax_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i16_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB63_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a6, $a4, $a3
+; LA64-NEXT: move $a5, $a4
+; LA64-NEXT: bgeu $a6, $a1, .LBB63_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB63_1 Depth=1
+; LA64-NEXT: xor $a5, $a4, $a1
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: .LBB63_3: # in Loop: Header=BB63_1 Depth=1
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB63_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umax_i16_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB63_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-NEXT: sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT: masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT: maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT: or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT: ext.w.h $a5, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a4, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a5, .LBB63_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw umax ptr %a, i16 %b acq_rel
+ ret i16 %1
+}
+
+define i8 @atomicrmw_umin_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i8_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB64_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a6, $a4, $a3
+; LA64-NEXT: move $a5, $a4
+; LA64-NEXT: bgeu $a1, $a6, .LBB64_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB64_1 Depth=1
+; LA64-NEXT: xor $a5, $a4, $a1
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: .LBB64_3: # in Loop: Header=BB64_1 Depth=1
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB64_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umin_i8_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: andi $a3, $a1, 255
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB64_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: andi $a4, $a0, 255
+; LA64-LAMCAS-NEXT: sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT: xori $a4, $a4, 1
+; LA64-LAMCAS-NEXT: masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT: maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT: or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT: ext.w.b $a5, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a4, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a5, .LBB64_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw umin ptr %a, i8 %b acq_rel
+ ret i8 %1
+}
+
+define i16 @atomicrmw_umin_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i16_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB65_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a6, $a4, $a3
+; LA64-NEXT: move $a5, $a4
+; LA64-NEXT: bgeu $a1, $a6, .LBB65_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB65_1 Depth=1
+; LA64-NEXT: xor $a5, $a4, $a1
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: .LBB65_3: # in Loop: Header=BB65_1 Depth=1
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB65_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umin_i16_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB65_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-NEXT: sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT: xori $a4, $a4, 1
+; LA64-LAMCAS-NEXT: masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT: maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT: or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT: ext.w.h $a5, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a4, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a5, .LBB65_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw umin ptr %a, i16 %b acq_rel
+ ret i16 %1
+}
+
+define i8 @atomicrmw_max_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i8_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a3, $a2, 24
+; LA64-NEXT: ori $a4, $zero, 255
+; LA64-NEXT: sll.w $a4, $a4, $a2
+; LA64-NEXT: ext.w.b $a1, $a1
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: xori $a3, $a3, 56
+; LA64-NEXT: .LBB66_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a5, $a0, 0
+; LA64-NEXT: and $a7, $a5, $a4
+; LA64-NEXT: move $a6, $a5
+; LA64-NEXT: sll.w $a7, $a7, $a3
+; LA64-NEXT: sra.w $a7, $a7, $a3
+; LA64-NEXT: bge $a7, $a1, .LBB66_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB66_1 Depth=1
+; LA64-NEXT: xor $a6, $a5, $a1
+; LA64-NEXT: and $a6, $a6, $a4
+; LA64-NEXT: xor $a6, $a5, $a6
+; LA64-NEXT: .LBB66_3: # in Loop: Header=BB66_1 Depth=1
+; LA64-NEXT: sc.w $a6, $a0, 0
+; LA64-NEXT: beqz $a6, .LBB66_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a5, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_max_i8_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: ext.w.b $a3, $a1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB66_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT: masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT: maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT: or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a5, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB66_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw max ptr %a, i8 %b acq_rel
+ ret i8 %1
+}
+
+define i16 @atomicrmw_max_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i16_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a3, $a2, 24
+; LA64-NEXT: lu12i.w $a4, 15
+; LA64-NEXT: ori $a4, $a4, 4095
+; LA64-NEXT: sll.w $a4, $a4, $a2
+; LA64-NEXT: ext.w.h $a1, $a1
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: ori $a5, $zero, 48
+; LA64-NEXT: sub.d $a3, $a5, $a3
+; LA64-NEXT: .LBB67_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a5, $a0, 0
+; LA64-NEXT: and $a7, $a5, $a4
+; LA64-NEXT: move $a6, $a5
+; LA64-NEXT: sll.w $a7, $a7, $a3
+; LA64-NEXT: sra.w $a7, $a7, $a3
+; LA64-NEXT: bge $a7, $a1, .LBB67_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB67_1 Depth=1
+; LA64-NEXT: xor $a6, $a5, $a1
+; LA64-NEXT: and $a6, $a6, $a4
+; LA64-NEXT: xor $a6, $a5, $a6
+; LA64-NEXT: .LBB67_3: # in Loop: Header=BB67_1 Depth=1
+; LA64-NEXT: sc.w $a6, $a0, 0
+; LA64-NEXT: beqz $a6, .LBB67_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a5, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_max_i16_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: ext.w.h $a3, $a1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB67_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT: masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT: maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT: or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a5, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB67_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw max ptr %a, i16 %b acq_rel
+ ret i16 %1
+}
+
+define i8 @atomicrmw_min_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i8_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a3, $a2, 24
+; LA64-NEXT: ori $a4, $zero, 255
+; LA64-NEXT: sll.w $a4, $a4, $a2
+; LA64-NEXT: ext.w.b $a1, $a1
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: xori $a3, $a3, 56
+; LA64-NEXT: .LBB68_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a5, $a0, 0
+; LA64-NEXT: and $a7, $a5, $a4
+; LA64-NEXT: move $a6, $a5
+; LA64-NEXT: sll.w $a7, $a7, $a3
+; LA64-NEXT: sra.w $a7, $a7, $a3
+; LA64-NEXT: bge $a1, $a7, .LBB68_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB68_1 Depth=1
+; LA64-NEXT: xor $a6, $a5, $a1
+; LA64-NEXT: and $a6, $a6, $a4
+; LA64-NEXT: xor $a6, $a5, $a6
+; LA64-NEXT: .LBB68_3: # in Loop: Header=BB68_1 Depth=1
+; LA64-NEXT: sc.w $a6, $a0, 0
+; LA64-NEXT: beqz $a6, .LBB68_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a5, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_min_i8_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: ext.w.b $a3, $a1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB68_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT: xori $a5, $a5, 1
+; LA64-LAMCAS-NEXT: masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT: maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT: or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a5, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB68_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw min ptr %a, i8 %b acq_rel
+ ret i8 %1
+}
+
+define i16 @atomicrmw_min_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i16_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a3, $a2, 24
+; LA64-NEXT: lu12i.w $a4, 15
+; LA64-NEXT: ori $a4, $a4, 4095
+; LA64-NEXT: sll.w $a4, $a4, $a2
+; LA64-NEXT: ext.w.h $a1, $a1
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: ori $a5, $zero, 48
+; LA64-NEXT: sub.d $a3, $a5, $a3
+; LA64-NEXT: .LBB69_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a5, $a0, 0
+; LA64-NEXT: and $a7, $a5, $a4
+; LA64-NEXT: move $a6, $a5
+; LA64-NEXT: sll.w $a7, $a7, $a3
+; LA64-NEXT: sra.w $a7, $a7, $a3
+; LA64-NEXT: bge $a1, $a7, .LBB69_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB69_1 Depth=1
+; LA64-NEXT: xor $a6, $a5, $a1
+; LA64-NEXT: and $a6, $a6, $a4
+; LA64-NEXT: xor $a6, $a5, $a6
+; LA64-NEXT: .LBB69_3: # in Loop: Header=BB69_1 Depth=1
+; LA64-NEXT: sc.w $a6, $a0, 0
+; LA64-NEXT: beqz $a6, .LBB69_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a5, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_min_i16_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: ext.w.h $a3, $a1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB69_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT: xori $a5, $a5, 1
+; LA64-LAMCAS-NEXT: masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT: maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT: or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a5, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB69_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw min ptr %a, i16 %b acq_rel
+ ret i16 %1
+}
+
+define i8 @atomicrmw_nand_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i8_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB70_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a5, $a4, $a1
+; LA64-NEXT: nor $a5, $a5, $zero
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB70_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i8_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB70_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: nor $a3, $a3, $zero
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB70_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw nand ptr %a, i8 %b acq_rel
+ ret i8 %1
+}
+
+define i16 @atomicrmw_nand_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i16_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB71_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a5, $a4, $a1
+; LA64-NEXT: nor $a5, $a5, $zero
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB71_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i16_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB71_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: nor $a3, $a3, $zero
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB71_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw nand ptr %a, i16 %b acq_rel
+ ret i16 %1
+}
+
+define i8 @atomicrmw_and_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_and_i8_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: orn $a1, $a1, $a3
+; LA64-NEXT: amand_db.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_and_i8_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB72_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB72_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw and ptr %a, i8 %b acq_rel
+ ret i8 %1
+}
+
+define i16 @atomicrmw_and_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_and_i16_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: orn $a1, $a1, $a3
+; LA64-NEXT: amand_db.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_and_i16_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB73_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB73_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw and ptr %a, i16 %b acq_rel
+ ret i16 %1
+}
+
+define i8 @atomicrmw_or_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_or_i8_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: amor_db.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_or_i8_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB74_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: or $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB74_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw or ptr %a, i8 %b acq_rel
+ ret i8 %1
+}
+
+define i16 @atomicrmw_or_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_or_i16_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: amor_db.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_or_i16_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB75_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: or $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB75_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw or ptr %a, i16 %b acq_rel
+ ret i16 %1
+}
+
+define i8 @atomicrmw_xor_i8_acq_rel(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_xor_i8_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: amxor_db.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xor_i8_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB76_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: xor $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB76_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xor ptr %a, i8 %b acq_rel
+ ret i8 %1
+}
+
+define i16 @atomicrmw_xor_i16_acq_rel(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_xor_i16_acq_rel:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: amxor_db.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xor_i16_acq_rel:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB77_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: xor $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB77_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xor ptr %a, i16 %b acq_rel
+ ret i16 %1
+}
+
+define i8 @atomicrmw_xchg_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_xchg_i8_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB78_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: addi.w $a5, $a1, 0
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB78_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_i8_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: ld.bu $a2, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB78_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.b $a3, $a2
+; LA64-LAMCAS-NEXT: amcas_db.b $a2, $a1, $a0
+; LA64-LAMCAS-NEXT: bne $a2, $a3, .LBB78_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: move $a0, $a2
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i8 %b seq_cst
+ ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_0_i8_seq_cst(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_0_i8_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a1, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a2, $zero, 255
+; LA64-NEXT: sll.w $a2, $a2, $a1
+; LA64-NEXT: nor $a2, $a2, $zero
+; LA64-NEXT: amand_db.w $a3, $a2, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a1
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_0_i8_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a1, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB79_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.b $a2, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $zero, $a1
+; LA64-LAMCAS-NEXT: bne $a0, $a2, .LBB79_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i8 0 seq_cst
+ ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_minus_1_i8_seq_cst(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_minus_1_i8_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a1, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a2, $zero, 255
+; LA64-NEXT: sll.w $a2, $a2, $a1
+; LA64-NEXT: amor_db.w $a3, $a2, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a1
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_minus_1_i8_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a1, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: addi.w $a2, $zero, -1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB80_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.b $a3, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a2, $a1
+; LA64-LAMCAS-NEXT: bne $a0, $a3, .LBB80_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i8 -1 seq_cst
+ ret i8 %1
+}
+
+define i16 @atomicrmw_xchg_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_xchg_i16_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB81_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: addi.w $a5, $a1, 0
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB81_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_i16_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: ld.hu $a2, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB81_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.h $a3, $a2
+; LA64-LAMCAS-NEXT: amcas_db.h $a2, $a1, $a0
+; LA64-LAMCAS-NEXT: bne $a2, $a3, .LBB81_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: move $a0, $a2
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i16 %b seq_cst
+ ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_0_i16_seq_cst(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_0_i16_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a1, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a2, 15
+; LA64-NEXT: ori $a2, $a2, 4095
+; LA64-NEXT: sll.w $a2, $a2, $a1
+; LA64-NEXT: nor $a2, $a2, $zero
+; LA64-NEXT: amand_db.w $a3, $a2, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a1
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_0_i16_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a1, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB82_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.h $a2, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $zero, $a1
+; LA64-LAMCAS-NEXT: bne $a0, $a2, .LBB82_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i16 0 seq_cst
+ ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_minus_1_i16_seq_cst(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_minus_1_i16_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a1, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a2, 15
+; LA64-NEXT: ori $a2, $a2, 4095
+; LA64-NEXT: sll.w $a2, $a2, $a1
+; LA64-NEXT: amor_db.w $a3, $a2, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a1
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_minus_1_i16_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a1, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: addi.w $a2, $zero, -1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB83_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.h $a3, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a2, $a1
+; LA64-LAMCAS-NEXT: bne $a0, $a3, .LBB83_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i16 -1 seq_cst
+ ret i16 %1
+}
+
+define i8 @atomicrmw_add_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_add_i8_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB84_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: add.w $a5, $a4, $a1
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB84_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_add_i8_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB84_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: add.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB84_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw add ptr %a, i8 %b seq_cst
+ ret i8 %1
+}
+
+define i16 @atomicrmw_add_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_add_i16_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB85_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: add.w $a5, $a4, $a1
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB85_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_add_i16_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB85_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: add.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB85_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw add ptr %a, i16 %b seq_cst
+ ret i16 %1
+}
+
+define i8 @atomicrmw_sub_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_sub_i8_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB86_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: sub.w $a5, $a4, $a1
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB86_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_sub_i8_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB86_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: sub.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB86_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw sub ptr %a, i8 %b seq_cst
+ ret i8 %1
+}
+
+define i16 @atomicrmw_sub_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_sub_i16_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB87_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: sub.w $a5, $a4, $a1
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB87_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_sub_i16_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB87_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: sub.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB87_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw sub ptr %a, i16 %b seq_cst
+ ret i16 %1
+
+}
+
+define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i8_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB88_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a6, $a4, $a3
+; LA64-NEXT: move $a5, $a4
+; LA64-NEXT: bgeu $a6, $a1, .LBB88_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB88_1 Depth=1
+; LA64-NEXT: xor $a5, $a4, $a1
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: .LBB88_3: # in Loop: Header=BB88_1 Depth=1
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB88_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umax_i8_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: andi $a3, $a1, 255
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB88_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: andi $a4, $a0, 255
+; LA64-LAMCAS-NEXT: sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT: masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT: maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT: or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT: ext.w.b $a5, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a4, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a5, .LBB88_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw umax ptr %a, i8 %b seq_cst
+ ret i8 %1
+}
+
+define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i16_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB89_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a6, $a4, $a3
+; LA64-NEXT: move $a5, $a4
+; LA64-NEXT: bgeu $a6, $a1, .LBB89_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB89_1 Depth=1
+; LA64-NEXT: xor $a5, $a4, $a1
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: .LBB89_3: # in Loop: Header=BB89_1 Depth=1
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB89_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umax_i16_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB89_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-NEXT: sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT: masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT: maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT: or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT: ext.w.h $a5, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a4, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a5, .LBB89_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw umax ptr %a, i16 %b seq_cst
+ ret i16 %1
+}
+
+define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i8_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB90_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a6, $a4, $a3
+; LA64-NEXT: move $a5, $a4
+; LA64-NEXT: bgeu $a1, $a6, .LBB90_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB90_1 Depth=1
+; LA64-NEXT: xor $a5, $a4, $a1
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: .LBB90_3: # in Loop: Header=BB90_1 Depth=1
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB90_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umin_i8_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: andi $a3, $a1, 255
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB90_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: andi $a4, $a0, 255
+; LA64-LAMCAS-NEXT: sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT: xori $a4, $a4, 1
+; LA64-LAMCAS-NEXT: masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT: maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT: or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT: ext.w.b $a5, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a4, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a5, .LBB90_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw umin ptr %a, i8 %b seq_cst
+ ret i8 %1
+}
+
+define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i16_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB91_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a6, $a4, $a3
+; LA64-NEXT: move $a5, $a4
+; LA64-NEXT: bgeu $a1, $a6, .LBB91_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB91_1 Depth=1
+; LA64-NEXT: xor $a5, $a4, $a1
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: .LBB91_3: # in Loop: Header=BB91_1 Depth=1
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB91_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umin_i16_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB91_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-NEXT: sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT: xori $a4, $a4, 1
+; LA64-LAMCAS-NEXT: masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT: maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT: or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT: ext.w.h $a5, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a4, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a5, .LBB91_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw umin ptr %a, i16 %b seq_cst
+ ret i16 %1
+}
+
+define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i8_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a3, $a2, 24
+; LA64-NEXT: ori $a4, $zero, 255
+; LA64-NEXT: sll.w $a4, $a4, $a2
+; LA64-NEXT: ext.w.b $a1, $a1
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: xori $a3, $a3, 56
+; LA64-NEXT: .LBB92_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a5, $a0, 0
+; LA64-NEXT: and $a7, $a5, $a4
+; LA64-NEXT: move $a6, $a5
+; LA64-NEXT: sll.w $a7, $a7, $a3
+; LA64-NEXT: sra.w $a7, $a7, $a3
+; LA64-NEXT: bge $a7, $a1, .LBB92_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB92_1 Depth=1
+; LA64-NEXT: xor $a6, $a5, $a1
+; LA64-NEXT: and $a6, $a6, $a4
+; LA64-NEXT: xor $a6, $a5, $a6
+; LA64-NEXT: .LBB92_3: # in Loop: Header=BB92_1 Depth=1
+; LA64-NEXT: sc.w $a6, $a0, 0
+; LA64-NEXT: beqz $a6, .LBB92_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a5, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_max_i8_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: ext.w.b $a3, $a1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB92_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT: masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT: maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT: or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a5, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB92_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw max ptr %a, i8 %b seq_cst
+ ret i8 %1
+}
+
+define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i16_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a3, $a2, 24
+; LA64-NEXT: lu12i.w $a4, 15
+; LA64-NEXT: ori $a4, $a4, 4095
+; LA64-NEXT: sll.w $a4, $a4, $a2
+; LA64-NEXT: ext.w.h $a1, $a1
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: ori $a5, $zero, 48
+; LA64-NEXT: sub.d $a3, $a5, $a3
+; LA64-NEXT: .LBB93_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a5, $a0, 0
+; LA64-NEXT: and $a7, $a5, $a4
+; LA64-NEXT: move $a6, $a5
+; LA64-NEXT: sll.w $a7, $a7, $a3
+; LA64-NEXT: sra.w $a7, $a7, $a3
+; LA64-NEXT: bge $a7, $a1, .LBB93_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB93_1 Depth=1
+; LA64-NEXT: xor $a6, $a5, $a1
+; LA64-NEXT: and $a6, $a6, $a4
+; LA64-NEXT: xor $a6, $a5, $a6
+; LA64-NEXT: .LBB93_3: # in Loop: Header=BB93_1 Depth=1
+; LA64-NEXT: sc.w $a6, $a0, 0
+; LA64-NEXT: beqz $a6, .LBB93_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a5, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_max_i16_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: ext.w.h $a3, $a1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB93_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT: masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT: maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT: or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a5, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB93_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw max ptr %a, i16 %b seq_cst
+ ret i16 %1
+}
+
+define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i8_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a3, $a2, 24
+; LA64-NEXT: ori $a4, $zero, 255
+; LA64-NEXT: sll.w $a4, $a4, $a2
+; LA64-NEXT: ext.w.b $a1, $a1
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: xori $a3, $a3, 56
+; LA64-NEXT: .LBB94_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a5, $a0, 0
+; LA64-NEXT: and $a7, $a5, $a4
+; LA64-NEXT: move $a6, $a5
+; LA64-NEXT: sll.w $a7, $a7, $a3
+; LA64-NEXT: sra.w $a7, $a7, $a3
+; LA64-NEXT: bge $a1, $a7, .LBB94_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB94_1 Depth=1
+; LA64-NEXT: xor $a6, $a5, $a1
+; LA64-NEXT: and $a6, $a6, $a4
+; LA64-NEXT: xor $a6, $a5, $a6
+; LA64-NEXT: .LBB94_3: # in Loop: Header=BB94_1 Depth=1
+; LA64-NEXT: sc.w $a6, $a0, 0
+; LA64-NEXT: beqz $a6, .LBB94_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a5, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_min_i8_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: ext.w.b $a3, $a1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB94_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT: xori $a5, $a5, 1
+; LA64-LAMCAS-NEXT: masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT: maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT: or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a5, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB94_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw min ptr %a, i8 %b seq_cst
+ ret i8 %1
+}
+
+define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i16_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a3, $a2, 24
+; LA64-NEXT: lu12i.w $a4, 15
+; LA64-NEXT: ori $a4, $a4, 4095
+; LA64-NEXT: sll.w $a4, $a4, $a2
+; LA64-NEXT: ext.w.h $a1, $a1
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: ori $a5, $zero, 48
+; LA64-NEXT: sub.d $a3, $a5, $a3
+; LA64-NEXT: .LBB95_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a5, $a0, 0
+; LA64-NEXT: and $a7, $a5, $a4
+; LA64-NEXT: move $a6, $a5
+; LA64-NEXT: sll.w $a7, $a7, $a3
+; LA64-NEXT: sra.w $a7, $a7, $a3
+; LA64-NEXT: bge $a1, $a7, .LBB95_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB95_1 Depth=1
+; LA64-NEXT: xor $a6, $a5, $a1
+; LA64-NEXT: and $a6, $a6, $a4
+; LA64-NEXT: xor $a6, $a5, $a6
+; LA64-NEXT: .LBB95_3: # in Loop: Header=BB95_1 Depth=1
+; LA64-NEXT: sc.w $a6, $a0, 0
+; LA64-NEXT: beqz $a6, .LBB95_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a5, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_min_i16_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: ext.w.h $a3, $a1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB95_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT: xori $a5, $a5, 1
+; LA64-LAMCAS-NEXT: masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT: maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT: or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a5, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB95_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw min ptr %a, i16 %b seq_cst
+ ret i16 %1
+}
+
+define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i8_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB96_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a5, $a4, $a1
+; LA64-NEXT: nor $a5, $a5, $zero
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB96_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i8_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB96_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: nor $a3, $a3, $zero
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB96_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw nand ptr %a, i8 %b seq_cst
+ ret i8 %1
+}
+
+define i16 @atomicrmw_nand_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i16_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB97_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a5, $a4, $a1
+; LA64-NEXT: nor $a5, $a5, $zero
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB97_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i16_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB97_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: nor $a3, $a3, $zero
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB97_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw nand ptr %a, i16 %b seq_cst
+ ret i16 %1
+
+}
+
+define i8 @atomicrmw_and_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_and_i8_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: orn $a1, $a1, $a3
+; LA64-NEXT: amand_db.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_and_i8_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB98_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB98_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw and ptr %a, i8 %b seq_cst
+ ret i8 %1
+}
+
+define i16 @atomicrmw_and_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_and_i16_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: orn $a1, $a1, $a3
+; LA64-NEXT: amand_db.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_and_i16_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB99_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB99_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw and ptr %a, i16 %b seq_cst
+ ret i16 %1
+
+}
+
+define i8 @atomicrmw_or_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_or_i8_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: amor_db.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_or_i8_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB100_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: or $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB100_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw or ptr %a, i8 %b seq_cst
+ ret i8 %1
+}
+
+define i16 @atomicrmw_or_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_or_i16_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: amor_db.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_or_i16_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB101_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: or $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB101_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw or ptr %a, i16 %b seq_cst
+ ret i16 %1
+
+}
+
+define i8 @atomicrmw_xor_i8_seq_cst(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_xor_i8_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: amxor_db.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xor_i8_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB102_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: xor $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB102_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xor ptr %a, i8 %b seq_cst
+ ret i8 %1
+}
+
+define i16 @atomicrmw_xor_i16_seq_cst(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_xor_i16_seq_cst:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: amxor_db.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xor_i16_seq_cst:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB103_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: xor $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas_db.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB103_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xor ptr %a, i16 %b seq_cst
+ ret i16 %1
+
+}
+
+define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_xchg_i8_monotonic:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB104_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: addi.w $a5, $a1, 0
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB104_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_i8_monotonic:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: ld.bu $a2, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB104_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.b $a3, $a2
+; LA64-LAMCAS-NEXT: amcas.b $a2, $a1, $a0
+; LA64-LAMCAS-NEXT: bne $a2, $a3, .LBB104_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: move $a0, $a2
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i8 %b monotonic
+ ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_0_i8_monotonic(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_0_i8_monotonic:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a1, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a2, $zero, 255
+; LA64-NEXT: sll.w $a2, $a2, $a1
+; LA64-NEXT: nor $a2, $a2, $zero
+; LA64-NEXT: amand.w $a3, $a2, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a1
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_0_i8_monotonic:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a1, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB105_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.b $a2, $a0
+; LA64-LAMCAS-NEXT: amcas.b $a0, $zero, $a1
+; LA64-LAMCAS-NEXT: bne $a0, $a2, .LBB105_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i8 0 monotonic
+ ret i8 %1
+}
+
+define i8 @atomicrmw_xchg_minus_1_i8_monotonic(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_minus_1_i8_monotonic:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a1, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a2, $zero, 255
+; LA64-NEXT: sll.w $a2, $a2, $a1
+; LA64-NEXT: amor.w $a3, $a2, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a1
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_minus_1_i8_monotonic:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a1, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: addi.w $a2, $zero, -1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB106_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.b $a3, $a0
+; LA64-LAMCAS-NEXT: amcas.b $a0, $a2, $a1
+; LA64-LAMCAS-NEXT: bne $a0, $a3, .LBB106_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i8 -1 monotonic
+ ret i8 %1
+}
+
+define i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_xchg_i16_monotonic:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB107_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: addi.w $a5, $a1, 0
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB107_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_i16_monotonic:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: ld.hu $a2, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB107_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.h $a3, $a2
+; LA64-LAMCAS-NEXT: amcas.h $a2, $a1, $a0
+; LA64-LAMCAS-NEXT: bne $a2, $a3, .LBB107_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: move $a0, $a2
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i16 %b monotonic
+ ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_0_i16_monotonic(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_0_i16_monotonic:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a1, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a2, 15
+; LA64-NEXT: ori $a2, $a2, 4095
+; LA64-NEXT: sll.w $a2, $a2, $a1
+; LA64-NEXT: nor $a2, $a2, $zero
+; LA64-NEXT: amand.w $a3, $a2, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a1
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_0_i16_monotonic:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a1, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB108_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.h $a2, $a0
+; LA64-LAMCAS-NEXT: amcas.h $a0, $zero, $a1
+; LA64-LAMCAS-NEXT: bne $a0, $a2, .LBB108_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i16 0 monotonic
+ ret i16 %1
+}
+
+define i16 @atomicrmw_xchg_minus_1_i16_monotonic(ptr %a) nounwind {
+; LA64-LABEL: atomicrmw_xchg_minus_1_i16_monotonic:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a1, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a2, 15
+; LA64-NEXT: ori $a2, $a2, 4095
+; LA64-NEXT: sll.w $a2, $a2, $a1
+; LA64-NEXT: amor.w $a3, $a2, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a1
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xchg_minus_1_i16_monotonic:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a1, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: addi.w $a2, $zero, -1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB109_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.h $a3, $a0
+; LA64-LAMCAS-NEXT: amcas.h $a0, $a2, $a1
+; LA64-LAMCAS-NEXT: bne $a0, $a3, .LBB109_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xchg ptr %a, i16 -1 monotonic
+ ret i16 %1
+
+}
+
+define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_add_i8_monotonic:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB110_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: add.w $a5, $a4, $a1
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB110_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_add_i8_monotonic:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB110_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: add.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB110_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw add ptr %a, i8 %b monotonic
+ ret i8 %1
+}
+
+define i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_add_i16_monotonic:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB111_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: add.w $a5, $a4, $a1
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB111_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_add_i16_monotonic:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB111_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: add.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB111_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw add ptr %a, i16 %b monotonic
+ ret i16 %1
+
+}
+
+define i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_sub_i8_monotonic:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB112_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: sub.w $a5, $a4, $a1
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB112_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_sub_i8_monotonic:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB112_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: sub.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB112_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw sub ptr %a, i8 %b monotonic
+ ret i8 %1
+}
+
+define i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_sub_i16_monotonic:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB113_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: sub.w $a5, $a4, $a1
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB113_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_sub_i16_monotonic:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB113_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: sub.d $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB113_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw sub ptr %a, i16 %b monotonic
+ ret i16 %1
+
+}
+
+define i8 @atomicrmw_umax_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i8_monotonic:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB114_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a6, $a4, $a3
+; LA64-NEXT: move $a5, $a4
+; LA64-NEXT: bgeu $a6, $a1, .LBB114_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB114_1 Depth=1
+; LA64-NEXT: xor $a5, $a4, $a1
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: .LBB114_3: # in Loop: Header=BB114_1 Depth=1
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB114_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umax_i8_monotonic:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: andi $a3, $a1, 255
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB114_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: andi $a4, $a0, 255
+; LA64-LAMCAS-NEXT: sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT: masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT: maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT: or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT: ext.w.b $a5, $a0
+; LA64-LAMCAS-NEXT: amcas.b $a0, $a4, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a5, .LBB114_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw umax ptr %a, i8 %b monotonic
+ ret i8 %1
+}
+
+define i16 @atomicrmw_umax_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umax_i16_monotonic:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB115_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a6, $a4, $a3
+; LA64-NEXT: move $a5, $a4
+; LA64-NEXT: bgeu $a6, $a1, .LBB115_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB115_1 Depth=1
+; LA64-NEXT: xor $a5, $a4, $a1
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: .LBB115_3: # in Loop: Header=BB115_1 Depth=1
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB115_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umax_i16_monotonic:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB115_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-NEXT: sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT: masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT: maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT: or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT: ext.w.h $a5, $a0
+; LA64-LAMCAS-NEXT: amcas.h $a0, $a4, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a5, .LBB115_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw umax ptr %a, i16 %b monotonic
+ ret i16 %1
+}
+
+define i8 @atomicrmw_umin_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i8_monotonic:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB116_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a6, $a4, $a3
+; LA64-NEXT: move $a5, $a4
+; LA64-NEXT: bgeu $a1, $a6, .LBB116_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB116_1 Depth=1
+; LA64-NEXT: xor $a5, $a4, $a1
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: .LBB116_3: # in Loop: Header=BB116_1 Depth=1
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB116_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umin_i8_monotonic:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: andi $a3, $a1, 255
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB116_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: andi $a4, $a0, 255
+; LA64-LAMCAS-NEXT: sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT: xori $a4, $a4, 1
+; LA64-LAMCAS-NEXT: masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT: maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT: or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT: ext.w.b $a5, $a0
+; LA64-LAMCAS-NEXT: amcas.b $a0, $a4, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a5, .LBB116_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw umin ptr %a, i8 %b monotonic
+ ret i8 %1
+}
+
+define i16 @atomicrmw_umin_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_umin_i16_monotonic:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB117_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a6, $a4, $a3
+; LA64-NEXT: move $a5, $a4
+; LA64-NEXT: bgeu $a1, $a6, .LBB117_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB117_1 Depth=1
+; LA64-NEXT: xor $a5, $a4, $a1
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: .LBB117_3: # in Loop: Header=BB117_1 Depth=1
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB117_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_umin_i16_monotonic:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: bstrpick.d $a3, $a1, 15, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB117_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: bstrpick.d $a4, $a0, 15, 0
+; LA64-LAMCAS-NEXT: sltu $a4, $a3, $a4
+; LA64-LAMCAS-NEXT: xori $a4, $a4, 1
+; LA64-LAMCAS-NEXT: masknez $a5, $a1, $a4
+; LA64-LAMCAS-NEXT: maskeqz $a4, $a0, $a4
+; LA64-LAMCAS-NEXT: or $a4, $a4, $a5
+; LA64-LAMCAS-NEXT: ext.w.h $a5, $a0
+; LA64-LAMCAS-NEXT: amcas.h $a0, $a4, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a5, .LBB117_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw umin ptr %a, i16 %b monotonic
+ ret i16 %1
+}
+
+define i8 @atomicrmw_max_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i8_monotonic:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a3, $a2, 24
+; LA64-NEXT: ori $a4, $zero, 255
+; LA64-NEXT: sll.w $a4, $a4, $a2
+; LA64-NEXT: ext.w.b $a1, $a1
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: xori $a3, $a3, 56
+; LA64-NEXT: .LBB118_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a5, $a0, 0
+; LA64-NEXT: and $a7, $a5, $a4
+; LA64-NEXT: move $a6, $a5
+; LA64-NEXT: sll.w $a7, $a7, $a3
+; LA64-NEXT: sra.w $a7, $a7, $a3
+; LA64-NEXT: bge $a7, $a1, .LBB118_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB118_1 Depth=1
+; LA64-NEXT: xor $a6, $a5, $a1
+; LA64-NEXT: and $a6, $a6, $a4
+; LA64-NEXT: xor $a6, $a5, $a6
+; LA64-NEXT: .LBB118_3: # in Loop: Header=BB118_1 Depth=1
+; LA64-NEXT: sc.w $a6, $a0, 0
+; LA64-NEXT: beqz $a6, .LBB118_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a5, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_max_i8_monotonic:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: ext.w.b $a3, $a1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB118_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT: masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT: maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT: or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT: amcas.b $a0, $a5, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB118_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw max ptr %a, i8 %b monotonic
+ ret i8 %1
+}
+
+define i16 @atomicrmw_max_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_max_i16_monotonic:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a3, $a2, 24
+; LA64-NEXT: lu12i.w $a4, 15
+; LA64-NEXT: ori $a4, $a4, 4095
+; LA64-NEXT: sll.w $a4, $a4, $a2
+; LA64-NEXT: ext.w.h $a1, $a1
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: ori $a5, $zero, 48
+; LA64-NEXT: sub.d $a3, $a5, $a3
+; LA64-NEXT: .LBB119_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a5, $a0, 0
+; LA64-NEXT: and $a7, $a5, $a4
+; LA64-NEXT: move $a6, $a5
+; LA64-NEXT: sll.w $a7, $a7, $a3
+; LA64-NEXT: sra.w $a7, $a7, $a3
+; LA64-NEXT: bge $a7, $a1, .LBB119_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB119_1 Depth=1
+; LA64-NEXT: xor $a6, $a5, $a1
+; LA64-NEXT: and $a6, $a6, $a4
+; LA64-NEXT: xor $a6, $a5, $a6
+; LA64-NEXT: .LBB119_3: # in Loop: Header=BB119_1 Depth=1
+; LA64-NEXT: sc.w $a6, $a0, 0
+; LA64-NEXT: beqz $a6, .LBB119_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a5, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_max_i16_monotonic:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: ext.w.h $a3, $a1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB119_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT: masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT: maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT: or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT: amcas.h $a0, $a5, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB119_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw max ptr %a, i16 %b monotonic
+ ret i16 %1
+}
+
+define i8 @atomicrmw_min_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i8_monotonic:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a3, $a2, 24
+; LA64-NEXT: ori $a4, $zero, 255
+; LA64-NEXT: sll.w $a4, $a4, $a2
+; LA64-NEXT: ext.w.b $a1, $a1
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: xori $a3, $a3, 56
+; LA64-NEXT: .LBB120_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a5, $a0, 0
+; LA64-NEXT: and $a7, $a5, $a4
+; LA64-NEXT: move $a6, $a5
+; LA64-NEXT: sll.w $a7, $a7, $a3
+; LA64-NEXT: sra.w $a7, $a7, $a3
+; LA64-NEXT: bge $a1, $a7, .LBB120_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB120_1 Depth=1
+; LA64-NEXT: xor $a6, $a5, $a1
+; LA64-NEXT: and $a6, $a6, $a4
+; LA64-NEXT: xor $a6, $a5, $a6
+; LA64-NEXT: .LBB120_3: # in Loop: Header=BB120_1 Depth=1
+; LA64-NEXT: sc.w $a6, $a0, 0
+; LA64-NEXT: beqz $a6, .LBB120_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a5, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_min_i8_monotonic:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: ext.w.b $a3, $a1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB120_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT: xori $a5, $a5, 1
+; LA64-LAMCAS-NEXT: masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT: maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT: or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT: amcas.b $a0, $a5, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB120_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw min ptr %a, i8 %b monotonic
+ ret i8 %1
+}
+
+define i16 @atomicrmw_min_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_min_i16_monotonic:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a3, $a2, 24
+; LA64-NEXT: lu12i.w $a4, 15
+; LA64-NEXT: ori $a4, $a4, 4095
+; LA64-NEXT: sll.w $a4, $a4, $a2
+; LA64-NEXT: ext.w.h $a1, $a1
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: ori $a5, $zero, 48
+; LA64-NEXT: sub.d $a3, $a5, $a3
+; LA64-NEXT: .LBB121_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a5, $a0, 0
+; LA64-NEXT: and $a7, $a5, $a4
+; LA64-NEXT: move $a6, $a5
+; LA64-NEXT: sll.w $a7, $a7, $a3
+; LA64-NEXT: sra.w $a7, $a7, $a3
+; LA64-NEXT: bge $a1, $a7, .LBB121_3
+; LA64-NEXT: # %bb.2: # in Loop: Header=BB121_1 Depth=1
+; LA64-NEXT: xor $a6, $a5, $a1
+; LA64-NEXT: and $a6, $a6, $a4
+; LA64-NEXT: xor $a6, $a5, $a6
+; LA64-NEXT: .LBB121_3: # in Loop: Header=BB121_1 Depth=1
+; LA64-NEXT: sc.w $a6, $a0, 0
+; LA64-NEXT: beqz $a6, .LBB121_1
+; LA64-NEXT: # %bb.4:
+; LA64-NEXT: srl.w $a0, $a5, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_min_i16_monotonic:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: ext.w.h $a3, $a1
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB121_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: slt $a5, $a3, $a4
+; LA64-LAMCAS-NEXT: xori $a5, $a5, 1
+; LA64-LAMCAS-NEXT: masknez $a6, $a1, $a5
+; LA64-LAMCAS-NEXT: maskeqz $a5, $a0, $a5
+; LA64-LAMCAS-NEXT: or $a5, $a5, $a6
+; LA64-LAMCAS-NEXT: amcas.h $a0, $a5, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB121_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw min ptr %a, i16 %b monotonic
+ ret i16 %1
+}
+
+
+
+define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i8_monotonic:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB122_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a5, $a4, $a1
+; LA64-NEXT: nor $a5, $a5, $zero
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB122_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i8_monotonic:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB122_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: nor $a3, $a3, $zero
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB122_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw nand ptr %a, i8 %b monotonic
+ ret i8 %1
+}
+
+define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_nand_i16_monotonic:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: .LBB123_1: # =>This Inner Loop Header: Depth=1
+; LA64-NEXT: ll.w $a4, $a0, 0
+; LA64-NEXT: and $a5, $a4, $a1
+; LA64-NEXT: nor $a5, $a5, $zero
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: and $a5, $a5, $a3
+; LA64-NEXT: xor $a5, $a4, $a5
+; LA64-NEXT: sc.w $a5, $a0, 0
+; LA64-NEXT: beqz $a5, .LBB123_1
+; LA64-NEXT: # %bb.2:
+; LA64-NEXT: srl.w $a0, $a4, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_nand_i16_monotonic:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB123_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: nor $a3, $a3, $zero
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB123_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw nand ptr %a, i16 %b monotonic
+ ret i16 %1
+
+}
+
+define i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_and_i8_monotonic:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: ori $a3, $zero, 255
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: orn $a1, $a1, $a3
+; LA64-NEXT: amand.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_and_i8_monotonic:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB124_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB124_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw and ptr %a, i8 %b monotonic
+ ret i8 %1
+}
+
+define i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_and_i16_monotonic:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: lu12i.w $a3, 15
+; LA64-NEXT: ori $a3, $a3, 4095
+; LA64-NEXT: sll.w $a3, $a3, $a2
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: orn $a1, $a1, $a3
+; LA64-NEXT: amand.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_and_i16_monotonic:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB125_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: and $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB125_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw and ptr %a, i16 %b monotonic
+ ret i16 %1
+
+}
+
+define i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_or_i8_monotonic:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: amor.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_or_i8_monotonic:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB126_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: or $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB126_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw or ptr %a, i8 %b monotonic
+ ret i8 %1
+}
+
+define i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind {
+; LA64-LABEL: atomicrmw_or_i16_monotonic:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: amor.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_or_i16_monotonic:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.hu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB127_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: or $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.h $a4, $a0
+; LA64-LAMCAS-NEXT: amcas.h $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB127_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw or ptr %a, i16 %b monotonic
+ ret i16 %1
+
+}
+
+define i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
+; LA64-LABEL: atomicrmw_xor_i8_monotonic:
+; LA64: # %bb.0:
+; LA64-NEXT: slli.d $a2, $a0, 3
+; LA64-NEXT: bstrins.d $a0, $zero, 1, 0
+; LA64-NEXT: andi $a1, $a1, 255
+; LA64-NEXT: sll.w $a1, $a1, $a2
+; LA64-NEXT: amxor.w $a3, $a1, $a0
+; LA64-NEXT: srl.w $a0, $a3, $a2
+; LA64-NEXT: ret
+;
+; LA64-LAMCAS-LABEL: atomicrmw_xor_i8_monotonic:
+; LA64-LAMCAS: # %bb.0:
+; LA64-LAMCAS-NEXT: move $a2, $a0
+; LA64-LAMCAS-NEXT: ld.bu $a0, $a0, 0
+; LA64-LAMCAS-NEXT: .p2align 4, , 16
+; LA64-LAMCAS-NEXT: .LBB128_1: # %atomicrmw.start
+; LA64-LAMCAS-NEXT: # =>This Inner Loop Header: Depth=1
+; LA64-LAMCAS-NEXT: xor $a3, $a0, $a1
+; LA64-LAMCAS-NEXT: ext.w.b $a4, $a0
+; LA64-LAMCAS-NEXT: amcas.b $a0, $a3, $a2
+; LA64-LAMCAS-NEXT: bne $a0, $a4, .LBB128_1
+; LA64-LAMCAS-NEXT: # %bb.2: # %atomicrmw.end
+; LA64-LAMCAS-NEXT: ret
+ %1 = atomicrmw xor ptr %a, i8 %b monotonic
+ ret i8 %1
+}
+
+define i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind {
----------------
tangaac wrote:
When -mlamcas is enabled, atomicrmw (and/or/xor) on i8/i16 shouldn't be lowered to an amcas[_db] retry loop. They can still be expanded with the word-sized amand.w, amor.w, and amxor.w instructions, as in the IR sketch below.
https://github.com/llvm/llvm-project/blob/5df84a75351d0e9c3e20d50ac1047c937e3b8e88/llvm/lib/CodeGen/AtomicExpandPass.cpp#L670C1-L684C4
How to fix it?
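Not from the patch — just a rough illustration of what that expansion looks like at the IR level, with hypothetical function and value names. The operand byte is shifted into position and every other bit of the containing word is set to 1, so a single word-sized atomicrmw 'and' (which lowers to amand.w / amand_db.w) leaves the neighbouring bytes untouched:

define i8 @atomicrmw_and_i8_widened(ptr %a, i8 %b) nounwind {
  ; Align the pointer down to the containing 32-bit word and compute
  ; the byte's bit offset within that word.
  %iptr = ptrtoint ptr %a to i64
  %wordaddr = and i64 %iptr, -4
  %wptr = inttoptr i64 %wordaddr to ptr
  %byteoff = and i64 %iptr, 3
  %shift64 = shl i64 %byteoff, 3
  %shift = trunc i64 %shift64 to i32
  ; Widen the operand: shift the byte into place and set all bits
  ; outside it, so the word-sized 'and' preserves the other bytes.
  %val = zext i8 %b to i32
  %val.shifted = shl i32 %val, %shift
  %mask = shl i32 255, %shift
  %inv.mask = xor i32 %mask, -1
  %wide = or i32 %val.shifted, %inv.mask
  ; One word-sized atomic op instead of an amcas retry loop.
  %old = atomicrmw and ptr %wptr, i32 %wide monotonic
  ; Recover the original byte value.
  %old.shifted = lshr i32 %old, %shift
  %res = trunc i32 %old.shifted to i8
  ret i8 %res
}

(or/xor are simpler still: no inverted mask is needed, since the bits of the shifted operand outside the target byte are already zero.)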
https://github.com/llvm/llvm-project/pull/114189