[llvm] 5b07059 - [RISCV] Slightly weaken expanded seq_cst atomic op to match reference mapping in the spec
Alex Bradbury via llvm-commits
llvm-commits at lists.llvm.org
Thu Mar 30 12:48:49 PDT 2023
Author: Alex Bradbury
Date: 2023-03-30T20:47:28+01:00
New Revision: 5b0705961841349c3b90a46e5349cd9469e0382f
URL: https://github.com/llvm/llvm-project/commit/5b0705961841349c3b90a46e5349cd9469e0382f
DIFF: https://github.com/llvm/llvm-project/commit/5b0705961841349c3b90a46e5349cd9469e0382f.diff
LOG: [RISCV] Slightly weaken expanded seq_cst atomic op to match reference mapping in the spec
Table A.6 in the RISC-V ISA Manual indicates that sequentially
consistent atomic ops that have a matching instruction should be mapped
to `amo<op>.{w|d}.aqrl`. But sequentially consistent operations that are
mapped to lr/sc should produce `loop: lr.{w|d}.aqrl; <op>; sc.{w|d}.rl;
bnez loop`. Previously, LLVM produced an `sc.{w|d}.aqrl` which was
stronger than necessary. This patch adjusts the relevant logic so that a
`sc.{w|d}.rl` is produced.
Differential Revision: https://reviews.llvm.org/D146933
Added:
Modified:
llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll
llvm/test/CodeGen/RISCV/atomic-cmpxchg-flag.ll
llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
llvm/test/CodeGen/RISCV/atomic-rmw.ll
llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
index 9cca52617ab51..967df35b4c447 100644
--- a/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
+++ b/llvm/lib/Target/RISCV/RISCVExpandAtomicPseudoInsts.cpp
@@ -178,7 +178,7 @@ static unsigned getSCForRMW32(AtomicOrdering Ordering) {
case AtomicOrdering::AcquireRelease:
return RISCV::SC_W_RL;
case AtomicOrdering::SequentiallyConsistent:
- return RISCV::SC_W_AQ_RL;
+ return RISCV::SC_W_RL;
}
}
@@ -212,7 +212,7 @@ static unsigned getSCForRMW64(AtomicOrdering Ordering) {
case AtomicOrdering::AcquireRelease:
return RISCV::SC_D_RL;
case AtomicOrdering::SequentiallyConsistent:
- return RISCV::SC_D_AQ_RL;
+ return RISCV::SC_D_RL;
}
}
diff --git a/llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll b/llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll
index eb2691cbcd598..651f58d324422 100644
--- a/llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-cmpxchg-branch-on-result.ll
@@ -20,7 +20,7 @@ define void @cmpxchg_and_branch1(ptr %ptr, i32 signext %cmp, i32 signext %val) n
; CHECK-NEXT: bne a3, a1, .LBB0_1
; CHECK-NEXT: # %bb.4: # %do_cmpxchg
; CHECK-NEXT: # in Loop: Header=BB0_3 Depth=2
-; CHECK-NEXT: sc.w.aqrl a4, a2, (a0)
+; CHECK-NEXT: sc.w.rl a4, a2, (a0)
; CHECK-NEXT: bnez a4, .LBB0_3
; CHECK-NEXT: # %bb.5: # %do_cmpxchg
; CHECK-NEXT: # %bb.2: # %exit
@@ -48,7 +48,7 @@ define void @cmpxchg_and_branch2(ptr %ptr, i32 signext %cmp, i32 signext %val) n
; CHECK-NEXT: bne a3, a1, .LBB1_5
; CHECK-NEXT: # %bb.4: # %do_cmpxchg
; CHECK-NEXT: # in Loop: Header=BB1_3 Depth=2
-; CHECK-NEXT: sc.w.aqrl a4, a2, (a0)
+; CHECK-NEXT: sc.w.rl a4, a2, (a0)
; CHECK-NEXT: bnez a4, .LBB1_3
; CHECK-NEXT: .LBB1_5: # %do_cmpxchg
; CHECK-NEXT: # in Loop: Header=BB1_1 Depth=1
@@ -90,7 +90,7 @@ define void @cmpxchg_masked_and_branch1(ptr %ptr, i8 signext %cmp, i8 signext %v
; RV32IA-NEXT: xor a5, a4, a2
; RV32IA-NEXT: and a5, a5, a0
; RV32IA-NEXT: xor a5, a4, a5
-; RV32IA-NEXT: sc.w.aqrl a5, a5, (a3)
+; RV32IA-NEXT: sc.w.rl a5, a5, (a3)
; RV32IA-NEXT: bnez a5, .LBB2_3
; RV32IA-NEXT: # %bb.5: # %do_cmpxchg
; RV32IA-NEXT: # %bb.2: # %exit
@@ -120,7 +120,7 @@ define void @cmpxchg_masked_and_branch1(ptr %ptr, i8 signext %cmp, i8 signext %v
; RV64IA-NEXT: xor a5, a4, a2
; RV64IA-NEXT: and a5, a5, a0
; RV64IA-NEXT: xor a5, a4, a5
-; RV64IA-NEXT: sc.w.aqrl a5, a5, (a3)
+; RV64IA-NEXT: sc.w.rl a5, a5, (a3)
; RV64IA-NEXT: bnez a5, .LBB2_3
; RV64IA-NEXT: # %bb.5: # %do_cmpxchg
; RV64IA-NEXT: # %bb.2: # %exit
@@ -160,7 +160,7 @@ define void @cmpxchg_masked_and_branch2(ptr %ptr, i8 signext %cmp, i8 signext %v
; RV32IA-NEXT: xor a5, a4, a2
; RV32IA-NEXT: and a5, a5, a0
; RV32IA-NEXT: xor a5, a4, a5
-; RV32IA-NEXT: sc.w.aqrl a5, a5, (a3)
+; RV32IA-NEXT: sc.w.rl a5, a5, (a3)
; RV32IA-NEXT: bnez a5, .LBB3_3
; RV32IA-NEXT: .LBB3_5: # %do_cmpxchg
; RV32IA-NEXT: # in Loop: Header=BB3_1 Depth=1
@@ -193,7 +193,7 @@ define void @cmpxchg_masked_and_branch2(ptr %ptr, i8 signext %cmp, i8 signext %v
; RV64IA-NEXT: xor a5, a4, a2
; RV64IA-NEXT: and a5, a5, a0
; RV64IA-NEXT: xor a5, a4, a5
-; RV64IA-NEXT: sc.w.aqrl a5, a5, (a3)
+; RV64IA-NEXT: sc.w.rl a5, a5, (a3)
; RV64IA-NEXT: bnez a5, .LBB3_3
; RV64IA-NEXT: .LBB3_5: # %do_cmpxchg
; RV64IA-NEXT: # in Loop: Header=BB3_1 Depth=1
@@ -224,7 +224,7 @@ define void @cmpxchg_and_irrelevant_branch(ptr %ptr, i32 signext %cmp, i32 signe
; CHECK-NEXT: bne a4, a1, .LBB4_5
; CHECK-NEXT: # %bb.4: # %do_cmpxchg
; CHECK-NEXT: # in Loop: Header=BB4_3 Depth=2
-; CHECK-NEXT: sc.w.aqrl a5, a2, (a0)
+; CHECK-NEXT: sc.w.rl a5, a2, (a0)
; CHECK-NEXT: bnez a5, .LBB4_3
; CHECK-NEXT: .LBB4_5: # %do_cmpxchg
; CHECK-NEXT: # in Loop: Header=BB4_1 Depth=1
diff --git a/llvm/test/CodeGen/RISCV/atomic-cmpxchg-flag.ll b/llvm/test/CodeGen/RISCV/atomic-cmpxchg-flag.ll
index 6cb13bfd53c4f..f25571b5cf253 100644
--- a/llvm/test/CodeGen/RISCV/atomic-cmpxchg-flag.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-cmpxchg-flag.ll
@@ -15,7 +15,7 @@ define i1 @cmpxchg_i32_seq_cst_seq_cst(ptr %ptr, i32 signext %cmp,
; RV64IA-NEXT: bne a3, a1, .LBB0_3
; RV64IA-NEXT: # %bb.2: # %entry
; RV64IA-NEXT: # in Loop: Header=BB0_1 Depth=1
-; RV64IA-NEXT: sc.w.aqrl a4, a2, (a0)
+; RV64IA-NEXT: sc.w.rl a4, a2, (a0)
; RV64IA-NEXT: bnez a4, .LBB0_1
; RV64IA-NEXT: .LBB0_3: # %entry
; RV64IA-NEXT: xor a1, a3, a1
diff --git a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
index 3208110b93105..d04f85ef30783 100644
--- a/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-cmpxchg.ll
@@ -576,7 +576,7 @@ define void @cmpxchg_i8_seq_cst_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-NEXT: xor a5, a2, a0
; RV32IA-NEXT: and a5, a5, a4
; RV32IA-NEXT: xor a5, a2, a5
-; RV32IA-NEXT: sc.w.aqrl a5, a5, (a3)
+; RV32IA-NEXT: sc.w.rl a5, a5, (a3)
; RV32IA-NEXT: bnez a5, .LBB7_1
; RV32IA-NEXT: .LBB7_3:
; RV32IA-NEXT: ret
@@ -612,7 +612,7 @@ define void @cmpxchg_i8_seq_cst_monotonic(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-NEXT: xor a5, a2, a0
; RV64IA-NEXT: and a5, a5, a4
; RV64IA-NEXT: xor a5, a2, a5
-; RV64IA-NEXT: sc.w.aqrl a5, a5, (a3)
+; RV64IA-NEXT: sc.w.rl a5, a5, (a3)
; RV64IA-NEXT: bnez a5, .LBB7_1
; RV64IA-NEXT: .LBB7_3:
; RV64IA-NEXT: ret
@@ -652,7 +652,7 @@ define void @cmpxchg_i8_seq_cst_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-NEXT: xor a5, a2, a0
; RV32IA-NEXT: and a5, a5, a4
; RV32IA-NEXT: xor a5, a2, a5
-; RV32IA-NEXT: sc.w.aqrl a5, a5, (a3)
+; RV32IA-NEXT: sc.w.rl a5, a5, (a3)
; RV32IA-NEXT: bnez a5, .LBB8_1
; RV32IA-NEXT: .LBB8_3:
; RV32IA-NEXT: ret
@@ -688,7 +688,7 @@ define void @cmpxchg_i8_seq_cst_acquire(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-NEXT: xor a5, a2, a0
; RV64IA-NEXT: and a5, a5, a4
; RV64IA-NEXT: xor a5, a2, a5
-; RV64IA-NEXT: sc.w.aqrl a5, a5, (a3)
+; RV64IA-NEXT: sc.w.rl a5, a5, (a3)
; RV64IA-NEXT: bnez a5, .LBB8_1
; RV64IA-NEXT: .LBB8_3:
; RV64IA-NEXT: ret
@@ -728,7 +728,7 @@ define void @cmpxchg_i8_seq_cst_seq_cst(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV32IA-NEXT: xor a5, a2, a0
; RV32IA-NEXT: and a5, a5, a4
; RV32IA-NEXT: xor a5, a2, a5
-; RV32IA-NEXT: sc.w.aqrl a5, a5, (a3)
+; RV32IA-NEXT: sc.w.rl a5, a5, (a3)
; RV32IA-NEXT: bnez a5, .LBB9_1
; RV32IA-NEXT: .LBB9_3:
; RV32IA-NEXT: ret
@@ -764,7 +764,7 @@ define void @cmpxchg_i8_seq_cst_seq_cst(ptr %ptr, i8 %cmp, i8 %val) nounwind {
; RV64IA-NEXT: xor a5, a2, a0
; RV64IA-NEXT: and a5, a5, a4
; RV64IA-NEXT: xor a5, a2, a5
-; RV64IA-NEXT: sc.w.aqrl a5, a5, (a3)
+; RV64IA-NEXT: sc.w.rl a5, a5, (a3)
; RV64IA-NEXT: bnez a5, .LBB9_1
; RV64IA-NEXT: .LBB9_3:
; RV64IA-NEXT: ret
@@ -1351,7 +1351,7 @@ define void @cmpxchg_i16_seq_cst_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounwin
; RV32IA-NEXT: xor a4, a2, a0
; RV32IA-NEXT: and a4, a4, a5
; RV32IA-NEXT: xor a4, a2, a4
-; RV32IA-NEXT: sc.w.aqrl a4, a4, (a3)
+; RV32IA-NEXT: sc.w.rl a4, a4, (a3)
; RV32IA-NEXT: bnez a4, .LBB17_1
; RV32IA-NEXT: .LBB17_3:
; RV32IA-NEXT: ret
@@ -1388,7 +1388,7 @@ define void @cmpxchg_i16_seq_cst_monotonic(ptr %ptr, i16 %cmp, i16 %val) nounwin
; RV64IA-NEXT: xor a4, a2, a0
; RV64IA-NEXT: and a4, a4, a5
; RV64IA-NEXT: xor a4, a2, a4
-; RV64IA-NEXT: sc.w.aqrl a4, a4, (a3)
+; RV64IA-NEXT: sc.w.rl a4, a4, (a3)
; RV64IA-NEXT: bnez a4, .LBB17_1
; RV64IA-NEXT: .LBB17_3:
; RV64IA-NEXT: ret
@@ -1429,7 +1429,7 @@ define void @cmpxchg_i16_seq_cst_acquire(ptr %ptr, i16 %cmp, i16 %val) nounwind
; RV32IA-NEXT: xor a4, a2, a0
; RV32IA-NEXT: and a4, a4, a5
; RV32IA-NEXT: xor a4, a2, a4
-; RV32IA-NEXT: sc.w.aqrl a4, a4, (a3)
+; RV32IA-NEXT: sc.w.rl a4, a4, (a3)
; RV32IA-NEXT: bnez a4, .LBB18_1
; RV32IA-NEXT: .LBB18_3:
; RV32IA-NEXT: ret
@@ -1466,7 +1466,7 @@ define void @cmpxchg_i16_seq_cst_acquire(ptr %ptr, i16 %cmp, i16 %val) nounwind
; RV64IA-NEXT: xor a4, a2, a0
; RV64IA-NEXT: and a4, a4, a5
; RV64IA-NEXT: xor a4, a2, a4
-; RV64IA-NEXT: sc.w.aqrl a4, a4, (a3)
+; RV64IA-NEXT: sc.w.rl a4, a4, (a3)
; RV64IA-NEXT: bnez a4, .LBB18_1
; RV64IA-NEXT: .LBB18_3:
; RV64IA-NEXT: ret
@@ -1507,7 +1507,7 @@ define void @cmpxchg_i16_seq_cst_seq_cst(ptr %ptr, i16 %cmp, i16 %val) nounwind
; RV32IA-NEXT: xor a4, a2, a0
; RV32IA-NEXT: and a4, a4, a5
; RV32IA-NEXT: xor a4, a2, a4
-; RV32IA-NEXT: sc.w.aqrl a4, a4, (a3)
+; RV32IA-NEXT: sc.w.rl a4, a4, (a3)
; RV32IA-NEXT: bnez a4, .LBB19_1
; RV32IA-NEXT: .LBB19_3:
; RV32IA-NEXT: ret
@@ -1544,7 +1544,7 @@ define void @cmpxchg_i16_seq_cst_seq_cst(ptr %ptr, i16 %cmp, i16 %val) nounwind
; RV64IA-NEXT: xor a4, a2, a0
; RV64IA-NEXT: and a4, a4, a5
; RV64IA-NEXT: xor a4, a2, a4
-; RV64IA-NEXT: sc.w.aqrl a4, a4, (a3)
+; RV64IA-NEXT: sc.w.rl a4, a4, (a3)
; RV64IA-NEXT: bnez a4, .LBB19_1
; RV64IA-NEXT: .LBB19_3:
; RV64IA-NEXT: ret
@@ -1943,7 +1943,7 @@ define void @cmpxchg_i32_seq_cst_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounwin
; RV32IA-NEXT: lr.w.aqrl a3, (a0)
; RV32IA-NEXT: bne a3, a1, .LBB27_3
; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB27_1 Depth=1
-; RV32IA-NEXT: sc.w.aqrl a4, a2, (a0)
+; RV32IA-NEXT: sc.w.rl a4, a2, (a0)
; RV32IA-NEXT: bnez a4, .LBB27_1
; RV32IA-NEXT: .LBB27_3:
; RV32IA-NEXT: ret
@@ -1968,7 +1968,7 @@ define void @cmpxchg_i32_seq_cst_monotonic(ptr %ptr, i32 %cmp, i32 %val) nounwin
; RV64IA-NEXT: lr.w.aqrl a3, (a0)
; RV64IA-NEXT: bne a3, a1, .LBB27_3
; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB27_1 Depth=1
-; RV64IA-NEXT: sc.w.aqrl a4, a2, (a0)
+; RV64IA-NEXT: sc.w.rl a4, a2, (a0)
; RV64IA-NEXT: bnez a4, .LBB27_1
; RV64IA-NEXT: .LBB27_3:
; RV64IA-NEXT: ret
@@ -1996,7 +1996,7 @@ define void @cmpxchg_i32_seq_cst_acquire(ptr %ptr, i32 %cmp, i32 %val) nounwind
; RV32IA-NEXT: lr.w.aqrl a3, (a0)
; RV32IA-NEXT: bne a3, a1, .LBB28_3
; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB28_1 Depth=1
-; RV32IA-NEXT: sc.w.aqrl a4, a2, (a0)
+; RV32IA-NEXT: sc.w.rl a4, a2, (a0)
; RV32IA-NEXT: bnez a4, .LBB28_1
; RV32IA-NEXT: .LBB28_3:
; RV32IA-NEXT: ret
@@ -2021,7 +2021,7 @@ define void @cmpxchg_i32_seq_cst_acquire(ptr %ptr, i32 %cmp, i32 %val) nounwind
; RV64IA-NEXT: lr.w.aqrl a3, (a0)
; RV64IA-NEXT: bne a3, a1, .LBB28_3
; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB28_1 Depth=1
-; RV64IA-NEXT: sc.w.aqrl a4, a2, (a0)
+; RV64IA-NEXT: sc.w.rl a4, a2, (a0)
; RV64IA-NEXT: bnez a4, .LBB28_1
; RV64IA-NEXT: .LBB28_3:
; RV64IA-NEXT: ret
@@ -2049,7 +2049,7 @@ define void @cmpxchg_i32_seq_cst_seq_cst(ptr %ptr, i32 %cmp, i32 %val) nounwind
; RV32IA-NEXT: lr.w.aqrl a3, (a0)
; RV32IA-NEXT: bne a3, a1, .LBB29_3
; RV32IA-NEXT: # %bb.2: # in Loop: Header=BB29_1 Depth=1
-; RV32IA-NEXT: sc.w.aqrl a4, a2, (a0)
+; RV32IA-NEXT: sc.w.rl a4, a2, (a0)
; RV32IA-NEXT: bnez a4, .LBB29_1
; RV32IA-NEXT: .LBB29_3:
; RV32IA-NEXT: ret
@@ -2074,7 +2074,7 @@ define void @cmpxchg_i32_seq_cst_seq_cst(ptr %ptr, i32 %cmp, i32 %val) nounwind
; RV64IA-NEXT: lr.w.aqrl a3, (a0)
; RV64IA-NEXT: bne a3, a1, .LBB29_3
; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB29_1 Depth=1
-; RV64IA-NEXT: sc.w.aqrl a4, a2, (a0)
+; RV64IA-NEXT: sc.w.rl a4, a2, (a0)
; RV64IA-NEXT: bnez a4, .LBB29_1
; RV64IA-NEXT: .LBB29_3:
; RV64IA-NEXT: ret
@@ -2568,7 +2568,7 @@ define void @cmpxchg_i64_seq_cst_monotonic(ptr %ptr, i64 %cmp, i64 %val) nounwin
; RV64IA-NEXT: lr.d.aqrl a3, (a0)
; RV64IA-NEXT: bne a3, a1, .LBB37_3
; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB37_1 Depth=1
-; RV64IA-NEXT: sc.d.aqrl a4, a2, (a0)
+; RV64IA-NEXT: sc.d.rl a4, a2, (a0)
; RV64IA-NEXT: bnez a4, .LBB37_1
; RV64IA-NEXT: .LBB37_3:
; RV64IA-NEXT: ret
@@ -2630,7 +2630,7 @@ define void @cmpxchg_i64_seq_cst_acquire(ptr %ptr, i64 %cmp, i64 %val) nounwind
; RV64IA-NEXT: lr.d.aqrl a3, (a0)
; RV64IA-NEXT: bne a3, a1, .LBB38_3
; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB38_1 Depth=1
-; RV64IA-NEXT: sc.d.aqrl a4, a2, (a0)
+; RV64IA-NEXT: sc.d.rl a4, a2, (a0)
; RV64IA-NEXT: bnez a4, .LBB38_1
; RV64IA-NEXT: .LBB38_3:
; RV64IA-NEXT: ret
@@ -2692,7 +2692,7 @@ define void @cmpxchg_i64_seq_cst_seq_cst(ptr %ptr, i64 %cmp, i64 %val) nounwind
; RV64IA-NEXT: lr.d.aqrl a3, (a0)
; RV64IA-NEXT: bne a3, a1, .LBB39_3
; RV64IA-NEXT: # %bb.2: # in Loop: Header=BB39_1 Depth=1
-; RV64IA-NEXT: sc.d.aqrl a4, a2, (a0)
+; RV64IA-NEXT: sc.d.rl a4, a2, (a0)
; RV64IA-NEXT: bnez a4, .LBB39_1
; RV64IA-NEXT: .LBB39_3:
; RV64IA-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/atomic-rmw.ll b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
index 0027d0a24f072..aabbb912bdafe 100644
--- a/llvm/test/CodeGen/RISCV/atomic-rmw.ll
+++ b/llvm/test/CodeGen/RISCV/atomic-rmw.ll
@@ -293,7 +293,7 @@ define i8 @atomicrmw_xchg_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: xor a5, a4, a5
; RV32IA-NEXT: and a5, a5, a3
; RV32IA-NEXT: xor a5, a4, a5
-; RV32IA-NEXT: sc.w.aqrl a5, a5, (a2)
+; RV32IA-NEXT: sc.w.rl a5, a5, (a2)
; RV32IA-NEXT: bnez a5, .LBB4_1
; RV32IA-NEXT: # %bb.2:
; RV32IA-NEXT: srl a0, a4, a0
@@ -323,7 +323,7 @@ define i8 @atomicrmw_xchg_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: xor a5, a4, a5
; RV64IA-NEXT: and a5, a5, a3
; RV64IA-NEXT: xor a5, a4, a5
-; RV64IA-NEXT: sc.w.aqrl a5, a5, (a2)
+; RV64IA-NEXT: sc.w.rl a5, a5, (a2)
; RV64IA-NEXT: bnez a5, .LBB4_1
; RV64IA-NEXT: # %bb.2:
; RV64IA-NEXT: srlw a0, a4, a0
@@ -613,7 +613,7 @@ define i8 @atomicrmw_add_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: xor a5, a4, a5
; RV32IA-NEXT: and a5, a5, a3
; RV32IA-NEXT: xor a5, a4, a5
-; RV32IA-NEXT: sc.w.aqrl a5, a5, (a2)
+; RV32IA-NEXT: sc.w.rl a5, a5, (a2)
; RV32IA-NEXT: bnez a5, .LBB9_1
; RV32IA-NEXT: # %bb.2:
; RV32IA-NEXT: srl a0, a4, a0
@@ -643,7 +643,7 @@ define i8 @atomicrmw_add_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: xor a5, a4, a5
; RV64IA-NEXT: and a5, a5, a3
; RV64IA-NEXT: xor a5, a4, a5
-; RV64IA-NEXT: sc.w.aqrl a5, a5, (a2)
+; RV64IA-NEXT: sc.w.rl a5, a5, (a2)
; RV64IA-NEXT: bnez a5, .LBB9_1
; RV64IA-NEXT: # %bb.2:
; RV64IA-NEXT: srlw a0, a4, a0
@@ -933,7 +933,7 @@ define i8 @atomicrmw_sub_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: xor a5, a4, a5
; RV32IA-NEXT: and a5, a5, a3
; RV32IA-NEXT: xor a5, a4, a5
-; RV32IA-NEXT: sc.w.aqrl a5, a5, (a2)
+; RV32IA-NEXT: sc.w.rl a5, a5, (a2)
; RV32IA-NEXT: bnez a5, .LBB14_1
; RV32IA-NEXT: # %bb.2:
; RV32IA-NEXT: srl a0, a4, a0
@@ -963,7 +963,7 @@ define i8 @atomicrmw_sub_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: xor a5, a4, a5
; RV64IA-NEXT: and a5, a5, a3
; RV64IA-NEXT: xor a5, a4, a5
-; RV64IA-NEXT: sc.w.aqrl a5, a5, (a2)
+; RV64IA-NEXT: sc.w.rl a5, a5, (a2)
; RV64IA-NEXT: bnez a5, .LBB14_1
; RV64IA-NEXT: # %bb.2:
; RV64IA-NEXT: srlw a0, a4, a0
@@ -1522,7 +1522,7 @@ define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: xor a5, a4, a5
; RV32IA-NEXT: and a5, a5, a3
; RV32IA-NEXT: xor a5, a4, a5
-; RV32IA-NEXT: sc.w.aqrl a5, a5, (a2)
+; RV32IA-NEXT: sc.w.rl a5, a5, (a2)
; RV32IA-NEXT: bnez a5, .LBB24_1
; RV32IA-NEXT: # %bb.2:
; RV32IA-NEXT: srl a0, a4, a0
@@ -1553,7 +1553,7 @@ define i8 @atomicrmw_nand_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: xor a5, a4, a5
; RV64IA-NEXT: and a5, a5, a3
; RV64IA-NEXT: xor a5, a4, a5
-; RV64IA-NEXT: sc.w.aqrl a5, a5, (a2)
+; RV64IA-NEXT: sc.w.rl a5, a5, (a2)
; RV64IA-NEXT: bnez a5, .LBB24_1
; RV64IA-NEXT: # %bb.2:
; RV64IA-NEXT: srlw a0, a4, a0
@@ -2652,7 +2652,7 @@ define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: and a6, a6, a4
; RV32IA-NEXT: xor a6, a5, a6
; RV32IA-NEXT: .LBB39_3: # in Loop: Header=BB39_1 Depth=1
-; RV32IA-NEXT: sc.w.aqrl a6, a6, (a2)
+; RV32IA-NEXT: sc.w.rl a6, a6, (a2)
; RV32IA-NEXT: bnez a6, .LBB39_1
; RV32IA-NEXT: # %bb.4:
; RV32IA-NEXT: srl a0, a5, a0
@@ -2723,7 +2723,7 @@ define i8 @atomicrmw_max_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: and a6, a6, a4
; RV64IA-NEXT: xor a6, a5, a6
; RV64IA-NEXT: .LBB39_3: # in Loop: Header=BB39_1 Depth=1
-; RV64IA-NEXT: sc.w.aqrl a6, a6, (a2)
+; RV64IA-NEXT: sc.w.rl a6, a6, (a2)
; RV64IA-NEXT: bnez a6, .LBB39_1
; RV64IA-NEXT: # %bb.4:
; RV64IA-NEXT: srlw a0, a5, a0
@@ -3382,7 +3382,7 @@ define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: and a6, a6, a4
; RV32IA-NEXT: xor a6, a5, a6
; RV32IA-NEXT: .LBB44_3: # in Loop: Header=BB44_1 Depth=1
-; RV32IA-NEXT: sc.w.aqrl a6, a6, (a2)
+; RV32IA-NEXT: sc.w.rl a6, a6, (a2)
; RV32IA-NEXT: bnez a6, .LBB44_1
; RV32IA-NEXT: # %bb.4:
; RV32IA-NEXT: srl a0, a5, a0
@@ -3453,7 +3453,7 @@ define i8 @atomicrmw_min_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: and a6, a6, a4
; RV64IA-NEXT: xor a6, a5, a6
; RV64IA-NEXT: .LBB44_3: # in Loop: Header=BB44_1 Depth=1
-; RV64IA-NEXT: sc.w.aqrl a6, a6, (a2)
+; RV64IA-NEXT: sc.w.rl a6, a6, (a2)
; RV64IA-NEXT: bnez a6, .LBB44_1
; RV64IA-NEXT: # %bb.4:
; RV64IA-NEXT: srlw a0, a5, a0
@@ -4049,7 +4049,7 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: and a5, a5, a3
; RV32IA-NEXT: xor a5, a4, a5
; RV32IA-NEXT: .LBB49_3: # in Loop: Header=BB49_1 Depth=1
-; RV32IA-NEXT: sc.w.aqrl a5, a5, (a2)
+; RV32IA-NEXT: sc.w.rl a5, a5, (a2)
; RV32IA-NEXT: bnez a5, .LBB49_1
; RV32IA-NEXT: # %bb.4:
; RV32IA-NEXT: srl a0, a4, a0
@@ -4113,7 +4113,7 @@ define i8 @atomicrmw_umax_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: and a5, a5, a3
; RV64IA-NEXT: xor a5, a4, a5
; RV64IA-NEXT: .LBB49_3: # in Loop: Header=BB49_1 Depth=1
-; RV64IA-NEXT: sc.w.aqrl a5, a5, (a2)
+; RV64IA-NEXT: sc.w.rl a5, a5, (a2)
; RV64IA-NEXT: bnez a5, .LBB49_1
; RV64IA-NEXT: # %bb.4:
; RV64IA-NEXT: srlw a0, a4, a0
@@ -4709,7 +4709,7 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV32IA-NEXT: and a5, a5, a3
; RV32IA-NEXT: xor a5, a4, a5
; RV32IA-NEXT: .LBB54_3: # in Loop: Header=BB54_1 Depth=1
-; RV32IA-NEXT: sc.w.aqrl a5, a5, (a2)
+; RV32IA-NEXT: sc.w.rl a5, a5, (a2)
; RV32IA-NEXT: bnez a5, .LBB54_1
; RV32IA-NEXT: # %bb.4:
; RV32IA-NEXT: srl a0, a4, a0
@@ -4773,7 +4773,7 @@ define i8 @atomicrmw_umin_i8_seq_cst(ptr %a, i8 %b) nounwind {
; RV64IA-NEXT: and a5, a5, a3
; RV64IA-NEXT: xor a5, a4, a5
; RV64IA-NEXT: .LBB54_3: # in Loop: Header=BB54_1 Depth=1
-; RV64IA-NEXT: sc.w.aqrl a5, a5, (a2)
+; RV64IA-NEXT: sc.w.rl a5, a5, (a2)
; RV64IA-NEXT: bnez a5, .LBB54_1
; RV64IA-NEXT: # %bb.4:
; RV64IA-NEXT: srlw a0, a4, a0
@@ -5072,7 +5072,7 @@ define i16 @atomicrmw_xchg_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32IA-NEXT: xor a5, a3, a5
; RV32IA-NEXT: and a5, a5, a4
; RV32IA-NEXT: xor a5, a3, a5
-; RV32IA-NEXT: sc.w.aqrl a5, a5, (a2)
+; RV32IA-NEXT: sc.w.rl a5, a5, (a2)
; RV32IA-NEXT: bnez a5, .LBB59_1
; RV32IA-NEXT: # %bb.2:
; RV32IA-NEXT: srl a0, a3, a0
@@ -5103,7 +5103,7 @@ define i16 @atomicrmw_xchg_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64IA-NEXT: xor a5, a3, a5
; RV64IA-NEXT: and a5, a5, a4
; RV64IA-NEXT: xor a5, a3, a5
-; RV64IA-NEXT: sc.w.aqrl a5, a5, (a2)
+; RV64IA-NEXT: sc.w.rl a5, a5, (a2)
; RV64IA-NEXT: bnez a5, .LBB59_1
; RV64IA-NEXT: # %bb.2:
; RV64IA-NEXT: srlw a0, a3, a0
@@ -5402,7 +5402,7 @@ define i16 @atomicrmw_add_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32IA-NEXT: xor a5, a3, a5
; RV32IA-NEXT: and a5, a5, a4
; RV32IA-NEXT: xor a5, a3, a5
-; RV32IA-NEXT: sc.w.aqrl a5, a5, (a2)
+; RV32IA-NEXT: sc.w.rl a5, a5, (a2)
; RV32IA-NEXT: bnez a5, .LBB64_1
; RV32IA-NEXT: # %bb.2:
; RV32IA-NEXT: srl a0, a3, a0
@@ -5433,7 +5433,7 @@ define i16 @atomicrmw_add_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64IA-NEXT: xor a5, a3, a5
; RV64IA-NEXT: and a5, a5, a4
; RV64IA-NEXT: xor a5, a3, a5
-; RV64IA-NEXT: sc.w.aqrl a5, a5, (a2)
+; RV64IA-NEXT: sc.w.rl a5, a5, (a2)
; RV64IA-NEXT: bnez a5, .LBB64_1
; RV64IA-NEXT: # %bb.2:
; RV64IA-NEXT: srlw a0, a3, a0
@@ -5732,7 +5732,7 @@ define i16 @atomicrmw_sub_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32IA-NEXT: xor a5, a3, a5
; RV32IA-NEXT: and a5, a5, a4
; RV32IA-NEXT: xor a5, a3, a5
-; RV32IA-NEXT: sc.w.aqrl a5, a5, (a2)
+; RV32IA-NEXT: sc.w.rl a5, a5, (a2)
; RV32IA-NEXT: bnez a5, .LBB69_1
; RV32IA-NEXT: # %bb.2:
; RV32IA-NEXT: srl a0, a3, a0
@@ -5763,7 +5763,7 @@ define i16 @atomicrmw_sub_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64IA-NEXT: xor a5, a3, a5
; RV64IA-NEXT: and a5, a5, a4
; RV64IA-NEXT: xor a5, a3, a5
-; RV64IA-NEXT: sc.w.aqrl a5, a5, (a2)
+; RV64IA-NEXT: sc.w.rl a5, a5, (a2)
; RV64IA-NEXT: bnez a5, .LBB69_1
; RV64IA-NEXT: # %bb.2:
; RV64IA-NEXT: srlw a0, a3, a0
@@ -6341,7 +6341,7 @@ define i16 @atomicrmw_nand_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32IA-NEXT: xor a5, a3, a5
; RV32IA-NEXT: and a5, a5, a4
; RV32IA-NEXT: xor a5, a3, a5
-; RV32IA-NEXT: sc.w.aqrl a5, a5, (a2)
+; RV32IA-NEXT: sc.w.rl a5, a5, (a2)
; RV32IA-NEXT: bnez a5, .LBB79_1
; RV32IA-NEXT: # %bb.2:
; RV32IA-NEXT: srl a0, a3, a0
@@ -6373,7 +6373,7 @@ define i16 @atomicrmw_nand_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64IA-NEXT: xor a5, a3, a5
; RV64IA-NEXT: and a5, a5, a4
; RV64IA-NEXT: xor a5, a3, a5
-; RV64IA-NEXT: sc.w.aqrl a5, a5, (a2)
+; RV64IA-NEXT: sc.w.rl a5, a5, (a2)
; RV64IA-NEXT: bnez a5, .LBB79_1
; RV64IA-NEXT: # %bb.2:
; RV64IA-NEXT: srlw a0, a3, a0
@@ -7510,7 +7510,7 @@ define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32IA-NEXT: and a6, a6, a4
; RV32IA-NEXT: xor a6, a3, a6
; RV32IA-NEXT: .LBB94_3: # in Loop: Header=BB94_1 Depth=1
-; RV32IA-NEXT: sc.w.aqrl a6, a6, (a2)
+; RV32IA-NEXT: sc.w.rl a6, a6, (a2)
; RV32IA-NEXT: bnez a6, .LBB94_1
; RV32IA-NEXT: # %bb.4:
; RV32IA-NEXT: srl a0, a3, a0
@@ -7583,7 +7583,7 @@ define i16 @atomicrmw_max_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64IA-NEXT: and a6, a6, a4
; RV64IA-NEXT: xor a6, a3, a6
; RV64IA-NEXT: .LBB94_3: # in Loop: Header=BB94_1 Depth=1
-; RV64IA-NEXT: sc.w.aqrl a6, a6, (a2)
+; RV64IA-NEXT: sc.w.rl a6, a6, (a2)
; RV64IA-NEXT: bnez a6, .LBB94_1
; RV64IA-NEXT: # %bb.4:
; RV64IA-NEXT: srlw a0, a3, a0
@@ -8260,7 +8260,7 @@ define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32IA-NEXT: and a6, a6, a4
; RV32IA-NEXT: xor a6, a3, a6
; RV32IA-NEXT: .LBB99_3: # in Loop: Header=BB99_1 Depth=1
-; RV32IA-NEXT: sc.w.aqrl a6, a6, (a2)
+; RV32IA-NEXT: sc.w.rl a6, a6, (a2)
; RV32IA-NEXT: bnez a6, .LBB99_1
; RV32IA-NEXT: # %bb.4:
; RV32IA-NEXT: srl a0, a3, a0
@@ -8333,7 +8333,7 @@ define i16 @atomicrmw_min_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64IA-NEXT: and a6, a6, a4
; RV64IA-NEXT: xor a6, a3, a6
; RV64IA-NEXT: .LBB99_3: # in Loop: Header=BB99_1 Depth=1
-; RV64IA-NEXT: sc.w.aqrl a6, a6, (a2)
+; RV64IA-NEXT: sc.w.rl a6, a6, (a2)
; RV64IA-NEXT: bnez a6, .LBB99_1
; RV64IA-NEXT: # %bb.4:
; RV64IA-NEXT: srlw a0, a3, a0
@@ -8974,7 +8974,7 @@ define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32IA-NEXT: and a5, a5, a4
; RV32IA-NEXT: xor a5, a3, a5
; RV32IA-NEXT: .LBB104_3: # in Loop: Header=BB104_1 Depth=1
-; RV32IA-NEXT: sc.w.aqrl a5, a5, (a2)
+; RV32IA-NEXT: sc.w.rl a5, a5, (a2)
; RV32IA-NEXT: bnez a5, .LBB104_1
; RV32IA-NEXT: # %bb.4:
; RV32IA-NEXT: srl a0, a3, a0
@@ -9043,7 +9043,7 @@ define i16 @atomicrmw_umax_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64IA-NEXT: and a5, a5, a4
; RV64IA-NEXT: xor a5, a3, a5
; RV64IA-NEXT: .LBB104_3: # in Loop: Header=BB104_1 Depth=1
-; RV64IA-NEXT: sc.w.aqrl a5, a5, (a2)
+; RV64IA-NEXT: sc.w.rl a5, a5, (a2)
; RV64IA-NEXT: bnez a5, .LBB104_1
; RV64IA-NEXT: # %bb.4:
; RV64IA-NEXT: srlw a0, a3, a0
@@ -9684,7 +9684,7 @@ define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV32IA-NEXT: and a5, a5, a4
; RV32IA-NEXT: xor a5, a3, a5
; RV32IA-NEXT: .LBB109_3: # in Loop: Header=BB109_1 Depth=1
-; RV32IA-NEXT: sc.w.aqrl a5, a5, (a2)
+; RV32IA-NEXT: sc.w.rl a5, a5, (a2)
; RV32IA-NEXT: bnez a5, .LBB109_1
; RV32IA-NEXT: # %bb.4:
; RV32IA-NEXT: srl a0, a3, a0
@@ -9753,7 +9753,7 @@ define i16 @atomicrmw_umin_i16_seq_cst(ptr %a, i16 %b) nounwind {
; RV64IA-NEXT: and a5, a5, a4
; RV64IA-NEXT: xor a5, a3, a5
; RV64IA-NEXT: .LBB109_3: # in Loop: Header=BB109_1 Depth=1
-; RV64IA-NEXT: sc.w.aqrl a5, a5, (a2)
+; RV64IA-NEXT: sc.w.rl a5, a5, (a2)
; RV64IA-NEXT: bnez a5, .LBB109_1
; RV64IA-NEXT: # %bb.4:
; RV64IA-NEXT: srlw a0, a3, a0
@@ -10661,7 +10661,7 @@ define i32 @atomicrmw_nand_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV32IA-NEXT: lr.w.aqrl a2, (a0)
; RV32IA-NEXT: and a3, a2, a1
; RV32IA-NEXT: not a3, a3
-; RV32IA-NEXT: sc.w.aqrl a3, a3, (a0)
+; RV32IA-NEXT: sc.w.rl a3, a3, (a0)
; RV32IA-NEXT: bnez a3, .LBB134_1
; RV32IA-NEXT: # %bb.2:
; RV32IA-NEXT: mv a0, a2
@@ -10683,7 +10683,7 @@ define i32 @atomicrmw_nand_i32_seq_cst(ptr %a, i32 %b) nounwind {
; RV64IA-NEXT: lr.w.aqrl a2, (a0)
; RV64IA-NEXT: and a3, a2, a1
; RV64IA-NEXT: not a3, a3
-; RV64IA-NEXT: sc.w.aqrl a3, a3, (a0)
+; RV64IA-NEXT: sc.w.rl a3, a3, (a0)
; RV64IA-NEXT: bnez a3, .LBB134_1
; RV64IA-NEXT: # %bb.2:
; RV64IA-NEXT: mv a0, a2
@@ -13818,7 +13818,7 @@ define i64 @atomicrmw_nand_i64_seq_cst(ptr %a, i64 %b) nounwind {
; RV64IA-NEXT: lr.d.aqrl a2, (a0)
; RV64IA-NEXT: and a3, a2, a1
; RV64IA-NEXT: not a3, a3
-; RV64IA-NEXT: sc.d.aqrl a3, a3, (a0)
+; RV64IA-NEXT: sc.d.rl a3, a3, (a0)
; RV64IA-NEXT: bnez a3, .LBB189_1
; RV64IA-NEXT: # %bb.2:
; RV64IA-NEXT: mv a0, a2
diff --git a/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll b/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll
index dc4b50215ab0a..687bd304143ee 100644
--- a/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll
+++ b/llvm/test/CodeGen/RISCV/atomicrmw-uinc-udec-wrap.ll
@@ -83,7 +83,7 @@ define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
; RV32IA-NEXT: bne a4, a5, .LBB0_1
; RV32IA-NEXT: # %bb.4: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB0_3 Depth=2
-; RV32IA-NEXT: sc.w.aqrl a7, a6, (a2)
+; RV32IA-NEXT: sc.w.rl a7, a6, (a2)
; RV32IA-NEXT: bnez a7, .LBB0_3
; RV32IA-NEXT: # %bb.5: # %atomicrmw.start
; RV32IA-NEXT: # %bb.2: # %atomicrmw.end
@@ -159,7 +159,7 @@ define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
; RV64IA-NEXT: bne a4, a6, .LBB0_1
; RV64IA-NEXT: # %bb.4: # %atomicrmw.start
; RV64IA-NEXT: # in Loop: Header=BB0_3 Depth=2
-; RV64IA-NEXT: sc.w.aqrl a7, a5, (a2)
+; RV64IA-NEXT: sc.w.rl a7, a5, (a2)
; RV64IA-NEXT: bnez a7, .LBB0_3
; RV64IA-NEXT: # %bb.5: # %atomicrmw.start
; RV64IA-NEXT: # %bb.2: # %atomicrmw.end
@@ -245,7 +245,7 @@ define i16 @atomicrmw_uinc_wrap_i16(ptr %ptr, i16 %val) {
; RV32IA-NEXT: bne a5, a6, .LBB1_1
; RV32IA-NEXT: # %bb.4: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB1_3 Depth=2
-; RV32IA-NEXT: sc.w.aqrl t0, a7, (a2)
+; RV32IA-NEXT: sc.w.rl t0, a7, (a2)
; RV32IA-NEXT: bnez t0, .LBB1_3
; RV32IA-NEXT: # %bb.5: # %atomicrmw.start
; RV32IA-NEXT: # %bb.2: # %atomicrmw.end
@@ -327,7 +327,7 @@ define i16 @atomicrmw_uinc_wrap_i16(ptr %ptr, i16 %val) {
; RV64IA-NEXT: bne a5, a7, .LBB1_1
; RV64IA-NEXT: # %bb.4: # %atomicrmw.start
; RV64IA-NEXT: # in Loop: Header=BB1_3 Depth=2
-; RV64IA-NEXT: sc.w.aqrl t0, a6, (a2)
+; RV64IA-NEXT: sc.w.rl t0, a6, (a2)
; RV64IA-NEXT: bnez t0, .LBB1_3
; RV64IA-NEXT: # %bb.5: # %atomicrmw.start
; RV64IA-NEXT: # %bb.2: # %atomicrmw.end
@@ -393,7 +393,7 @@ define i32 @atomicrmw_uinc_wrap_i32(ptr %ptr, i32 %val) {
; RV32IA-NEXT: bne a2, a3, .LBB2_1
; RV32IA-NEXT: # %bb.4: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB2_3 Depth=2
-; RV32IA-NEXT: sc.w.aqrl a5, a4, (a0)
+; RV32IA-NEXT: sc.w.rl a5, a4, (a0)
; RV32IA-NEXT: bnez a5, .LBB2_3
; RV32IA-NEXT: # %bb.5: # %atomicrmw.start
; RV32IA-NEXT: # %bb.2: # %atomicrmw.end
@@ -456,7 +456,7 @@ define i32 @atomicrmw_uinc_wrap_i32(ptr %ptr, i32 %val) {
; RV64IA-NEXT: bne a2, a4, .LBB2_1
; RV64IA-NEXT: # %bb.4: # %atomicrmw.start
; RV64IA-NEXT: # in Loop: Header=BB2_3 Depth=2
-; RV64IA-NEXT: sc.w.aqrl a5, a3, (a0)
+; RV64IA-NEXT: sc.w.rl a5, a3, (a0)
; RV64IA-NEXT: bnez a5, .LBB2_3
; RV64IA-NEXT: # %bb.5: # %atomicrmw.start
; RV64IA-NEXT: # %bb.2: # %atomicrmw.end
@@ -634,7 +634,7 @@ define i64 @atomicrmw_uinc_wrap_i64(ptr %ptr, i64 %val) {
; RV64IA-NEXT: bne a2, a3, .LBB3_1
; RV64IA-NEXT: # %bb.4: # %atomicrmw.start
; RV64IA-NEXT: # in Loop: Header=BB3_3 Depth=2
-; RV64IA-NEXT: sc.d.aqrl a5, a4, (a0)
+; RV64IA-NEXT: sc.d.rl a5, a4, (a0)
; RV64IA-NEXT: bnez a5, .LBB3_3
; RV64IA-NEXT: # %bb.5: # %atomicrmw.start
; RV64IA-NEXT: # %bb.2: # %atomicrmw.end
@@ -717,7 +717,7 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
; RV32IA-NEXT: bne a6, a5, .LBB4_7
; RV32IA-NEXT: # %bb.6: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB4_5 Depth=2
-; RV32IA-NEXT: sc.w.aqrl t0, a7, (a2)
+; RV32IA-NEXT: sc.w.rl t0, a7, (a2)
; RV32IA-NEXT: bnez t0, .LBB4_5
; RV32IA-NEXT: .LBB4_7: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB4_2 Depth=1
@@ -814,7 +814,7 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
; RV64IA-NEXT: bne a4, a6, .LBB4_7
; RV64IA-NEXT: # %bb.6: # %atomicrmw.start
; RV64IA-NEXT: # in Loop: Header=BB4_5 Depth=2
-; RV64IA-NEXT: sc.w.aqrl t0, a7, (a2)
+; RV64IA-NEXT: sc.w.rl t0, a7, (a2)
; RV64IA-NEXT: bnez t0, .LBB4_5
; RV64IA-NEXT: .LBB4_7: # %atomicrmw.start
; RV64IA-NEXT: # in Loop: Header=BB4_2 Depth=1
@@ -919,7 +919,7 @@ define i16 @atomicrmw_udec_wrap_i16(ptr %ptr, i16 %val) {
; RV32IA-NEXT: bne a7, a6, .LBB5_7
; RV32IA-NEXT: # %bb.6: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB5_5 Depth=2
-; RV32IA-NEXT: sc.w.aqrl t1, t0, (a2)
+; RV32IA-NEXT: sc.w.rl t1, t0, (a2)
; RV32IA-NEXT: bnez t1, .LBB5_5
; RV32IA-NEXT: .LBB5_7: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB5_2 Depth=1
@@ -1022,7 +1022,7 @@ define i16 @atomicrmw_udec_wrap_i16(ptr %ptr, i16 %val) {
; RV64IA-NEXT: bne a5, a7, .LBB5_7
; RV64IA-NEXT: # %bb.6: # %atomicrmw.start
; RV64IA-NEXT: # in Loop: Header=BB5_5 Depth=2
-; RV64IA-NEXT: sc.w.aqrl t1, t0, (a2)
+; RV64IA-NEXT: sc.w.rl t1, t0, (a2)
; RV64IA-NEXT: bnez t1, .LBB5_5
; RV64IA-NEXT: .LBB5_7: # %atomicrmw.start
; RV64IA-NEXT: # in Loop: Header=BB5_2 Depth=1
@@ -1105,7 +1105,7 @@ define i32 @atomicrmw_udec_wrap_i32(ptr %ptr, i32 %val) {
; RV32IA-NEXT: bne a2, a3, .LBB6_7
; RV32IA-NEXT: # %bb.6: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB6_5 Depth=2
-; RV32IA-NEXT: sc.w.aqrl a5, a4, (a0)
+; RV32IA-NEXT: sc.w.rl a5, a4, (a0)
; RV32IA-NEXT: bnez a5, .LBB6_5
; RV32IA-NEXT: .LBB6_7: # %atomicrmw.start
; RV32IA-NEXT: # in Loop: Header=BB6_2 Depth=1
@@ -1188,7 +1188,7 @@ define i32 @atomicrmw_udec_wrap_i32(ptr %ptr, i32 %val) {
; RV64IA-NEXT: bne a2, a4, .LBB6_7
; RV64IA-NEXT: # %bb.6: # %atomicrmw.start
; RV64IA-NEXT: # in Loop: Header=BB6_5 Depth=2
-; RV64IA-NEXT: sc.w.aqrl a6, a5, (a0)
+; RV64IA-NEXT: sc.w.rl a6, a5, (a0)
; RV64IA-NEXT: bnez a6, .LBB6_5
; RV64IA-NEXT: .LBB6_7: # %atomicrmw.start
; RV64IA-NEXT: # in Loop: Header=BB6_2 Depth=1
@@ -1396,7 +1396,7 @@ define i64 @atomicrmw_udec_wrap_i64(ptr %ptr, i64 %val) {
; RV64IA-NEXT: bne a2, a3, .LBB7_7
; RV64IA-NEXT: # %bb.6: # %atomicrmw.start
; RV64IA-NEXT: # in Loop: Header=BB7_5 Depth=2
-; RV64IA-NEXT: sc.d.aqrl a5, a4, (a0)
+; RV64IA-NEXT: sc.d.rl a5, a4, (a0)
; RV64IA-NEXT: bnez a5, .LBB7_5
; RV64IA-NEXT: .LBB7_7: # %atomicrmw.start
; RV64IA-NEXT: # in Loop: Header=BB7_2 Depth=1
More information about the llvm-commits
mailing list