[llvm] [SelectionDAG][Targets] Replace atomic_load_8/atomic_load_16 with atomic_load_*ext_8/atomic_load_*ext_16 where possible. (PR #137279)
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Thu Apr 24 18:00:21 PDT 2025
https://github.com/topperc created https://github.com/llvm/llvm-project/pull/137279
isAnyExtLoad/isZExtLoad/isSignExtLoad are able to emit predicate checks from tablegen now so we should use them.
The next step would be to add isNonExtLoad versions and migrate all remaining uses of atomic_load_8/16/32/64 to that.
From af7a138bd39e90540ba5be47319c896257f45fec Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Thu, 24 Apr 2025 17:51:30 -0700
Subject: [PATCH] [SelectionDAG] Replace atomic_load_8/atomic_load_16 with
atomic_load_*ext_8/atomic_load_*ext_16 where possible.
isAnyExtLoad/isZExtLoad/isSignExtLoad are able to emit predicate
checks from tablegen now so we should use them.
The next step would be to add isNonExtLoad versions and migrate all
remaining uses of atomic_load_8/16/32/64 to that.
---
.../include/llvm/Target/TargetSelectionDAG.td | 5 ++++
.../lib/Target/AArch64/AArch64InstrAtomics.td | 4 +--
llvm/lib/Target/AMDGPU/AMDGPUInstructions.td | 12 ++++++---
llvm/lib/Target/AMDGPU/BUFInstructions.td | 7 ++---
llvm/lib/Target/AMDGPU/DSInstructions.td | 6 ++---
llvm/lib/Target/AMDGPU/FLATInstructions.td | 13 +++++-----
llvm/lib/Target/AMDGPU/SIInstrInfo.td | 26 ++++++++++++++-----
llvm/lib/Target/ARM/ARMInstrInfo.td | 16 ++++++------
llvm/lib/Target/ARM/ARMInstrThumb.td | 8 +++---
llvm/lib/Target/ARM/ARMInstrThumb2.td | 16 ++++++------
llvm/lib/Target/Hexagon/HexagonPatterns.td | 20 +++++++-------
llvm/lib/Target/Lanai/LanaiInstrInfo.td | 2 +-
.../Target/LoongArch/LoongArchInstrInfo.td | 9 ++++---
llvm/lib/Target/Mips/MicroMipsInstrInfo.td | 4 +--
llvm/lib/Target/Mips/Mips64InstrInfo.td | 6 ++---
llvm/lib/Target/Mips/MipsInstrInfo.td | 4 +--
llvm/lib/Target/PowerPC/PPCInstrInfo.td | 8 +++---
llvm/lib/Target/PowerPC/PPCInstrP10.td | 8 +++---
llvm/lib/Target/RISCV/RISCVGISel.td | 2 +-
llvm/lib/Target/Sparc/SparcInstrInfo.td | 8 +++---
llvm/lib/Target/VE/VEInstrInfo.td | 12 ++++-----
.../WebAssembly/WebAssemblyInstrAtomics.td | 12 ++++-----
22 files changed, 116 insertions(+), 92 deletions(-)
diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index cd0dc26a1f257..d7bf8f5dce435 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -1946,6 +1946,11 @@ def atomic_load_azext_16 : PatFrags<(ops node:$op),
[(atomic_load_aext_16 node:$op),
(atomic_load_zext_16 node:$op)]>;
+// Atomic load which zeroes or anyextends the high bits.
+def atomic_load_azext_32 : PatFrags<(ops node:$op),
+ [(atomic_load_aext_32 node:$op),
+ (atomic_load_zext_32 node:$op)]>;
+
// Atomic load which sign extends or anyextends the high bits.
def atomic_load_asext_8 : PatFrags<(ops node:$op),
[(atomic_load_aext_8 node:$op),
diff --git a/llvm/lib/Target/AArch64/AArch64InstrAtomics.td b/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
index a5f7b384b3e5d..28d45fe25d30c 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrAtomics.td
@@ -51,9 +51,9 @@ class seq_cst_load<PatFrags base>
let Predicates = [HasRCPC] in {
// v8.3 Release Consistent Processor Consistent support, optional in v8.2.
// 8-bit loads
- def : Pat<(acquiring_load<atomic_load_8> GPR64sp:$ptr), (LDAPRB GPR64sp:$ptr)>;
+ def : Pat<(acquiring_load<atomic_load_azext_8> GPR64sp:$ptr), (LDAPRB GPR64sp:$ptr)>;
// 16-bit loads
- def : Pat<(acquiring_load<atomic_load_16> GPR64sp:$ptr), (LDAPRH GPR64sp:$ptr)>;
+ def : Pat<(acquiring_load<atomic_load_azext_16> GPR64sp:$ptr), (LDAPRH GPR64sp:$ptr)>;
// 32-bit loads
def : Pat<(acquiring_load<atomic_load_32> GPR64sp:$ptr), (LDAPRW GPR64sp:$ptr)>;
// 64-bit loads
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
index 6a5065cd4a0e8..6cc76b44f1e14 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
@@ -502,10 +502,6 @@ def zextloadi16_#as : PatFrag<(ops node:$ptr), (zextloadi16 node:$ptr)> {
let IsLoad = 1;
}
-def atomic_load_8_#as : PatFrag<(ops node:$ptr), (atomic_load_8 node:$ptr)> {
- let IsAtomic = 1;
-}
-
def atomic_load_16_#as : PatFrag<(ops node:$ptr), (atomic_load_16 node:$ptr)> {
let IsAtomic = 1;
}
@@ -526,6 +522,10 @@ def atomic_load_sext_8_#as : PatFrag<(ops node:$ptr), (atomic_load_sext_8 node:$
let IsAtomic = 1;
}
+def atomic_load_aext_8_#as : PatFrag<(ops node:$ptr), (atomic_load_aext_8 node:$ptr)> {
+ let IsAtomic = 1;
+}
+
def atomic_load_zext_16_#as : PatFrag<(ops node:$ptr), (atomic_load_zext_16 node:$ptr)> {
let IsAtomic = 1;
}
@@ -534,6 +534,10 @@ def atomic_load_sext_16_#as : PatFrag<(ops node:$ptr), (atomic_load_sext_16 node
let IsAtomic = 1;
}
+def atomic_load_aext_16_#as : PatFrag<(ops node:$ptr), (atomic_load_aext_16 node:$ptr)> {
+ let IsAtomic = 1;
+}
+
} // End let AddressSpaces
} // End foreach as
diff --git a/llvm/lib/Target/AMDGPU/BUFInstructions.td b/llvm/lib/Target/AMDGPU/BUFInstructions.td
index f4edfe1387731..7d64a3dd240c8 100644
--- a/llvm/lib/Target/AMDGPU/BUFInstructions.td
+++ b/llvm/lib/Target/AMDGPU/BUFInstructions.td
@@ -953,11 +953,12 @@ defm BUFFER_LOAD_DWORDX4 : MUBUF_Pseudo_Loads_Lds <
"buffer_load_dwordx4", v4i32, /*LDSPred=*/HasGFX950Insts
>;
-defm : MUBUF_Pseudo_Load_Pats<"BUFFER_LOAD_UBYTE", i32, atomic_load_8_global>;
+defm : MUBUF_Pseudo_Load_Pats<"BUFFER_LOAD_UBYTE", i32, atomic_load_aext_8_global>;
defm : MUBUF_Pseudo_Load_Pats<"BUFFER_LOAD_UBYTE", i32, atomic_load_zext_8_global>;
-defm : MUBUF_Pseudo_Load_Pats<"BUFFER_LOAD_USHORT", i32, atomic_load_16_global>;
+defm : MUBUF_Pseudo_Load_Pats<"BUFFER_LOAD_USHORT", i32, atomic_load_aext_16_global>;
defm : MUBUF_Pseudo_Load_Pats<"BUFFER_LOAD_USHORT", i32, atomic_load_zext_16_global>;
-defm : MUBUF_Pseudo_Load_Pats<"BUFFER_LOAD_UBYTE", i16, atomic_load_8_global>;
+defm : MUBUF_Pseudo_Load_Pats<"BUFFER_LOAD_UBYTE", i16, atomic_load_aext_8_global>;
+defm : MUBUF_Pseudo_Load_Pats<"BUFFER_LOAD_UBYTE", i16, atomic_load_zext_8_global>;
defm : MUBUF_Pseudo_Load_Pats<"BUFFER_LOAD_USHORT", i16, atomic_load_16_global>;
defm : MUBUF_Pseudo_Load_Pats<"BUFFER_LOAD_UBYTE", i32, extloadi8_global>;
defm : MUBUF_Pseudo_Load_Pats<"BUFFER_LOAD_UBYTE", i32, zextloadi8_global>;
diff --git a/llvm/lib/Target/AMDGPU/DSInstructions.td b/llvm/lib/Target/AMDGPU/DSInstructions.td
index bc1db52eeeb2f..74884a2207079 100644
--- a/llvm/lib/Target/AMDGPU/DSInstructions.td
+++ b/llvm/lib/Target/AMDGPU/DSInstructions.td
@@ -853,14 +853,14 @@ foreach vt = Reg32Types.types in {
defm : DSReadPat_mc <DS_READ_B32, vt, "load_local">;
}
-defm : DSReadPat_t16 <DS_READ_U8, i16, "atomic_load_8_local">;
-defm : DSReadPat_mc <DS_READ_U8, i32, "atomic_load_8_local">;
+defm : DSReadPat_t16 <DS_READ_U8, i16, "atomic_load_aext_8_local">;
+defm : DSReadPat_mc <DS_READ_U8, i32, "atomic_load_aext_8_local">;
defm : DSReadPat_t16 <DS_READ_U8, i16, "atomic_load_zext_8_local">;
defm : DSReadPat_mc <DS_READ_U8, i32, "atomic_load_zext_8_local">;
defm : DSReadPat_t16 <DS_READ_I8, i16, "atomic_load_sext_8_local">;
defm : DSReadPat_mc <DS_READ_I8, i32, "atomic_load_sext_8_local">;
defm : DSReadPat_t16 <DS_READ_U16, i16, "atomic_load_16_local">;
-defm : DSReadPat_mc <DS_READ_U16, i32, "atomic_load_16_local">;
+defm : DSReadPat_mc <DS_READ_U16, i32, "atomic_load_aext_16_local">;
defm : DSReadPat_mc <DS_READ_U16, i32, "atomic_load_zext_16_local">;
defm : DSReadPat_mc <DS_READ_I16, i32, "atomic_load_sext_16_local">;
defm : DSReadPat_mc <DS_READ_B32, i32, "atomic_load_32_local">;
diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td b/llvm/lib/Target/AMDGPU/FLATInstructions.td
index 02a5d50ff3ae6..d8bb6e4378924 100644
--- a/llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -1536,14 +1536,13 @@ multiclass ScratchFLATLoadPats_D16_t16<string inst, SDPatternOperator node, Valu
let OtherPredicates = [HasFlatAddressSpace] in {
-def : FlatLoadPat <FLAT_LOAD_UBYTE, atomic_load_8_flat, i32>;
-def : FlatLoadPat <FLAT_LOAD_UBYTE, atomic_load_8_flat, i16>;
+def : FlatLoadPat <FLAT_LOAD_UBYTE, atomic_load_aext_8_flat, i32>;
+def : FlatLoadPat <FLAT_LOAD_UBYTE, atomic_load_aext_8_flat, i16>;
def : FlatLoadPat <FLAT_LOAD_UBYTE, atomic_load_zext_8_flat, i32>;
def : FlatLoadPat <FLAT_LOAD_UBYTE, atomic_load_zext_8_flat, i16>;
-def : FlatLoadPat <FLAT_LOAD_USHORT, atomic_load_16_flat, i32>;
+def : FlatLoadPat <FLAT_LOAD_USHORT, atomic_load_aext_16_flat, i32>;
def : FlatLoadPat <FLAT_LOAD_USHORT, atomic_load_16_flat, i16>;
def : FlatLoadPat <FLAT_LOAD_USHORT, atomic_load_zext_16_flat, i32>;
-def : FlatLoadPat <FLAT_LOAD_USHORT, atomic_load_zext_16_flat, i16>;
def : FlatLoadPat <FLAT_LOAD_UBYTE, extloadi8_flat, i32>;
def : FlatLoadPat <FLAT_LOAD_UBYTE, zextloadi8_flat, i32>;
def : FlatLoadPat <FLAT_LOAD_SBYTE, sextloadi8_flat, i32>;
@@ -1678,11 +1677,11 @@ def : FlatLoadPat_D16 <FLAT_LOAD_SHORT_D16, load_d16_lo_flat, v2f16>;
let OtherPredicates = [HasFlatGlobalInsts] in {
-defm : GlobalFLATLoadPats <GLOBAL_LOAD_UBYTE, atomic_load_8_global, i32>;
-defm : GlobalFLATLoadPats <GLOBAL_LOAD_UBYTE, atomic_load_8_global, i16>;
+defm : GlobalFLATLoadPats <GLOBAL_LOAD_UBYTE, atomic_load_aext_8_global, i32>;
+defm : GlobalFLATLoadPats <GLOBAL_LOAD_UBYTE, atomic_load_aext_8_global, i16>;
defm : GlobalFLATLoadPats <GLOBAL_LOAD_UBYTE, atomic_load_zext_8_global, i32>;
defm : GlobalFLATLoadPats <GLOBAL_LOAD_UBYTE, atomic_load_zext_8_global, i16>;
-defm : GlobalFLATLoadPats <GLOBAL_LOAD_USHORT, atomic_load_16_global, i32>;
+defm : GlobalFLATLoadPats <GLOBAL_LOAD_USHORT, atomic_load_aext_16_global, i32>;
defm : GlobalFLATLoadPats <GLOBAL_LOAD_USHORT, atomic_load_16_global, i16>;
defm : GlobalFLATLoadPats <GLOBAL_LOAD_USHORT, atomic_load_zext_16_global, i32>;
defm : GlobalFLATLoadPats <GLOBAL_LOAD_USHORT, atomic_load_zext_16_global, i16>;
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index 51433020eeae7..ec1fd6fb60d57 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -373,10 +373,10 @@ def atomic_load_sext_glue :
let IsSignExtLoad = true;
}
-def atomic_load_8_glue : PatFrag<(ops node:$ptr),
- (AMDGPUatomic_ld_glue node:$ptr)> {
- let IsAtomic = 1;
- let MemoryVT = i8;
+def atomic_load_aext_glue :
+ PatFrag<(ops node:$ptr), (AMDGPUatomic_ld_glue node:$ptr)> {
+ let IsAtomic = true; // FIXME: Should be IsLoad and/or IsAtomic?
+ let IsAnyExtLoad = true;
}
def atomic_load_16_glue : PatFrag<(ops node:$ptr),
@@ -409,6 +409,12 @@ def atomic_load_sext_8_glue : PatFrag<(ops node:$ptr),
let MemoryVT = i8;
}
+def atomic_load_aext_8_glue : PatFrag<(ops node:$ptr),
+ (atomic_load_aext_glue node:$ptr)> {
+ let IsAtomic = 1;
+ let MemoryVT = i8;
+}
+
def atomic_load_zext_16_glue : PatFrag<(ops node:$ptr),
(atomic_load_zext_glue node:$ptr)> {
let IsAtomic = 1;
@@ -421,6 +427,12 @@ def atomic_load_sext_16_glue : PatFrag<(ops node:$ptr),
let MemoryVT = i16;
}
+def atomic_load_aext_16_glue : PatFrag<(ops node:$ptr),
+ (atomic_load_aext_glue node:$ptr)> {
+ let IsAtomic = 1;
+ let MemoryVT = i16;
+}
+
def extload_glue : PatFrag<(ops node:$ptr), (unindexedload_glue node:$ptr)> {
let IsLoad = 1;
let IsAnyExtLoad = 1;
@@ -494,8 +506,6 @@ def load_align16_local_m0 : PatFrag<(ops node:$ptr),
}
let IsAtomic = 1, AddressSpaces = LoadAddress_local.AddrSpaces in {
-def atomic_load_8_local_m0 : PatFrag<(ops node:$ptr),
- (atomic_load_8_glue node:$ptr)>;
def atomic_load_16_local_m0 : PatFrag<(ops node:$ptr),
(atomic_load_16_glue node:$ptr)>;
def atomic_load_32_local_m0 : PatFrag<(ops node:$ptr),
@@ -507,10 +517,14 @@ def atomic_load_zext_8_local_m0 : PatFrag<(ops node:$ptr),
(atomic_load_zext_8_glue node:$ptr)>;
def atomic_load_sext_8_local_m0 : PatFrag<(ops node:$ptr),
(atomic_load_sext_8_glue node:$ptr)>;
+def atomic_load_aext_8_local_m0 : PatFrag<(ops node:$ptr),
+ (atomic_load_aext_8_glue node:$ptr)>;
def atomic_load_zext_16_local_m0 : PatFrag<(ops node:$ptr),
(atomic_load_zext_16_glue node:$ptr)>;
def atomic_load_sext_16_local_m0 : PatFrag<(ops node:$ptr),
(atomic_load_sext_16_glue node:$ptr)>;
+def atomic_load_aext_16_local_m0 : PatFrag<(ops node:$ptr),
+ (atomic_load_aext_16_glue node:$ptr)>;
} // End let AddressSpaces = LoadAddress_local.AddrSpaces
diff --git a/llvm/lib/Target/ARM/ARMInstrInfo.td b/llvm/lib/Target/ARM/ARMInstrInfo.td
index d6387ff848593..1ce9190a68f3c 100644
--- a/llvm/lib/Target/ARM/ARMInstrInfo.td
+++ b/llvm/lib/Target/ARM/ARMInstrInfo.td
@@ -5376,14 +5376,14 @@ def : ARMPat<(stlex_1 (and GPR:$Rt, 0xff), addr_offset_none:$addr),
def : ARMPat<(stlex_2 (and GPR:$Rt, 0xffff), addr_offset_none:$addr),
(STLEXH GPR:$Rt, addr_offset_none:$addr)>;
-class acquiring_load<PatFrag base>
+class acquiring_load<PatFrags base>
: PatFrag<(ops node:$ptr), (base node:$ptr), [{
AtomicOrdering Ordering = cast<AtomicSDNode>(N)->getSuccessOrdering();
return isAcquireOrStronger(Ordering);
}]>;
-def atomic_load_acquire_8 : acquiring_load<atomic_load_8>;
-def atomic_load_acquire_16 : acquiring_load<atomic_load_16>;
+def atomic_load_azext_acquire_8 : acquiring_load<atomic_load_azext_8>;
+def atomic_load_azext_acquire_16 : acquiring_load<atomic_load_azext_16>;
def atomic_load_acquire_32 : acquiring_load<atomic_load_32>;
class releasing_store<PatFrag base>
@@ -5397,8 +5397,8 @@ def atomic_store_release_16 : releasing_store<atomic_store_16>;
def atomic_store_release_32 : releasing_store<atomic_store_32>;
let AddedComplexity = 8 in {
- def : ARMPat<(atomic_load_acquire_8 addr_offset_none:$addr), (LDAB addr_offset_none:$addr)>;
- def : ARMPat<(atomic_load_acquire_16 addr_offset_none:$addr), (LDAH addr_offset_none:$addr)>;
+ def : ARMPat<(atomic_load_azext_acquire_8 addr_offset_none:$addr), (LDAB addr_offset_none:$addr)>;
+ def : ARMPat<(atomic_load_azext_acquire_16 addr_offset_none:$addr), (LDAH addr_offset_none:$addr)>;
def : ARMPat<(atomic_load_acquire_32 addr_offset_none:$addr), (LDA addr_offset_none:$addr)>;
def : ARMPat<(atomic_store_release_8 addr_offset_none:$addr, GPR:$val), (STLB GPR:$val, addr_offset_none:$addr)>;
def : ARMPat<(atomic_store_release_16 addr_offset_none:$addr, GPR:$val), (STLH GPR:$val, addr_offset_none:$addr)>;
@@ -6214,11 +6214,11 @@ def : ARMV6Pat<(add GPR:$Rn, (sext_inreg GPRnopc:$Rm, i16)),
(SXTAH GPR:$Rn, GPRnopc:$Rm, 0)>;
// Atomic load/store patterns
-def : ARMPat<(atomic_load_8 ldst_so_reg:$src),
+def : ARMPat<(atomic_load_azext_8 ldst_so_reg:$src),
(LDRBrs ldst_so_reg:$src)>;
-def : ARMPat<(atomic_load_8 addrmode_imm12:$src),
+def : ARMPat<(atomic_load_azext_8 addrmode_imm12:$src),
(LDRBi12 addrmode_imm12:$src)>;
-def : ARMPat<(atomic_load_16 addrmode3:$src),
+def : ARMPat<(atomic_load_azext_16 addrmode3:$src),
(LDRH addrmode3:$src)>;
def : ARMPat<(atomic_load_32 ldst_so_reg:$src),
(LDRrs ldst_so_reg:$src)>;
diff --git a/llvm/lib/Target/ARM/ARMInstrThumb.td b/llvm/lib/Target/ARM/ARMInstrThumb.td
index b69bc601a0cdc..feda22c89e925 100644
--- a/llvm/lib/Target/ARM/ARMInstrThumb.td
+++ b/llvm/lib/Target/ARM/ARMInstrThumb.td
@@ -1697,13 +1697,13 @@ def : T1Pat<(sextloadi16 t_addrmode_is2:$addr),
def : T1Pat<(sextloadi16 t_addrmode_rr:$addr),
(tASRri (tLSLri (tLDRHr t_addrmode_rr:$addr), 16), 16)>;
-def : T1Pat<(atomic_load_8 t_addrmode_is1:$src),
+def : T1Pat<(atomic_load_azext_8 t_addrmode_is1:$src),
(tLDRBi t_addrmode_is1:$src)>;
-def : T1Pat<(atomic_load_8 t_addrmode_rr:$src),
+def : T1Pat<(atomic_load_azext_8 t_addrmode_rr:$src),
(tLDRBr t_addrmode_rr:$src)>;
-def : T1Pat<(atomic_load_16 t_addrmode_is2:$src),
+def : T1Pat<(atomic_load_azext_16 t_addrmode_is2:$src),
(tLDRHi t_addrmode_is2:$src)>;
-def : T1Pat<(atomic_load_16 t_addrmode_rr:$src),
+def : T1Pat<(atomic_load_azext_16 t_addrmode_rr:$src),
(tLDRHr t_addrmode_rr:$src)>;
def : T1Pat<(atomic_load_32 t_addrmode_is4:$src),
(tLDRi t_addrmode_is4:$src)>;
diff --git a/llvm/lib/Target/ARM/ARMInstrThumb2.td b/llvm/lib/Target/ARM/ARMInstrThumb2.td
index 9f80af07df0fc..f9a873a9483de 100644
--- a/llvm/lib/Target/ARM/ARMInstrThumb2.td
+++ b/llvm/lib/Target/ARM/ARMInstrThumb2.td
@@ -4899,17 +4899,17 @@ def : T2Pat<(add rGPR:$Rn, (sext_inreg rGPR:$Rm, i16)),
Requires<[HasDSP, IsThumb2]>;
// Atomic load/store patterns
-def : T2Pat<(atomic_load_8 t2addrmode_imm12:$addr),
+def : T2Pat<(atomic_load_azext_8 t2addrmode_imm12:$addr),
(t2LDRBi12 t2addrmode_imm12:$addr)>;
-def : T2Pat<(atomic_load_8 t2addrmode_negimm8:$addr),
+def : T2Pat<(atomic_load_azext_8 t2addrmode_negimm8:$addr),
(t2LDRBi8 t2addrmode_negimm8:$addr)>;
-def : T2Pat<(atomic_load_8 t2addrmode_so_reg:$addr),
+def : T2Pat<(atomic_load_azext_8 t2addrmode_so_reg:$addr),
(t2LDRBs t2addrmode_so_reg:$addr)>;
-def : T2Pat<(atomic_load_16 t2addrmode_imm12:$addr),
+def : T2Pat<(atomic_load_azext_16 t2addrmode_imm12:$addr),
(t2LDRHi12 t2addrmode_imm12:$addr)>;
-def : T2Pat<(atomic_load_16 t2addrmode_negimm8:$addr),
+def : T2Pat<(atomic_load_azext_16 t2addrmode_negimm8:$addr),
(t2LDRHi8 t2addrmode_negimm8:$addr)>;
-def : T2Pat<(atomic_load_16 t2addrmode_so_reg:$addr),
+def : T2Pat<(atomic_load_azext_16 t2addrmode_so_reg:$addr),
(t2LDRHs t2addrmode_so_reg:$addr)>;
def : T2Pat<(atomic_load_32 t2addrmode_imm12:$addr),
(t2LDRi12 t2addrmode_imm12:$addr)>;
@@ -4937,8 +4937,8 @@ def : T2Pat<(atomic_store_32 GPR:$val, t2addrmode_so_reg:$addr),
(t2STRs GPR:$val, t2addrmode_so_reg:$addr)>;
let AddedComplexity = 8, Predicates = [IsThumb, HasAcquireRelease, HasV7Clrex] in {
- def : Pat<(atomic_load_acquire_8 addr_offset_none:$addr), (t2LDAB addr_offset_none:$addr)>;
- def : Pat<(atomic_load_acquire_16 addr_offset_none:$addr), (t2LDAH addr_offset_none:$addr)>;
+ def : Pat<(atomic_load_azext_acquire_8 addr_offset_none:$addr), (t2LDAB addr_offset_none:$addr)>;
+ def : Pat<(atomic_load_azext_acquire_16 addr_offset_none:$addr), (t2LDAH addr_offset_none:$addr)>;
def : Pat<(atomic_load_acquire_32 addr_offset_none:$addr), (t2LDA addr_offset_none:$addr)>;
def : Pat<(atomic_store_release_8 addr_offset_none:$addr, GPR:$val), (t2STLB GPR:$val, addr_offset_none:$addr)>;
def : Pat<(atomic_store_release_16 addr_offset_none:$addr, GPR:$val), (t2STLH GPR:$val, addr_offset_none:$addr)>;
diff --git a/llvm/lib/Target/Hexagon/HexagonPatterns.td b/llvm/lib/Target/Hexagon/HexagonPatterns.td
index 244f204539c89..1be16c1739512 100644
--- a/llvm/lib/Target/Hexagon/HexagonPatterns.td
+++ b/llvm/lib/Target/Hexagon/HexagonPatterns.td
@@ -2130,7 +2130,7 @@ def sextloadv4i8: PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
// Patterns to select load-indexed: Rs + Off.
// - frameindex [+ imm],
-multiclass Loadxfi_pat<PatFrag Load, ValueType VT, PatLeaf ImmPred,
+multiclass Loadxfi_pat<PatFrags Load, ValueType VT, PatLeaf ImmPred,
InstHexagon MI> {
def: Pat<(VT (Load (add (i32 AddrFI:$fi), ImmPred:$Off))),
(VT (MI AddrFI:$fi, imm:$Off))>;
@@ -2141,7 +2141,7 @@ multiclass Loadxfi_pat<PatFrag Load, ValueType VT, PatLeaf ImmPred,
// Patterns to select load-indexed: Rs + Off.
// - base reg [+ imm]
-multiclass Loadxgi_pat<PatFrag Load, ValueType VT, PatLeaf ImmPred,
+multiclass Loadxgi_pat<PatFrags Load, ValueType VT, PatLeaf ImmPred,
InstHexagon MI> {
def: Pat<(VT (Load (add I32:$Rs, ImmPred:$Off))),
(VT (MI IntRegs:$Rs, imm:$Off))>;
@@ -2151,7 +2151,7 @@ multiclass Loadxgi_pat<PatFrag Load, ValueType VT, PatLeaf ImmPred,
}
// Patterns to select load-indexed: Rs + Off. Combines Loadxfi + Loadxgi.
-multiclass Loadxi_pat<PatFrag Load, ValueType VT, PatLeaf ImmPred,
+multiclass Loadxi_pat<PatFrags Load, ValueType VT, PatLeaf ImmPred,
InstHexagon MI> {
defm: Loadxfi_pat<Load, VT, ImmPred, MI>;
defm: Loadxgi_pat<Load, VT, ImmPred, MI>;
@@ -2221,7 +2221,7 @@ class Loadxum_pat<PatFrag Load, ValueType VT, PatFrag ImmPred, PatFrag ValueMod,
(VT (ValueMod (MI IntRegs:$Rt, imm:$u2, ImmPred:$Addr)))>;
// Pattern to select load absolute.
-class Loada_pat<PatFrag Load, ValueType VT, PatFrag Addr, InstHexagon MI>
+class Loada_pat<PatFrags Load, ValueType VT, PatFrag Addr, InstHexagon MI>
: Pat<(VT (Load Addr:$addr)), (MI Addr:$addr)>;
// Pattern to select load absolute with value modifier.
@@ -2256,8 +2256,8 @@ let AddedComplexity = 20 in {
defm: Loadxi_pat<load, f64, anyimm3, L2_loadrd_io>;
// No sextloadi1.
- defm: Loadxi_pat<atomic_load_8 , i32, anyimm0, L2_loadrub_io>;
- defm: Loadxi_pat<atomic_load_16, i32, anyimm1, L2_loadruh_io>;
+ defm: Loadxi_pat<atomic_load_azext_8 , i32, anyimm0, L2_loadrub_io>;
+ defm: Loadxi_pat<atomic_load_azext_16, i32, anyimm1, L2_loadruh_io>;
defm: Loadxi_pat<atomic_load_32, i32, anyimm2, L2_loadri_io>;
defm: Loadxi_pat<atomic_load_64, i64, anyimm3, L2_loadrd_io>;
}
@@ -2418,8 +2418,8 @@ let AddedComplexity = 60 in {
def: Loada_pat<load, f32, anyimm2, PS_loadriabs>;
def: Loada_pat<load, f64, anyimm3, PS_loadrdabs>;
- def: Loada_pat<atomic_load_8, i32, anyimm0, PS_loadrubabs>;
- def: Loada_pat<atomic_load_16, i32, anyimm1, PS_loadruhabs>;
+ def: Loada_pat<atomic_load_azext_8, i32, anyimm0, PS_loadrubabs>;
+ def: Loada_pat<atomic_load_azext_16, i32, anyimm1, PS_loadruhabs>;
def: Loada_pat<atomic_load_32, i32, anyimm2, PS_loadriabs>;
def: Loada_pat<atomic_load_64, i64, anyimm3, PS_loadrdabs>;
}
@@ -2463,8 +2463,8 @@ let AddedComplexity = 100 in {
def: Loada_pat<load, f32, addrgp, L2_loadrigp>;
def: Loada_pat<load, f64, addrgp, L2_loadrdgp>;
- def: Loada_pat<atomic_load_8, i32, addrgp, L2_loadrubgp>;
- def: Loada_pat<atomic_load_16, i32, addrgp, L2_loadruhgp>;
+ def: Loada_pat<atomic_load_azext_8, i32, addrgp, L2_loadrubgp>;
+ def: Loada_pat<atomic_load_azext_16, i32, addrgp, L2_loadruhgp>;
def: Loada_pat<atomic_load_32, i32, addrgp, L2_loadrigp>;
def: Loada_pat<atomic_load_64, i64, addrgp, L2_loadrdgp>;
}
diff --git a/llvm/lib/Target/Lanai/LanaiInstrInfo.td b/llvm/lib/Target/Lanai/LanaiInstrInfo.td
index 6feed27b7047b..1d968fa391c2a 100644
--- a/llvm/lib/Target/Lanai/LanaiInstrInfo.td
+++ b/llvm/lib/Target/Lanai/LanaiInstrInfo.td
@@ -845,7 +845,7 @@ def : Pat<(extloadi16 ADDRspls:$src), (i32 (LDHz_RI ADDRspls:$src))>;
// Loads up to 32-bits are already atomic.
// TODO: This is a workaround for a particular failing case and should be
// handled more generally.
-def : Pat<(atomic_load_8 ADDRspls:$src), (i32 (LDBz_RI ADDRspls:$src))>;
+def : Pat<(atomic_load_azext_8 ADDRspls:$src), (i32 (LDBz_RI ADDRspls:$src))>;
// GlobalAddress, ExternalSymbol, Jumptable, ConstantPool
def : Pat<(LanaiHi tglobaladdr:$dst), (MOVHI tglobaladdr:$dst)>;
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
index 775d9289af7c4..b607dcb04149b 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -1779,7 +1779,7 @@ def : Pat<(bitreverse (bswap GPR:$rj)), (BITREV_8B GPR:$rj)>;
/// Loads
-multiclass LdPat<PatFrag LoadOp, LAInst Inst, ValueType vt = GRLenVT> {
+multiclass LdPat<PatFrags LoadOp, LAInst Inst, ValueType vt = GRLenVT> {
def : Pat<(vt (LoadOp BaseAddr:$rj)), (Inst BaseAddr:$rj, 0)>;
def : Pat<(vt (LoadOp (AddrConstant GPR:$rj, simm12:$imm12))),
(Inst GPR:$rj, simm12:$imm12)>;
@@ -1890,9 +1890,10 @@ def : Pat<(atomic_fence 5, timm), (DBAR 0b10010)>; // release
def : Pat<(atomic_fence 6, timm), (DBAR 0b10000)>; // acqrel
def : Pat<(atomic_fence 7, timm), (DBAR 0b10000)>; // seqcst
-defm : LdPat<atomic_load_8, LD_B>;
-defm : LdPat<atomic_load_16, LD_H>;
-defm : LdPat<atomic_load_32, LD_W>;
+defm : LdPat<atomic_load_asext_8, LD_B>;
+defm : LdPat<atomic_load_asext_16, LD_H>;
+defm : LdPat<atomic_load_32, LD_W>, Requires<[IsLA32]>;
+defm : LdPat<atomic_load_asext_32, LD_W>, Requires<[IsLA64]>;
class release_seqcst_store<PatFrag base>
: PatFrag<(ops node:$val, node:$ptr), (base node:$val, node:$ptr), [{
diff --git a/llvm/lib/Target/Mips/MicroMipsInstrInfo.td b/llvm/lib/Target/Mips/MicroMipsInstrInfo.td
index 43b8eb7faf0ec..661c18e8c3952 100644
--- a/llvm/lib/Target/Mips/MicroMipsInstrInfo.td
+++ b/llvm/lib/Target/Mips/MicroMipsInstrInfo.td
@@ -1190,8 +1190,8 @@ def : WrapperPat<tblockaddress, ADDiu_MM, GPR32>, ISA_MICROMIPS;
def : WrapperPat<tjumptable, ADDiu_MM, GPR32>, ISA_MICROMIPS;
def : WrapperPat<tglobaltlsaddr, ADDiu_MM, GPR32>, ISA_MICROMIPS;
-def : MipsPat<(atomic_load_8 addr:$a), (LB_MM addr:$a)>, ISA_MICROMIPS;
-def : MipsPat<(atomic_load_16 addr:$a), (LH_MM addr:$a)>, ISA_MICROMIPS;
+def : MipsPat<(atomic_load_asext_8 addr:$a), (LB_MM addr:$a)>, ISA_MICROMIPS;
+def : MipsPat<(atomic_load_asext_16 addr:$a), (LH_MM addr:$a)>, ISA_MICROMIPS;
def : MipsPat<(atomic_load_32 addr:$a), (LW_MM addr:$a)>, ISA_MICROMIPS;
def : MipsPat<(i32 immLi16:$imm),
diff --git a/llvm/lib/Target/Mips/Mips64InstrInfo.td b/llvm/lib/Target/Mips/Mips64InstrInfo.td
index f19eaf7a67f73..d028c95287a70 100644
--- a/llvm/lib/Target/Mips/Mips64InstrInfo.td
+++ b/llvm/lib/Target/Mips/Mips64InstrInfo.td
@@ -894,9 +894,9 @@ def : MipsPat<(brcond (i32 (setne (and i32:$lhs, PowerOf2LO_i32:$mask), 0)), bb:
ASE_MIPS64_CNMIPS;
// Atomic load patterns.
-def : MipsPat<(atomic_load_8 addr:$a), (LB64 addr:$a)>, ISA_MIPS3, GPR_64;
-def : MipsPat<(atomic_load_16 addr:$a), (LH64 addr:$a)>, ISA_MIPS3, GPR_64;
-def : MipsPat<(atomic_load_32 addr:$a), (LW64 addr:$a)>, ISA_MIPS3, GPR_64;
+def : MipsPat<(atomic_load_asext_8 addr:$a), (LB64 addr:$a)>, ISA_MIPS3, GPR_64;
+def : MipsPat<(atomic_load_asext_16 addr:$a), (LH64 addr:$a)>, ISA_MIPS3, GPR_64;
+def : MipsPat<(atomic_load_asext_32 addr:$a), (LW64 addr:$a)>, ISA_MIPS3, GPR_64;
def : MipsPat<(atomic_load_64 addr:$a), (LD addr:$a)>, ISA_MIPS3, GPR_64;
// Atomic store patterns.
diff --git a/llvm/lib/Target/Mips/MipsInstrInfo.td b/llvm/lib/Target/Mips/MipsInstrInfo.td
index 557e6a2c72e27..f17781dcab726 100644
--- a/llvm/lib/Target/Mips/MipsInstrInfo.td
+++ b/llvm/lib/Target/Mips/MipsInstrInfo.td
@@ -3358,8 +3358,8 @@ let AdditionalPredicates = [NotInMicroMips] in {
}
// Atomic load patterns.
- def : MipsPat<(atomic_load_8 addr:$a), (LB addr:$a)>, ISA_MIPS1;
- def : MipsPat<(atomic_load_16 addr:$a), (LH addr:$a)>, ISA_MIPS1;
+ def : MipsPat<(atomic_load_asext_8 addr:$a), (LB addr:$a)>, ISA_MIPS1;
+ def : MipsPat<(atomic_load_asext_16 addr:$a), (LH addr:$a)>, ISA_MIPS1;
def : MipsPat<(atomic_load_32 addr:$a), (LW addr:$a)>, ISA_MIPS1;
// Atomic store patterns.
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.td b/llvm/lib/Target/PowerPC/PPCInstrInfo.td
index e2864c2405967..cbf5d0188b79e 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.td
@@ -5084,11 +5084,11 @@ defm : TrapExtendedMnemonic<"lng", 6>;
defm : TrapExtendedMnemonic<"u", 31>;
// Atomic loads
-def : Pat<(i32 (atomic_load_8 DForm:$src)), (LBZ memri:$src)>;
-def : Pat<(i32 (atomic_load_16 DForm:$src)), (LHZ memri:$src)>;
+def : Pat<(i32 (atomic_load_azext_8 DForm:$src)), (LBZ memri:$src)>;
+def : Pat<(i32 (atomic_load_azext_16 DForm:$src)), (LHZ memri:$src)>;
def : Pat<(i32 (atomic_load_32 DForm:$src)), (LWZ memri:$src)>;
-def : Pat<(i32 (atomic_load_8 XForm:$src)), (LBZX memrr:$src)>;
-def : Pat<(i32 (atomic_load_16 XForm:$src)), (LHZX memrr:$src)>;
+def : Pat<(i32 (atomic_load_azext_8 XForm:$src)), (LBZX memrr:$src)>;
+def : Pat<(i32 (atomic_load_azext_16 XForm:$src)), (LHZX memrr:$src)>;
def : Pat<(i32 (atomic_load_32 XForm:$src)), (LWZX memrr:$src)>;
// Atomic stores
diff --git a/llvm/lib/Target/PowerPC/PPCInstrP10.td b/llvm/lib/Target/PowerPC/PPCInstrP10.td
index 39a1ab0d388a7..3f655d9738414 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrP10.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrP10.td
@@ -1275,9 +1275,9 @@ let Predicates = [PCRelativeMemops] in {
(PSTDpc $RS, $ga, 0)>;
// Atomic Load
- def : Pat<(i32 (atomic_load_8 (PPCmatpcreladdr PCRelForm:$ga))),
+ def : Pat<(i32 (atomic_load_azext_8 (PPCmatpcreladdr PCRelForm:$ga))),
(PLBZpc $ga, 0)>;
- def : Pat<(i32 (atomic_load_16 (PPCmatpcreladdr PCRelForm:$ga))),
+ def : Pat<(i32 (atomic_load_azext_16 (PPCmatpcreladdr PCRelForm:$ga))),
(PLHZpc $ga, 0)>;
def : Pat<(i32 (atomic_load_32 (PPCmatpcreladdr PCRelForm:$ga))),
(PLWZpc $ga, 0)>;
@@ -2360,8 +2360,8 @@ let Predicates = [PrefixInstrs] in {
def : Pat<(store i64:$rS, PDForm:$dst), (PSTD g8rc:$rS, memri34:$dst)>;
// Atomic Load
- def : Pat<(i32 (atomic_load_8 PDForm:$src)), (PLBZ memri34:$src)>;
- def : Pat<(i32 (atomic_load_16 PDForm:$src)), (PLHZ memri34:$src)>;
+ def : Pat<(i32 (atomic_load_azext_8 PDForm:$src)), (PLBZ memri34:$src)>;
+ def : Pat<(i32 (atomic_load_azext_16 PDForm:$src)), (PLHZ memri34:$src)>;
def : Pat<(i32 (atomic_load_32 PDForm:$src)), (PLWZ memri34:$src)>;
def : Pat<(i64 (atomic_load_64 PDForm:$src)), (PLD memri34:$src)>;
diff --git a/llvm/lib/Target/RISCV/RISCVGISel.td b/llvm/lib/Target/RISCV/RISCVGISel.td
index 5045e5eaa9408..36f26620655da 100644
--- a/llvm/lib/Target/RISCV/RISCVGISel.td
+++ b/llvm/lib/Target/RISCV/RISCVGISel.td
@@ -109,7 +109,7 @@ def : LdPat<extloadi8, LBU, i16>; // Prefer unsigned due to no c.lb in Zcb.
def : StPat<truncstorei8, SB, GPR, i16>;
let Predicates = [HasAtomicLdSt] in {
- def : LdPat<atomic_load_8, LB, i16>;
+ def : LdPat<atomic_load_aext_8, LB, i16>;
def : LdPat<atomic_load_16, LH, i16>;
def : StPat<atomic_store_8, SB, GPR, i16>;
diff --git a/llvm/lib/Target/Sparc/SparcInstrInfo.td b/llvm/lib/Target/Sparc/SparcInstrInfo.td
index d5af2000d0481..b867a1dab7e24 100644
--- a/llvm/lib/Target/Sparc/SparcInstrInfo.td
+++ b/llvm/lib/Target/Sparc/SparcInstrInfo.td
@@ -1919,10 +1919,10 @@ let Predicates = [HasV9] in
def : Pat<(atomic_fence timm, timm), (MEMBARi 0xf)>;
// atomic_load addr -> load addr
-def : Pat<(i32 (atomic_load_8 ADDRrr:$src)), (LDUBrr ADDRrr:$src)>;
-def : Pat<(i32 (atomic_load_8 ADDRri:$src)), (LDUBri ADDRri:$src)>;
-def : Pat<(i32 (atomic_load_16 ADDRrr:$src)), (LDUHrr ADDRrr:$src)>;
-def : Pat<(i32 (atomic_load_16 ADDRri:$src)), (LDUHri ADDRri:$src)>;
+def : Pat<(i32 (atomic_load_azext_8 ADDRrr:$src)), (LDUBrr ADDRrr:$src)>;
+def : Pat<(i32 (atomic_load_azext_8 ADDRri:$src)), (LDUBri ADDRri:$src)>;
+def : Pat<(i32 (atomic_load_azext_16 ADDRrr:$src)), (LDUHrr ADDRrr:$src)>;
+def : Pat<(i32 (atomic_load_azext_16 ADDRri:$src)), (LDUHri ADDRri:$src)>;
def : Pat<(i32 (atomic_load_32 ADDRrr:$src)), (LDrr ADDRrr:$src)>;
def : Pat<(i32 (atomic_load_32 ADDRri:$src)), (LDri ADDRri:$src)>;
diff --git a/llvm/lib/Target/VE/VEInstrInfo.td b/llvm/lib/Target/VE/VEInstrInfo.td
index b459fbcad909f..6a6d3e069d218 100644
--- a/llvm/lib/Target/VE/VEInstrInfo.td
+++ b/llvm/lib/Target/VE/VEInstrInfo.td
@@ -1794,8 +1794,8 @@ multiclass ATMLDm<SDPatternOperator from,
def : Pat<(iAny (from ADDRzri:$addr)), (tozri MEMzri:$addr)>;
def : Pat<(iAny (from ADDRzii:$addr)), (tozii MEMzii:$addr)>;
}
-defm : ATMLDm<atomic_load_8, LD1BZXrri, LD1BZXrii, LD1BZXzri, LD1BZXzii>;
-defm : ATMLDm<atomic_load_16, LD2BZXrri, LD2BZXrii, LD2BZXzri, LD2BZXzii>;
+defm : ATMLDm<atomic_load_aext_8, LD1BZXrri, LD1BZXrii, LD1BZXzri, LD1BZXzii>;
+defm : ATMLDm<atomic_load_aext_16, LD2BZXrri, LD2BZXrii, LD2BZXzri, LD2BZXzii>;
defm : ATMLDm<atomic_load_32, LDLZXrri, LDLZXrii, LDLZXzri, LDLZXzii>;
defm : ATMLDm<atomic_load_64, LDrri, LDrii, LDzri, LDzii>;
@@ -1824,8 +1824,8 @@ multiclass SXATMLD32m<SDPatternOperator from,
def : Pat<(i64 (sext (from ADDRzii:$addr))),
(i2l (tozii MEMzii:$addr))>;
}
-defm : SXATMLDm<atomic_load_8, i8, LD1BSXrri, LD1BSXrii, LD1BSXzri, LD1BSXzii>;
-defm : SXATMLDm<atomic_load_16, i16, LD2BSXrri, LD2BSXrii, LD2BSXzri,
+defm : SXATMLDm<atomic_load_aext_8, i8, LD1BSXrri, LD1BSXrii, LD1BSXzri, LD1BSXzii>;
+defm : SXATMLDm<atomic_load_aext_16, i16, LD2BSXrri, LD2BSXrii, LD2BSXzri,
LD2BSXzii>;
defm : SXATMLD32m<atomic_load_32, LDLSXrri, LDLSXrii, LDLSXzri, LDLSXzii>;
@@ -1854,9 +1854,9 @@ multiclass ZXATMLD32m<SDPatternOperator from,
def : Pat<(i64 (zext (from ADDRzii:$addr))),
(i2l (tozii MEMzii:$addr))>;
}
-defm : ZXATMLDm<atomic_load_8, 0xFF, LD1BZXrri, LD1BZXrii, LD1BZXzri,
+defm : ZXATMLDm<atomic_load_aext_8, 0xFF, LD1BZXrri, LD1BZXrii, LD1BZXzri,
LD1BZXzii>;
-defm : ZXATMLDm<atomic_load_16, 0xFFFF, LD2BZXrri, LD2BZXrii, LD2BZXzri,
+defm : ZXATMLDm<atomic_load_aext_16, 0xFFFF, LD2BZXrri, LD2BZXrii, LD2BZXzri,
LD2BZXzii>;
defm : ZXATMLD32m<atomic_load_32, LDLZXrri, LDLZXrii, LDLZXzri, LDLZXzii>;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td
index 46bd5e42a9d52..f7f8d63b1dd57 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td
@@ -150,10 +150,10 @@ defm ATOMIC_LOAD32_U_I64 : AtomicLoad<I64, "i64.atomic.load32_u", 0x16>;
// zero-extending.
def zext_aload_8_64 :
PatFrag<(ops node:$addr),
- (i64 (zext (i32 (atomic_load_8 node:$addr))))>;
+ (i64 (zext (i32 (atomic_load_azext_8 node:$addr))))>;
def zext_aload_16_64 :
PatFrag<(ops node:$addr),
- (i64 (zext (i32 (atomic_load_16 node:$addr))))>;
+ (i64 (zext (i32 (atomic_load_azext_16 node:$addr))))>;
def zext_aload_32_64 :
PatFrag<(ops node:$addr),
(i64 (zext (i32 (atomic_load_32 node:$addr))))>;
@@ -163,9 +163,9 @@ def zext_aload_32_64 :
// results) and select a zext load; the next instruction will be sext_inreg
// which is selected by itself.
def sext_aload_8_64 :
- PatFrag<(ops node:$addr), (anyext (i32 (atomic_load_8 node:$addr)))>;
+ PatFrag<(ops node:$addr), (anyext (i32 (atomic_load_azext_8 node:$addr)))>;
def sext_aload_16_64 :
- PatFrag<(ops node:$addr), (anyext (i32 (atomic_load_16 node:$addr)))>;
+ PatFrag<(ops node:$addr), (anyext (i32 (atomic_load_azext_16 node:$addr)))>;
// Select zero-extending loads
defm : LoadPat<i64, zext_aload_8_64, "ATOMIC_LOAD8_U_I64">;
@@ -173,8 +173,8 @@ defm : LoadPat<i64, zext_aload_16_64, "ATOMIC_LOAD16_U_I64">;
defm : LoadPat<i64, zext_aload_32_64, "ATOMIC_LOAD32_U_I64">;
// Select sign-extending loads
-defm : LoadPat<i32, atomic_load_8, "ATOMIC_LOAD8_U_I32">;
-defm : LoadPat<i32, atomic_load_16, "ATOMIC_LOAD16_U_I32">;
+defm : LoadPat<i32, atomic_load_zext_8, "ATOMIC_LOAD8_U_I32">;
+defm : LoadPat<i32, atomic_load_zext_16, "ATOMIC_LOAD16_U_I32">;
defm : LoadPat<i64, sext_aload_8_64, "ATOMIC_LOAD8_U_I64">;
defm : LoadPat<i64, sext_aload_16_64, "ATOMIC_LOAD16_U_I64">;
// 32->64 sext load gets selected as i32.atomic.load, i64.extend_i32_s
More information about the llvm-commits
mailing list