[llvm] r339084 - [WebAssembly] Enable atomic expansion for unsupported atomicrmws
Heejin Ahn via llvm-commits
llvm-commits at lists.llvm.org
Mon Aug 6 17:22:22 PDT 2018
Author: aheejin
Date: Mon Aug 6 17:22:22 2018
New Revision: 339084
URL: http://llvm.org/viewvc/llvm-project?rev=339084&view=rev
Log:
[WebAssembly] Enable atomic expansion for unsupported atomicrmws
Summary:
Wasm does not have direct counterparts to some of LLVM IR's atomicrmw
instructions (min, max, umin, umax, and nand). This enables atomic
expansion for those atomicrmw instructions, using a cmpxchg instruction
within a loop.
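
For illustration, here is a rough sketch of the kind of IR such an expansion
produces for an atomicrmw nand; the function and block names are made up for
this example, and the exact IR emitted by AtomicExpandPass may differ in
detail:

  define i32 @nand_expanded(i32* %p, i32 %v) {
  entry:
    %init = load i32, i32* %p, align 4
    br label %atomicrmw.start

  atomicrmw.start:
    ; Value observed on the previous iteration (or the initial load).
    %loaded = phi i32 [ %init, %entry ], [ %newloaded, %atomicrmw.start ]
    ; nand = not(and)
    %and = and i32 %loaded, %v
    %new = xor i32 %and, -1
    ; Retry until no other thread modified *%p in between.
    %pair = cmpxchg i32* %p, i32 %loaded, i32 %new seq_cst seq_cst
    %newloaded = extractvalue { i32, i1 } %pair, 0
    %success = extractvalue { i32, i1 } %pair, 1
    br i1 %success, label %atomicrmw.end, label %atomicrmw.start

  atomicrmw.end:
    ret i32 %newloaded
  }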
Reviewers: dschuff
Subscribers: sbc100, jgravelle-google, sunfish, llvm-commits
Differential Revision: https://reviews.llvm.org/D49440
Modified:
llvm/trunk/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
llvm/trunk/lib/Target/WebAssembly/WebAssemblyISelLowering.h
llvm/trunk/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td
llvm/trunk/test/CodeGen/WebAssembly/atomic-rmw.ll
Modified: llvm/trunk/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp?rev=339084&r1=339083&r2=339084&view=diff
==============================================================================
--- llvm/trunk/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp Mon Aug 6 17:22:22 2018
@@ -157,6 +157,23 @@ WebAssemblyTargetLowering::WebAssemblyTa
setMaxAtomicSizeInBitsSupported(64);
}
+TargetLowering::AtomicExpansionKind
+WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
+ // We have wasm instructions for these
+ switch (AI->getOperation()) {
+ case AtomicRMWInst::Add:
+ case AtomicRMWInst::Sub:
+ case AtomicRMWInst::And:
+ case AtomicRMWInst::Or:
+ case AtomicRMWInst::Xor:
+ case AtomicRMWInst::Xchg:
+ return AtomicExpansionKind::None;
+ default:
+ break;
+ }
+ return AtomicExpansionKind::CmpXChg;
+}
+
FastISel *WebAssemblyTargetLowering::createFastISel(
FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
return WebAssembly::createFastISel(FuncInfo, LibInfo);
Modified: llvm/trunk/lib/Target/WebAssembly/WebAssemblyISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/WebAssembly/WebAssemblyISelLowering.h?rev=339084&r1=339083&r2=339084&view=diff
==============================================================================
--- llvm/trunk/lib/Target/WebAssembly/WebAssemblyISelLowering.h (original)
+++ llvm/trunk/lib/Target/WebAssembly/WebAssemblyISelLowering.h Mon Aug 6 17:22:22 2018
@@ -44,6 +44,7 @@ class WebAssemblyTargetLowering final :
/// right decision when generating code for different targets.
const WebAssemblySubtarget *Subtarget;
+ AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;
FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
const TargetLibraryInfo *LibInfo) const override;
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
Modified: llvm/trunk/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td?rev=339084&r1=339083&r2=339084&view=diff
==============================================================================
--- llvm/trunk/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td (original)
+++ llvm/trunk/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td Mon Aug 6 17:22:22 2018
@@ -660,10 +660,11 @@ defm : BinRMWTruncExtPattern<
// Atomic ternary read-modify-writes
//===----------------------------------------------------------------------===//
-// TODO LLVM IR's cmpxchg instruction returns a pair of {loaded value,
-// success flag}. When we use a success flag or both values, we can't make use
-// of truncate/extend versions of instructions for now, which is suboptimal. Add
-// selection rules for those cases too.
+// TODO LLVM IR's cmpxchg instruction returns a pair of {loaded value, success
+// flag}. When we use the success flag or both values, we can't make use of i64
+// truncate/extend versions of instructions for now, which is suboptimal.
+// Consider adding a pass after instruction selection that optimizes this case
+// if it is frequent.
let Defs = [ARGUMENTS] in {
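
As a side note on the TODO above, the pair in question is the
{loaded value, success flag} aggregate that cmpxchg returns in LLVM IR. A
minimal example (the function name is hypothetical) that consumes both
elements, which is the case the comment says cannot yet use the
truncate/extend instruction forms:

  define i32 @cmpxchg_use_both(i32* %p, i32 %exp, i32 %new) {
    %pair = cmpxchg i32* %p, i32 %exp, i32 %new seq_cst seq_cst
    ; Element 0 is the value that was loaded from %p.
    %old = extractvalue { i32, i1 } %pair, 0
    ; Element 1 is the success flag the TODO refers to.
    %succ = extractvalue { i32, i1 } %pair, 1
    %ext = zext i1 %succ to i32
    %res = add i32 %old, %ext
    ret i32 %res
  }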
Modified: llvm/trunk/test/CodeGen/WebAssembly/atomic-rmw.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/WebAssembly/atomic-rmw.ll?rev=339084&r1=339083&r2=339084&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/WebAssembly/atomic-rmw.ll (original)
+++ llvm/trunk/test/CodeGen/WebAssembly/atomic-rmw.ll Mon Aug 6 17:22:22 2018
@@ -85,6 +85,58 @@ define i1 @cmpxchg_i32_success(i32* %p,
ret i1 %succ
}
+; Unsupported instructions are expanded using cmpxchg with a loop.
+
+; CHECK-LABEL: nand_i32:
+; CHECK: loop
+; CHECK: i32.atomic.rmw.cmpxchg
+; CHECK: br_if 0
+; CHECK: end_loop
+define i32 @nand_i32(i32* %p, i32 %v) {
+ %old = atomicrmw nand i32* %p, i32 %v seq_cst
+ ret i32 %old
+}
+
+; CHECK-LABEL: max_i32:
+; CHECK: loop
+; CHECK: i32.atomic.rmw.cmpxchg
+; CHECK: br_if 0
+; CHECK: end_loop
+define i32 @max_i32(i32* %p, i32 %v) {
+ %old = atomicrmw max i32* %p, i32 %v seq_cst
+ ret i32 %old
+}
+
+; CHECK-LABEL: min_i32:
+; CHECK: loop
+; CHECK: i32.atomic.rmw.cmpxchg
+; CHECK: br_if 0
+; CHECK: end_loop
+define i32 @min_i32(i32* %p, i32 %v) {
+ %old = atomicrmw min i32* %p, i32 %v seq_cst
+ ret i32 %old
+}
+
+; CHECK-LABEL: umax_i32:
+; CHECK: loop
+; CHECK: i32.atomic.rmw.cmpxchg
+; CHECK: br_if 0
+; CHECK: end_loop
+define i32 @umax_i32(i32* %p, i32 %v) {
+ %old = atomicrmw umax i32* %p, i32 %v seq_cst
+ ret i32 %old
+}
+
+; CHECK-LABEL: umin_i32:
+; CHECK: loop
+; CHECK: i32.atomic.rmw.cmpxchg
+; CHECK: br_if 0
+; CHECK: end_loop
+define i32 @umin_i32(i32* %p, i32 %v) {
+ %old = atomicrmw umin i32* %p, i32 %v seq_cst
+ ret i32 %old
+}
+
;===----------------------------------------------------------------------------
; Atomic read-modify-writes: 64-bit
;===----------------------------------------------------------------------------
@@ -164,6 +216,58 @@ define i1 @cmpxchg_i64_success(i64* %p,
ret i1 %succ
}
+; Unsupported instructions are expanded using cmpxchg with a loop.
+
+; CHECK-LABEL: nand_i64:
+; CHECK: loop
+; CHECK: i64.atomic.rmw.cmpxchg
+; CHECK: br_if 0
+; CHECK: end_loop
+define i64 @nand_i64(i64* %p, i64 %v) {
+ %old = atomicrmw nand i64* %p, i64 %v seq_cst
+ ret i64 %old
+}
+
+; CHECK-LABEL: max_i64:
+; CHECK: loop
+; CHECK: i64.atomic.rmw.cmpxchg
+; CHECK: br_if 0
+; CHECK: end_loop
+define i64 @max_i64(i64* %p, i64 %v) {
+ %old = atomicrmw max i64* %p, i64 %v seq_cst
+ ret i64 %old
+}
+
+; CHECK-LABEL: min_i64:
+; CHECK: loop
+; CHECK: i64.atomic.rmw.cmpxchg
+; CHECK: br_if 0
+; CHECK: end_loop
+define i64 @min_i64(i64* %p, i64 %v) {
+ %old = atomicrmw min i64* %p, i64 %v seq_cst
+ ret i64 %old
+}
+
+; CHECK-LABEL: umax_i64:
+; CHECK: loop
+; CHECK: i64.atomic.rmw.cmpxchg
+; CHECK: br_if 0
+; CHECK: end_loop
+define i64 @umax_i64(i64* %p, i64 %v) {
+ %old = atomicrmw umax i64* %p, i64 %v seq_cst
+ ret i64 %old
+}
+
+; CHECK-LABEL: umin_i64:
+; CHECK: loop
+; CHECK: i64.atomic.rmw.cmpxchg
+; CHECK: br_if 0
+; CHECK: end_loop
+define i64 @umin_i64(i64* %p, i64 %v) {
+ %old = atomicrmw umin i64* %p, i64 %v seq_cst
+ ret i64 %old
+}
+
;===----------------------------------------------------------------------------
; Atomic truncating & sign-extending RMWs
;===----------------------------------------------------------------------------
@@ -627,6 +731,76 @@ define i64 @cmpxchg_sext_i32_i64(i32* %p
ret i64 %e
}
+; Unsupported instructions are expanded using cmpxchg with a loop.
+; Here we take a nand as an example.
+
+; nand
+
+; CHECK-LABEL: nand_sext_i8_i32:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK: loop
+; CHECK: i32.atomic.rmw8_u.cmpxchg
+; CHECK: i32.extend8_s
+define i32 @nand_sext_i8_i32(i8* %p, i32 %v) {
+ %t = trunc i32 %v to i8
+ %old = atomicrmw nand i8* %p, i8 %t seq_cst
+ %e = sext i8 %old to i32
+ ret i32 %e
+}
+
+; CHECK-LABEL: nand_sext_i16_i32:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK: loop
+; CHECK: i32.atomic.rmw16_u.cmpxchg
+; CHECK: i32.extend16_s
+define i32 @nand_sext_i16_i32(i16* %p, i32 %v) {
+ %t = trunc i32 %v to i16
+ %old = atomicrmw nand i16* %p, i16 %t seq_cst
+ %e = sext i16 %old to i32
+ ret i32 %e
+}
+
+; FIXME Currently this cannot make use of i64.atomic.rmw8_u.cmpxchg
+; CHECK-LABEL: nand_sext_i8_i64:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK: loop
+; CHECK: i32.atomic.rmw8_u.cmpxchg
+; CHECK: i64.extend_u/i32
+; CHECK: i64.extend8_s
+define i64 @nand_sext_i8_i64(i8* %p, i64 %v) {
+ %t = trunc i64 %v to i8
+ %old = atomicrmw nand i8* %p, i8 %t seq_cst
+ %e = sext i8 %old to i64
+ ret i64 %e
+}
+
+; FIXME Currently this cannot make use of i64.atomic.rmw16_u.cmpxchg
+; CHECK-LABEL: nand_sext_i16_i64:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK: loop
+; CHECK: i32.atomic.rmw16_u.cmpxchg
+; CHECK: i64.extend_u/i32
+; CHECK: i64.extend16_s
+define i64 @nand_sext_i16_i64(i16* %p, i64 %v) {
+ %t = trunc i64 %v to i16
+ %old = atomicrmw nand i16* %p, i16 %t seq_cst
+ %e = sext i16 %old to i64
+ ret i64 %e
+}
+
+; A 32->64 sext rmw is expanded to an i32.atomic.rmw.cmpxchg loop plus i64.extend_s/i32
+; CHECK-LABEL: nand_sext_i32_i64:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK: loop
+; CHECK: i32.atomic.rmw.cmpxchg
+; CHECK: i64.extend_s/i32
+define i64 @nand_sext_i32_i64(i32* %p, i64 %v) {
+ %t = trunc i64 %v to i32
+ %old = atomicrmw nand i32* %p, i32 %t seq_cst
+ %e = sext i32 %old to i64
+ ret i64 %e
+}
+
;===----------------------------------------------------------------------------
; Atomic truncating & zero-extending RMWs
;===----------------------------------------------------------------------------
@@ -1039,3 +1213,69 @@ define i64 @cmpxchg_zext_i32_i64(i32* %p
%e = zext i32 %old to i64
ret i64 %e
}
+
+; Unsupported instructions are expanded using cmpxchg with a loop.
+; Here we take a nand as an example.
+
+; nand
+
+; CHECK-LABEL: nand_zext_i8_i32:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK: loop
+; CHECK: i32.atomic.rmw8_u.cmpxchg
+define i32 @nand_zext_i8_i32(i8* %p, i32 %v) {
+ %t = trunc i32 %v to i8
+ %old = atomicrmw nand i8* %p, i8 %t seq_cst
+ %e = zext i8 %old to i32
+ ret i32 %e
+}
+
+; CHECK-LABEL: nand_zext_i16_i32:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK: loop
+; CHECK: i32.atomic.rmw16_u.cmpxchg
+define i32 @nand_zext_i16_i32(i16* %p, i32 %v) {
+ %t = trunc i32 %v to i16
+ %old = atomicrmw nand i16* %p, i16 %t seq_cst
+ %e = zext i16 %old to i32
+ ret i32 %e
+}
+
+; FIXME Currently this cannot make use of i64.atomic.rmw8_u.cmpxchg
+; CHECK-LABEL: nand_zext_i8_i64:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK: loop
+; CHECK: i32.atomic.rmw8_u.cmpxchg
+; CHECK: i64.extend_u/i32
+define i64 @nand_zext_i8_i64(i8* %p, i64 %v) {
+ %t = trunc i64 %v to i8
+ %old = atomicrmw nand i8* %p, i8 %t seq_cst
+ %e = zext i8 %old to i64
+ ret i64 %e
+}
+
+; FIXME Currently this cannot make use of i64.atomic.rmw16_u.cmpxchg
+; CHECK-LABEL: nand_zext_i16_i64:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK: loop
+; CHECK: i32.atomic.rmw16_u.cmpxchg
+; CHECK: i64.extend_u/i32
+define i64 @nand_zext_i16_i64(i16* %p, i64 %v) {
+ %t = trunc i64 %v to i16
+ %old = atomicrmw nand i16* %p, i16 %t seq_cst
+ %e = zext i16 %old to i64
+ ret i64 %e
+}
+
+; FIXME Currently this cannot make use of i64.atomic.rmw32_u.cmpxchg
+; CHECK-LABEL: nand_zext_i32_i64:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK: loop
+; CHECK: i32.atomic.rmw.cmpxchg
+; CHECK: i64.extend_u/i32
+define i64 @nand_zext_i32_i64(i32* %p, i64 %v) {
+ %t = trunc i64 %v to i32
+ %old = atomicrmw nand i32* %p, i32 %t seq_cst
+ %e = zext i32 %old to i64
+ ret i64 %e
+}