[llvm] r336145 - [WebAssembly] Support for atomic stores

Heejin Ahn via llvm-commits llvm-commits at lists.llvm.org
Mon Jul 2 14:22:59 PDT 2018


Author: aheejin
Date: Mon Jul  2 14:22:59 2018
New Revision: 336145

URL: http://llvm.org/viewvc/llvm-project?rev=336145&view=rev
Log:
[WebAssembly] Support for atomic stores

Summary: Add support for atomic store instructions.

Reviewers: dschuff

Subscribers: sbc100, jgravelle-google, sunfish, llvm-commits

Differential Revision: https://reviews.llvm.org/D48839

Added:
    llvm/trunk/test/CodeGen/WebAssembly/store-trunc-atomic.ll
Modified:
    llvm/trunk/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
    llvm/trunk/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td
    llvm/trunk/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp
    llvm/trunk/test/CodeGen/WebAssembly/i32-load-store-alignment.ll
    llvm/trunk/test/CodeGen/WebAssembly/i64-load-store-alignment.ll
    llvm/trunk/test/CodeGen/WebAssembly/offset-atomics.ll
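
As a rough source-level illustration of what this enables (a sketch, not part
of the commit; the exact clang invocation, e.g. clang++ --target=wasm32
-matomics -O2, is an assumption to check against your toolchain): seq_cst
atomic stores in C++ should now select the new wasm instructions.

  #include <atomic>
  #include <cstdint>

  void stores(std::atomic<uint32_t> &a32, std::atomic<uint64_t> &a64) {
    a32.store(1);  // expected to select i32.atomic.store
    a64.store(1);  // expected to select i64.atomic.store
  }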

Modified: llvm/trunk/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h?rev=336145&r1=336144&r2=336145&view=diff
==============================================================================
--- llvm/trunk/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h (original)
+++ llvm/trunk/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h Mon Jul  2 14:22:59 2018
@@ -124,6 +124,8 @@ inline unsigned GetDefaultP2Align(unsign
   case WebAssembly::STORE8_I32_S:
   case WebAssembly::STORE8_I64:
   case WebAssembly::STORE8_I64_S:
+  case WebAssembly::ATOMIC_STORE8_I32:
+  case WebAssembly::ATOMIC_STORE8_I64:
     return 0;
   case WebAssembly::LOAD16_S_I32:
   case WebAssembly::LOAD16_S_I32_S:
@@ -141,6 +143,8 @@ inline unsigned GetDefaultP2Align(unsign
   case WebAssembly::STORE16_I32_S:
   case WebAssembly::STORE16_I64:
   case WebAssembly::STORE16_I64_S:
+  case WebAssembly::ATOMIC_STORE16_I32:
+  case WebAssembly::ATOMIC_STORE16_I64:
     return 1;
   case WebAssembly::LOAD_I32:
   case WebAssembly::LOAD_I32_S:
@@ -160,6 +164,8 @@ inline unsigned GetDefaultP2Align(unsign
   case WebAssembly::ATOMIC_LOAD_I32_S:
   case WebAssembly::ATOMIC_LOAD32_U_I64:
   case WebAssembly::ATOMIC_LOAD32_U_I64_S:
+  case WebAssembly::ATOMIC_STORE_I32:
+  case WebAssembly::ATOMIC_STORE32_I64:
     return 2;
   case WebAssembly::LOAD_I64:
   case WebAssembly::LOAD_I64_S:
@@ -171,6 +177,7 @@ inline unsigned GetDefaultP2Align(unsign
   case WebAssembly::STORE_F64_S:
   case WebAssembly::ATOMIC_LOAD_I64:
   case WebAssembly::ATOMIC_LOAD_I64_S:
+  case WebAssembly::ATOMIC_STORE_I64:
     return 3;
   default:
     llvm_unreachable("Only loads and stores have p2align values");
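
The values returned above are the log2 of each instruction's access width in
bytes, which is what "p2align" means. A minimal sketch of the mapping,
illustrative only (the real code is the switch above):

  // store8 -> 0, store16 -> 1, i32 store / i64 store32 -> 2, i64 store -> 3
  unsigned defaultP2AlignForWidth(unsigned accessBytes) {
    return __builtin_ctz(accessBytes);  // 1->0, 2->1, 4->2, 8->3
  }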

Modified: llvm/trunk/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td?rev=336145&r1=336144&r2=336145&view=diff
==============================================================================
--- llvm/trunk/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td (original)
+++ llvm/trunk/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td Mon Jul  2 14:22:59 2018
@@ -196,7 +196,145 @@ def : LoadPatExternSymOffOnly<i64, anyex
 // Atomic stores
 //===----------------------------------------------------------------------===//
 
-// TODO: add atomic stores here...
+let Defs = [ARGUMENTS] in {
+defm ATOMIC_STORE_I32 : WebAssemblyStore<I32, "i32.atomic.store", 0xfe17>;
+defm ATOMIC_STORE_I64 : WebAssemblyStore<I64, "i64.atomic.store", 0xfe18>;
+} // Defs = [ARGUMENTS]
+
+// We need an 'atomic' version of store patterns because store and atomic_store
+// nodes have different operand orders:
+// store: (store $val, $ptr)
+// atomic_store: (atomic_store $ptr, $val)
+
+let Predicates = [HasAtomics] in {
+
+// Select stores with no constant offset.
+class AStorePatNoOffset<ValueType ty, PatFrag node, NI inst> :
+  Pat<(node I32:$addr, ty:$val), (inst 0, 0, $addr, $val)>;
+def : AStorePatNoOffset<i32, atomic_store_32, ATOMIC_STORE_I32>;
+def : AStorePatNoOffset<i64, atomic_store_64, ATOMIC_STORE_I64>;
+
+// Select stores with a constant offset.
+
+// Pattern with address + immediate offset
+class AStorePatImmOff<ValueType ty, PatFrag storekind, PatFrag operand,
+                      NI inst> :
+  Pat<(storekind (operand I32:$addr, imm:$off), ty:$val),
+      (inst 0, imm:$off, $addr, ty:$val)>;
+def : AStorePatImmOff<i32, atomic_store_32, regPlusImm, ATOMIC_STORE_I32>;
+def : AStorePatImmOff<i64, atomic_store_64, regPlusImm, ATOMIC_STORE_I64>;
+def : AStorePatImmOff<i32, atomic_store_32, or_is_add, ATOMIC_STORE_I32>;
+def : AStorePatImmOff<i64, atomic_store_64, or_is_add, ATOMIC_STORE_I64>;
+
+class AStorePatGlobalAddr<ValueType ty, PatFrag storekind, NI inst> :
+  Pat<(storekind (regPlusGA I32:$addr, (WebAssemblywrapper tglobaladdr:$off)),
+                 ty:$val),
+      (inst 0, tglobaladdr:$off, I32:$addr, ty:$val)>;
+def : AStorePatGlobalAddr<i32, atomic_store_32, ATOMIC_STORE_I32>;
+def : AStorePatGlobalAddr<i64, atomic_store_64, ATOMIC_STORE_I64>;
+
+class AStorePatExternalSym<ValueType ty, PatFrag storekind, NI inst> :
+  Pat<(storekind (add I32:$addr, (WebAssemblywrapper texternalsym:$off)),
+                 ty:$val),
+      (inst 0, texternalsym:$off, I32:$addr, ty:$val)>;
+def : AStorePatExternalSym<i32, atomic_store_32, ATOMIC_STORE_I32>;
+def : AStorePatExternalSym<i64, atomic_store_64, ATOMIC_STORE_I64>;
+
+// Select stores with just a constant offset.
+class AStorePatOffsetOnly<ValueType ty, PatFrag storekind, NI inst> :
+  Pat<(storekind imm:$off, ty:$val),
+      (inst 0, imm:$off, (CONST_I32 0), ty:$val)>;
+def : AStorePatOffsetOnly<i32, atomic_store_32, ATOMIC_STORE_I32>;
+def : AStorePatOffsetOnly<i64, atomic_store_64, ATOMIC_STORE_I64>;
+
+class AStorePatGlobalAddrOffOnly<ValueType ty, PatFrag storekind, NI inst> :
+  Pat<(storekind (WebAssemblywrapper tglobaladdr:$off), ty:$val),
+      (inst 0, tglobaladdr:$off, (CONST_I32 0), ty:$val)>;
+def : AStorePatGlobalAddrOffOnly<i32, atomic_store_32, ATOMIC_STORE_I32>;
+def : AStorePatGlobalAddrOffOnly<i64, atomic_store_64, ATOMIC_STORE_I64>;
+
+class AStorePatExternSymOffOnly<ValueType ty, PatFrag storekind, NI inst> :
+  Pat<(storekind (WebAssemblywrapper texternalsym:$off), ty:$val),
+      (inst 0, texternalsym:$off, (CONST_I32 0), ty:$val)>;
+def : AStorePatExternSymOffOnly<i32, atomic_store_32, ATOMIC_STORE_I32>;
+def : AStorePatExternSymOffOnly<i64, atomic_store_64, ATOMIC_STORE_I64>;
+
+} // Predicates = [HasAtomics]
+
+// Truncating stores.
+let Defs = [ARGUMENTS] in {
+defm ATOMIC_STORE8_I32 : WebAssemblyStore<I32, "i32.atomic.store8", 0xfe19>;
+defm ATOMIC_STORE16_I32 : WebAssemblyStore<I32, "i32.atomic.store16", 0xfe1a>;
+defm ATOMIC_STORE8_I64 : WebAssemblyStore<I64, "i64.atomic.store8", 0xfe1b>;
+defm ATOMIC_STORE16_I64 : WebAssemblyStore<I64, "i64.atomic.store16", 0xfe1c>;
+defm ATOMIC_STORE32_I64 : WebAssemblyStore<I64, "i64.atomic.store32", 0xfe1d>;
+} // Defs = [ARGUMENTS]
+
+// Fragments for truncating stores.
+
+// We don't have truncating atomic store instructions; for 32-bit widths we
+// just match bare atomic stores, while a narrow store from an i64 value
+// appears as a trunc to i32 followed by an atomic store of the i32.
+class trunc_astore_64<PatFrag storekind> :
+  PatFrag<(ops node:$addr, node:$val),
+          (storekind node:$addr, (i32 (trunc (i64 node:$val))))>;
+def trunc_astore_8_64 : trunc_astore_64<atomic_store_8>;
+def trunc_astore_16_64 : trunc_astore_64<atomic_store_16>;
+def trunc_astore_32_64 : trunc_astore_64<atomic_store_32>;
+
+let Predicates = [HasAtomics] in {
+
+// Truncating stores with no constant offset
+def : AStorePatNoOffset<i32, atomic_store_8, ATOMIC_STORE8_I32>;
+def : AStorePatNoOffset<i32, atomic_store_16, ATOMIC_STORE16_I32>;
+def : AStorePatNoOffset<i64, trunc_astore_8_64, ATOMIC_STORE8_I64>;
+def : AStorePatNoOffset<i64, trunc_astore_16_64, ATOMIC_STORE16_I64>;
+def : AStorePatNoOffset<i64, trunc_astore_32_64, ATOMIC_STORE32_I64>;
+
+// Truncating stores with a constant offset
+def : AStorePatImmOff<i32, atomic_store_8, regPlusImm, ATOMIC_STORE8_I32>;
+def : AStorePatImmOff<i32, atomic_store_16, regPlusImm, ATOMIC_STORE16_I32>;
+def : AStorePatImmOff<i64, trunc_astore_8_64, regPlusImm, ATOMIC_STORE8_I64>;
+def : AStorePatImmOff<i64, trunc_astore_16_64, regPlusImm, ATOMIC_STORE16_I64>;
+def : AStorePatImmOff<i64, trunc_astore_32_64, regPlusImm, ATOMIC_STORE32_I64>;
+def : AStorePatImmOff<i32, atomic_store_8, or_is_add, ATOMIC_STORE8_I32>;
+def : AStorePatImmOff<i32, atomic_store_16, or_is_add, ATOMIC_STORE16_I32>;
+def : AStorePatImmOff<i64, trunc_astore_8_64, or_is_add, ATOMIC_STORE8_I64>;
+def : AStorePatImmOff<i64, trunc_astore_16_64, or_is_add, ATOMIC_STORE16_I64>;
+def : AStorePatImmOff<i64, trunc_astore_32_64, or_is_add, ATOMIC_STORE32_I64>;
+
+def : AStorePatGlobalAddr<i32, atomic_store_8, ATOMIC_STORE8_I32>;
+def : AStorePatGlobalAddr<i32, atomic_store_16, ATOMIC_STORE16_I32>;
+def : AStorePatGlobalAddr<i64, trunc_astore_8_64, ATOMIC_STORE8_I64>;
+def : AStorePatGlobalAddr<i64, trunc_astore_16_64, ATOMIC_STORE16_I64>;
+def : AStorePatGlobalAddr<i64, trunc_astore_32_64, ATOMIC_STORE32_I64>;
+
+def : AStorePatExternalSym<i32, atomic_store_8, ATOMIC_STORE8_I32>;
+def : AStorePatExternalSym<i32, atomic_store_16, ATOMIC_STORE16_I32>;
+def : AStorePatExternalSym<i64, trunc_astore_8_64, ATOMIC_STORE8_I64>;
+def : AStorePatExternalSym<i64, trunc_astore_16_64, ATOMIC_STORE16_I64>;
+def : AStorePatExternalSym<i64, trunc_astore_32_64, ATOMIC_STORE32_I64>;
+
+// Truncating stores with just a constant offset
+def : AStorePatOffsetOnly<i32, atomic_store_8, ATOMIC_STORE8_I32>;
+def : AStorePatOffsetOnly<i32, atomic_store_16, ATOMIC_STORE16_I32>;
+def : AStorePatOffsetOnly<i64, trunc_astore_8_64, ATOMIC_STORE8_I64>;
+def : AStorePatOffsetOnly<i64, trunc_astore_16_64, ATOMIC_STORE16_I64>;
+def : AStorePatOffsetOnly<i64, trunc_astore_32_64, ATOMIC_STORE32_I64>;
+
+def : AStorePatGlobalAddrOffOnly<i32, atomic_store_8, ATOMIC_STORE8_I32>;
+def : AStorePatGlobalAddrOffOnly<i32, atomic_store_16, ATOMIC_STORE16_I32>;
+def : AStorePatGlobalAddrOffOnly<i64, trunc_astore_8_64, ATOMIC_STORE8_I64>;
+def : AStorePatGlobalAddrOffOnly<i64, trunc_astore_16_64, ATOMIC_STORE16_I64>;
+def : AStorePatGlobalAddrOffOnly<i64, trunc_astore_32_64, ATOMIC_STORE32_I64>;
+
+def : AStorePatExternSymOffOnly<i32, atomic_store_8, ATOMIC_STORE8_I32>;
+def : AStorePatExternSymOffOnly<i32, atomic_store_16, ATOMIC_STORE16_I32>;
+def : AStorePatExternSymOffOnly<i64, trunc_astore_8_64, ATOMIC_STORE8_I64>;
+def : AStorePatExternSymOffOnly<i64, trunc_astore_16_64, ATOMIC_STORE16_I64>;
+def : AStorePatExternSymOffOnly<i64, trunc_astore_32_64, ATOMIC_STORE32_I64>;
+
+} // Predicates = [HasAtomics]
 
 //===----------------------------------------------------------------------===//
 // Low-level exclusive operations
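
For the i64 cases, a narrowing store in source code is what produces the
trunc these fragments match. A hedged C++ sketch (assuming a wasm32 target
with atomics enabled) that should exercise trunc_astore_8_64 -- after type
legalization the stored value shows up as a trunc from i64, and the pair
selects to a single i64.atomic.store8:

  #include <atomic>
  #include <cstdint>

  void store_low_byte(std::atomic<uint8_t> &dst, uint64_t v) {
    dst.store(static_cast<uint8_t>(v));  // trunc i64, then store atomic i8
  }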

Modified: llvm/trunk/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp?rev=336145&r1=336144&r2=336145&view=diff
==============================================================================
--- llvm/trunk/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp (original)
+++ llvm/trunk/lib/Target/WebAssembly/WebAssemblySetP2AlignOperands.cpp Mon Jul  2 14:22:59 2018
@@ -118,6 +118,13 @@ bool WebAssemblySetP2AlignOperands::runO
       case WebAssembly::STORE8_I64:
       case WebAssembly::STORE16_I64:
       case WebAssembly::STORE32_I64:
+      case WebAssembly::ATOMIC_STORE_I32:
+      case WebAssembly::ATOMIC_STORE8_I32:
+      case WebAssembly::ATOMIC_STORE16_I32:
+      case WebAssembly::ATOMIC_STORE_I64:
+      case WebAssembly::ATOMIC_STORE8_I64:
+      case WebAssembly::ATOMIC_STORE16_I64:
+      case WebAssembly::ATOMIC_STORE32_I64:
         RewriteP2Align(MI, WebAssembly::StoreP2AlignOperandNo);
         break;
       default:
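
The new cases only extend the dispatch to the existing RewriteP2Align. A
hypothetical, simplified sketch of what that rewrite amounts to (names and
details below are assumptions, not a verbatim excerpt of the pass):

  #include "llvm/CodeGen/MachineInstr.h"
  #include "llvm/Support/MathExtras.h"

  // Copy the log2 of the IR-level alignment into the instruction's
  // p2align immediate operand.
  static void rewriteP2AlignSketch(llvm::MachineInstr &MI, unsigned OpNo) {
    auto *MMO = *MI.memoperands_begin();  // alignment recorded from the IR
    MI.getOperand(OpNo).setImm(llvm::Log2_64(MMO->getAlignment()));
  }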

Modified: llvm/trunk/test/CodeGen/WebAssembly/i32-load-store-alignment.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/WebAssembly/i32-load-store-alignment.ll?rev=336145&r1=336144&r2=336145&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/WebAssembly/i32-load-store-alignment.ll (original)
+++ llvm/trunk/test/CodeGen/WebAssembly/i32-load-store-alignment.ll Mon Jul  2 14:22:59 2018
@@ -5,6 +5,8 @@
 target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
 target triple = "wasm32-unknown-unknown"
 
+; Loads.
+
 ; CHECK-LABEL: ldi32_a1:
 ; CHECK-NEXT: .param i32{{$}}
 ; CHECK-NEXT: .result i32{{$}}
@@ -236,3 +238,21 @@ define i32 @ldi32_atomic_a8(i32 *%p) {
   %v = load atomic i32, i32* %p seq_cst, align 8
   ret i32 %v
 }
+
+; CHECK-LABEL: sti32_atomic_a4:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK-NEXT: i32.atomic.store 0($0), $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti32_atomic_a4(i32 *%p, i32 %v) {
+  store atomic i32 %v, i32* %p seq_cst, align 4
+  ret void
+}
+
+; CHECK-LABEL: sti32_atomic_a8:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK-NEXT: i32.atomic.store 0($0), $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti32_atomic_a8(i32 *%p, i32 %v) {
+  store atomic i32 %v, i32* %p seq_cst, align 8
+  ret void
+}

Modified: llvm/trunk/test/CodeGen/WebAssembly/i64-load-store-alignment.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/WebAssembly/i64-load-store-alignment.ll?rev=336145&r1=336144&r2=336145&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/WebAssembly/i64-load-store-alignment.ll (original)
+++ llvm/trunk/test/CodeGen/WebAssembly/i64-load-store-alignment.ll Mon Jul  2 14:22:59 2018
@@ -5,6 +5,8 @@
 target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
 target triple = "wasm32-unknown-unknown"
 
+; Loads.
+
 ; CHECK-LABEL: ldi64_a1:
 ; CHECK-NEXT: .param i32{{$}}
 ; CHECK-NEXT: .result i64{{$}}
@@ -325,6 +327,9 @@ define void @sti32_a8(i32 *%p, i64 %w) {
 }
 
 ; Atomics.
+; Wasm atomics have an alignment field, but it must always equal the type's
+; natural alignment.
+
 ; CHECK-LABEL: ldi64_atomic_a8:
 ; CHECK-NEXT: .param i32{{$}}
 ; CHECK-NEXT: .result i64{{$}}
@@ -336,7 +341,6 @@ define i64 @ldi64_atomic_a8(i64 *%p) {
 }
 
 ; 16 is greater than the default alignment so it is ignored.
-
 ; CHECK-LABEL: ldi64_atomic_a16:
 ; CHECK-NEXT: .param i32{{$}}
 ; CHECK-NEXT: .result i64{{$}}
@@ -346,3 +350,22 @@ define i64 @ldi64_atomic_a16(i64 *%p) {
   %v = load atomic i64, i64* %p seq_cst, align 16
   ret i64 %v
 }
+
+; CHECK-LABEL: sti64_atomic_a8:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK-NEXT: i64.atomic.store 0($0), $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti64_atomic_a8(i64 *%p, i64 %v) {
+  store atomic i64 %v, i64* %p seq_cst, align 8
+  ret void
+}
+
+; 16 is greater than the default alignment so it is ignored.
+; CHECK-LABEL: sti64_atomic_a16:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK-NEXT: i64.atomic.store 0($0), $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti64_atomic_a16(i64 *%p, i64 %v) {
+  store atomic i64 %v, i64* %p seq_cst, align 16
+  ret void
+}

Modified: llvm/trunk/test/CodeGen/WebAssembly/offset-atomics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/WebAssembly/offset-atomics.ll?rev=336145&r1=336144&r2=336145&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/WebAssembly/offset-atomics.ll (original)
+++ llvm/trunk/test/CodeGen/WebAssembly/offset-atomics.ll Mon Jul  2 14:22:59 2018
@@ -6,6 +6,8 @@
 target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
 target triple = "wasm32-unknown-unknown"
 
+; Basic loads.
+
 ; CHECK-LABEL: load_i32_no_offset:
 ; CHECK: i32.atomic.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
@@ -154,6 +156,148 @@ define i32 @load_i32_with_folded_or_offs
   ret i32 %conv
 }
 
+; Same as above but with store.
+
+; CHECK-LABEL: store_i32_no_offset:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK-NEXT: i32.atomic.store 0($0), $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @store_i32_no_offset(i32 *%p, i32 %v) {
+  store atomic i32 %v, i32* %p seq_cst, align 4
+  ret void
+}
+
+; Same as above but with store.
+
+; CHECK-LABEL: store_i32_with_folded_offset:
+; CHECK: i32.atomic.store 24($0), $pop0{{$}}
+define void @store_i32_with_folded_offset(i32* %p) {
+  %q = ptrtoint i32* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i32*
+  store atomic i32 0, i32* %s seq_cst, align 4
+  ret void
+}
+
+; Same as above but with store.
+
+; CHECK-LABEL: store_i32_with_folded_gep_offset:
+; CHECK: i32.atomic.store 24($0), $pop0{{$}}
+define void @store_i32_with_folded_gep_offset(i32* %p) {
+  %s = getelementptr inbounds i32, i32* %p, i32 6
+  store atomic i32 0, i32* %s seq_cst, align 4
+  ret void
+}
+
+; Same as above but with store.
+
+; CHECK-LABEL: store_i32_with_unfolded_gep_negative_offset:
+; CHECK: i32.const        $push0=, -24{{$}}
+; CHECK: i32.add          $push1=, $0, $pop0{{$}}
+; CHECK: i32.atomic.store 0($pop1), $pop2{{$}}
+define void @store_i32_with_unfolded_gep_negative_offset(i32* %p) {
+  %s = getelementptr inbounds i32, i32* %p, i32 -6
+  store atomic i32 0, i32* %s seq_cst, align 4
+  ret void
+}
+
+; Same as above but with store.
+
+; CHECK-LABEL: store_i32_with_unfolded_offset:
+; CHECK: i32.const        $push0=, 24{{$}}
+; CHECK: i32.add          $push1=, $0, $pop0{{$}}
+; CHECK: i32.atomic.store 0($pop1), $pop2{{$}}
+define void @store_i32_with_unfolded_offset(i32* %p) {
+  %q = ptrtoint i32* %p to i32
+  %r = add nsw i32 %q, 24
+  %s = inttoptr i32 %r to i32*
+  store atomic i32 0, i32* %s seq_cst, align 4
+  ret void
+}
+
+; Same as above but with store.
+
+; CHECK-LABEL: store_i32_with_unfolded_gep_offset:
+; CHECK: i32.const        $push0=, 24{{$}}
+; CHECK: i32.add          $push1=, $0, $pop0{{$}}
+; CHECK: i32.atomic.store 0($pop1), $pop2{{$}}
+define void @store_i32_with_unfolded_gep_offset(i32* %p) {
+  %s = getelementptr i32, i32* %p, i32 6
+  store atomic i32 0, i32* %s seq_cst, align 4
+  ret void
+}
+
+; Same as above but with an i64 store.
+
+; CHECK-LABEL: store_i64_no_offset:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK-NEXT: i64.atomic.store 0($0), $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @store_i64_no_offset(i64 *%p, i64 %v) {
+  store atomic i64 %v, i64* %p seq_cst, align 8
+  ret void
+}
+
+; Same as above but with an i64 store.
+
+; CHECK-LABEL: store_i64_with_folded_offset:
+; CHECK: i64.atomic.store 24($0), $pop0{{$}}
+define void @store_i64_with_folded_offset(i64* %p) {
+  %q = ptrtoint i64* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i64*
+  store atomic i64 0, i64* %s seq_cst, align 8
+  ret void
+}
+
+; Same as above but with an i64 store.
+
+; CHECK-LABEL: store_i64_with_folded_gep_offset:
+; CHECK: i64.atomic.store 24($0), $pop0{{$}}
+define void @store_i64_with_folded_gep_offset(i64* %p) {
+  %s = getelementptr inbounds i64, i64* %p, i32 3
+  store atomic i64 0, i64* %s seq_cst, align 8
+  ret void
+}
+
+; Same as above but with an i64 store.
+
+; CHECK-LABEL: store_i64_with_unfolded_gep_negative_offset:
+; CHECK: i32.const        $push0=, -24{{$}}
+; CHECK: i32.add          $push1=, $0, $pop0{{$}}
+; CHECK: i64.atomic.store 0($pop1), $pop2{{$}}
+define void @store_i64_with_unfolded_gep_negative_offset(i64* %p) {
+  %s = getelementptr inbounds i64, i64* %p, i32 -3
+  store atomic i64 0, i64* %s seq_cst, align 8
+  ret void
+}
+
+; Same as above but with an i64 store.
+
+; CHECK-LABEL: store_i64_with_unfolded_offset:
+; CHECK: i32.const        $push0=, 24{{$}}
+; CHECK: i32.add          $push1=, $0, $pop0{{$}}
+; CHECK: i64.atomic.store 0($pop1), $pop2{{$}}
+define void @store_i64_with_unfolded_offset(i64* %p) {
+  %q = ptrtoint i64* %p to i32
+  %r = add nsw i32 %q, 24
+  %s = inttoptr i32 %r to i64*
+  store atomic i64 0, i64* %s seq_cst, align 8
+  ret void
+}
+
+; Same as above but with an i64 store.
+
+; CHECK-LABEL: store_i64_with_unfolded_gep_offset:
+; CHECK: i32.const        $push0=, 24{{$}}
+; CHECK: i32.add          $push1=, $0, $pop0{{$}}
+; CHECK: i64.atomic.store 0($pop1), $pop2{{$}}
+define void @store_i64_with_unfolded_gep_offset(i64* %p) {
+  %s = getelementptr i64, i64* %p, i32 3
+  store atomic i64 0, i64* %s seq_cst, align 8
+  ret void
+}
+
 ; When loading from a fixed address, materialize a zero.
 
 ; CHECK-LABEL: load_i32_from_numeric_address
@@ -175,6 +319,25 @@ define i32 @load_i32_from_global_address
   ret i32 %t
 }
 
+; CHECK-LABEL: store_i32_to_numeric_address:
+; CHECK-NEXT: i32.const        $push0=, 0{{$}}
+; CHECK-NEXT: i32.const        $push1=, 0{{$}}
+; CHECK-NEXT: i32.atomic.store 42($pop0), $pop1{{$}}
+define void @store_i32_to_numeric_address() {
+  %s = inttoptr i32 42 to i32*
+  store atomic i32 0, i32* %s seq_cst, align 4
+  ret void
+}
+
+; CHECK-LABEL: store_i32_to_global_address:
+; CHECK: i32.const        $push0=, 0{{$}}
+; CHECK: i32.const        $push1=, 0{{$}}
+; CHECK: i32.atomic.store gv($pop0), $pop1{{$}}
+define void @store_i32_to_global_address() {
+  store atomic i32 0, i32* @gv seq_cst, align 4
+  ret void
+}
+
 ; Fold an offset into a sign-extending load.
 
 ; CHECK-LABEL: load_i8_s_with_folded_offset:
@@ -305,3 +468,158 @@ define i8 @ldi8_a1(i8 *%p) {
   %v = load atomic i8, i8* %p seq_cst, align 1
   ret i8 %v
 }
+
+; Fold an offset into a truncating store.
+
+; CHECK-LABEL: store_i8_with_folded_offset:
+; CHECK: i32.atomic.store8 24($0), $pop0{{$}}
+define void @store_i8_with_folded_offset(i8* %p) {
+  %q = ptrtoint i8* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i8*
+  store atomic i8 0, i8* %s seq_cst, align 1
+  ret void
+}
+
+; CHECK-LABEL: store_i16_with_folded_offset:
+; CHECK: i32.atomic.store16 24($0), $pop0{{$}}
+define void @store_i16_with_folded_offset(i16* %p) {
+  %q = ptrtoint i16* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i16*
+  store atomic i16 0, i16* %s seq_cst, align 2
+  ret void
+}
+
+; CHECK-LABEL: store_i8_i64_with_folded_offset:
+; CHECK: i64.atomic.store8 24($0), $1{{$}}
+define void @store_i8_i64_with_folded_offset(i8* %p, i64 %v) {
+  %q = ptrtoint i8* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i8*
+  %t = trunc i64 %v to i8
+  store atomic i8 %t, i8* %s seq_cst, align 1
+  ret void
+}
+
+; CHECK-LABEL: store_i16_i64_with_folded_offset:
+; CHECK: i64.atomic.store16 24($0), $1{{$}}
+define void @store_i16_i64_with_folded_offset(i16* %p, i64 %v) {
+  %q = ptrtoint i16* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i16*
+  %t = trunc i64 %v to i16
+  store atomic i16 %t, i16* %s seq_cst, align 2
+  ret void
+}
+
+; CHECK-LABEL: store_i32_i64_with_folded_offset:
+; CHECK: i64.atomic.store32 24($0), $1{{$}}
+define void @store_i32_i64_with_folded_offset(i32* %p, i64 %v) {
+  %q = ptrtoint i32* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i32*
+  %t = trunc i64 %v to i32
+  store atomic i32 %t, i32* %s seq_cst, align 4
+  ret void
+}
+
+; Fold a gep offset into a truncating store.
+
+; CHECK-LABEL: store_i8_with_folded_gep_offset:
+; CHECK: i32.atomic.store8 24($0), $pop0{{$}}
+define void @store_i8_with_folded_gep_offset(i8* %p) {
+  %s = getelementptr inbounds i8, i8* %p, i32 24
+  store atomic i8 0, i8* %s seq_cst, align 1
+  ret void
+}
+
+; CHECK-LABEL: store_i16_with_folded_gep_offset:
+; CHECK: i32.atomic.store16 48($0), $pop0{{$}}
+define void @store_i16_with_folded_gep_offset(i16* %p) {
+  %s = getelementptr inbounds i16, i16* %p, i32 24
+  store atomic i16 0, i16* %s seq_cst, align 2
+  ret void
+}
+
+; CHECK-LABEL: store_i8_i64_with_folded_gep_offset:
+; CHECK: i64.atomic.store8 24($0), $1{{$}}
+define void @store_i8_i64_with_folded_gep_offset(i8* %p, i64 %v) {
+  %s = getelementptr inbounds i8, i8* %p, i32 24
+  %t = trunc i64 %v to i8
+  store atomic i8 %t, i8* %s seq_cst, align 1
+  ret void
+}
+
+; CHECK-LABEL: store_i16_i64_with_folded_gep_offset:
+; CHECK: i64.atomic.store16 48($0), $1{{$}}
+define void @store_i16_i64_with_folded_gep_offset(i16* %p, i64 %v) {
+  %s = getelementptr inbounds i16, i16* %p, i32 24
+  %t = trunc i64 %v to i16
+  store atomic i16 %t, i16* %s seq_cst, align 2
+  ret void
+}
+
+; CHECK-LABEL: store_i32_i64_with_folded_gep_offset:
+; CHECK: i64.atomic.store32 96($0), $1{{$}}
+define void @store_i32_i64_with_folded_gep_offset(i32* %p, i64 %v) {
+  %s = getelementptr inbounds i32, i32* %p, i32 24
+  %t = trunc i64 %v to i32
+  store atomic i32 %t, i32* %s seq_cst, align 4
+  ret void
+}
+
+; Fold an or_is_add pattern based offset into a truncating store.
+
+; CHECK-LABEL: store_i8_with_folded_or_offset:
+; CHECK: i32.atomic.store8 2($pop{{[0-9]+}}), $pop{{[0-9]+}}{{$}}
+define void @store_i8_with_folded_or_offset(i32 %x) {
+  %and = and i32 %x, -4
+  %p = inttoptr i32 %and to i8*
+  %arrayidx = getelementptr inbounds i8, i8* %p, i32 2
+  store atomic i8 0, i8* %arrayidx seq_cst, align 1
+  ret void
+}
+
+; CHECK-LABEL: store_i16_with_folded_or_offset:
+; CHECK: i32.atomic.store16 4($pop{{[0-9]+}}), $pop{{[0-9]+}}{{$}}
+define void @store_i16_with_folded_or_offset(i32 %x) {
+  %and = and i32 %x, -4
+  %p = inttoptr i32 %and to i16*
+  %arrayidx = getelementptr inbounds i16, i16* %p, i32 2
+  store atomic i16 0, i16* %arrayidx seq_cst, align 2
+  ret void
+}
+
+; CHECK-LABEL: store_i8_i64_with_folded_or_offset:
+; CHECK: i64.atomic.store8 2($pop{{[0-9]+}}), $1{{$}}
+define void @store_i8_i64_with_folded_or_offset(i32 %x, i64 %v) {
+  %and = and i32 %x, -4
+  %p = inttoptr i32 %and to i8*
+  %arrayidx = getelementptr inbounds i8, i8* %p, i32 2
+  %t = trunc i64 %v to i8
+  store atomic i8 %t, i8* %arrayidx seq_cst, align 1
+  ret void
+}
+
+; CHECK-LABEL: store_i16_i64_with_folded_or_offset:
+; CHECK: i64.atomic.store16 4($pop{{[0-9]+}}), $1{{$}}
+define void @store_i16_i64_with_folded_or_offset(i32 %x, i64 %v) {
+  %and = and i32 %x, -4
+  %p = inttoptr i32 %and to i16*
+  %arrayidx = getelementptr inbounds i16, i16* %p, i32 2
+  %t = trunc i64 %v to i16
+  store atomic i16 %t, i16* %arrayidx seq_cst, align 2
+  ret void
+}
+
+; CHECK-LABEL: store_i32_i64_with_folded_or_offset:
+; CHECK: i64.atomic.store32 8($pop{{[0-9]+}}), $1{{$}}
+define void @store_i32_i64_with_folded_or_offset(i32 %x, i64 %v) {
+  %and = and i32 %x, -4
+  %p = inttoptr i32 %and to i32*
+  %arrayidx = getelementptr inbounds i32, i32* %p, i32 2
+  %t = trunc i64 %v to i32
+  store atomic i32 %t, i32* %arrayidx seq_cst, align 4
+  ret void
+}
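
A note on the or_is_add tests above: after %x is masked with -4, its low two
bits are zero, so or-ing in a small constant cannot carry and is exactly an
add -- which is what lets the constant fold into the store's offset field. A
tiny self-check of that identity:

  #include <cassert>
  #include <cstdint>

  void or_is_add_identity(uint32_t x) {
    uint32_t base = x & ~3u;           // low two bits cleared by the mask
    assert((base | 2) == (base + 2));  // or == add when no set bits overlap
  }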

Added: llvm/trunk/test/CodeGen/WebAssembly/store-trunc-atomic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/WebAssembly/store-trunc-atomic.ll?rev=336145&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/WebAssembly/store-trunc-atomic.ll (added)
+++ llvm/trunk/test/CodeGen/WebAssembly/store-trunc-atomic.ll Mon Jul  2 14:22:59 2018
@@ -0,0 +1,46 @@
+; RUN: llc < %s -mattr=+atomics,+sign-ext -asm-verbose=false -disable-wasm-explicit-locals | FileCheck %s
+
+; Test that truncating stores are assembled properly.
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown"
+
+; CHECK-LABEL: trunc_i8_i32:
+; CHECK: i32.atomic.store8 0($0), $1{{$}}
+define void @trunc_i8_i32(i8 *%p, i32 %v) {
+  %t = trunc i32 %v to i8
+  store atomic i8 %t, i8* %p seq_cst, align 1
+  ret void
+}
+
+; CHECK-LABEL: trunc_i16_i32:
+; CHECK: i32.atomic.store16 0($0), $1{{$}}
+define void @trunc_i16_i32(i16 *%p, i32 %v) {
+  %t = trunc i32 %v to i16
+  store atomic i16 %t, i16* %p seq_cst, align 2
+  ret void
+}
+
+; CHECK-LABEL: trunc_i8_i64:
+; CHECK: i64.atomic.store8 0($0), $1{{$}}
+define void @trunc_i8_i64(i8 *%p, i64 %v) {
+  %t = trunc i64 %v to i8
+  store atomic i8 %t, i8* %p seq_cst, align 1
+  ret void
+}
+
+; CHECK-LABEL: trunc_i16_i64:
+; CHECK: i64.atomic.store16 0($0), $1{{$}}
+define void @trunc_i16_i64(i16 *%p, i64 %v) {
+  %t = trunc i64 %v to i16
+  store atomic i16 %t, i16* %p seq_cst, align 2
+  ret void
+}
+
+; CHECK-LABEL: trunc_i32_i64:
+; CHECK: i64.atomic.store32 0($0), $1{{$}}
+define void @trunc_i32_i64(i32 *%p, i64 %v) {
+  %t = trunc i64 %v to i32
+  store atomic i32 %t, i32* %p seq_cst, align 4
+  ret void
+}



