[llvm] r320855 - [Hexagon] Fix operand-swapping PatFrag for atomic stores

Krzysztof Parzyszek via llvm-commits llvm-commits@lists.llvm.org
Fri Dec 15 12:13:57 PST 2017


Author: kparzysz
Date: Fri Dec 15 12:13:57 2017
New Revision: 320855

URL: http://llvm.org/viewvc/llvm-project?rev=320855&view=rev
Log:
[Hexagon] Fix operand-swapping PatFrag for atomic stores

PatFrag now stores its atomicity information in bit fields. These fields are
not carried over automatically when a new PatFrag is derived from an existing
one, so they need to be copied to the new PatFrag explicitly.
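
In TableGen terms, the change below replaces the old SwapSt wrapper with
AtomSt, which still swaps the (val, ptr) operands but also copies the
IsAtomic and MemoryVT bit fields from the wrapped fragment. A condensed
excerpt of the patch, for context:

  class AtomSt<PatFrag F>
    : PatFrag<(ops node:$val, node:$ptr), F.Fragment, F.PredicateCode,
              F.OperandTransform> {
    let IsAtomic = F.IsAtomic;   // carry over the atomic-access flag
    let MemoryVT = F.MemoryVT;   // carry over the accessed memory type
  }

  // Example use: select a GP-relative word store for a 32-bit atomic store.
  def: Storea_pat<AtomSt<atomic_store_32>, I32, addrgp, S2_storerigp>;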

Added:
    llvm/trunk/test/CodeGen/Hexagon/intrinsics/atomic_store.ll
Modified:
    llvm/trunk/lib/Target/Hexagon/HexagonPatterns.td

Modified: llvm/trunk/lib/Target/Hexagon/HexagonPatterns.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Hexagon/HexagonPatterns.td?rev=320855&r1=320854&r2=320855&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Hexagon/HexagonPatterns.td (original)
+++ llvm/trunk/lib/Target/Hexagon/HexagonPatterns.td Fri Dec 15 12:13:57 2017
@@ -2142,9 +2142,13 @@ class Stoream_pat<PatFrag Store, PatFrag
 // To use atomic stores with the patterns, they need to have their operands
 // swapped. This relies on the knowledge that the F.Fragment uses names
 // "ptr" and "val".
-class SwapSt<PatFrag F>
+class AtomSt<PatFrag F>
   : PatFrag<(ops node:$val, node:$ptr), F.Fragment, F.PredicateCode,
-            F.OperandTransform>;
+            F.OperandTransform> {
+  let IsAtomic = F.IsAtomic;
+  let MemoryVT = F.MemoryVT;
+}
+
 
 def IMM_BYTE : SDNodeXForm<imm, [{
   // -1 can be represented as 255, etc.
@@ -2261,10 +2265,10 @@ let AddedComplexity = 120 in {
   def: Storea_pat<store,                    I64, addrgp, S2_storerdgp>;
   def: Storea_pat<store,                    F32, addrgp, S2_storerigp>;
   def: Storea_pat<store,                    F64, addrgp, S2_storerdgp>;
-  def: Storea_pat<SwapSt<atomic_store_8>,   I32, addrgp, S2_storerbgp>;
-  def: Storea_pat<SwapSt<atomic_store_16>,  I32, addrgp, S2_storerhgp>;
-  def: Storea_pat<SwapSt<atomic_store_32>,  I32, addrgp, S2_storerigp>;
-  def: Storea_pat<SwapSt<atomic_store_64>,  I64, addrgp, S2_storerdgp>;
+  def: Storea_pat<AtomSt<atomic_store_8>,   I32, addrgp, S2_storerbgp>;
+  def: Storea_pat<AtomSt<atomic_store_16>,  I32, addrgp, S2_storerhgp>;
+  def: Storea_pat<AtomSt<atomic_store_32>,  I32, addrgp, S2_storerigp>;
+  def: Storea_pat<AtomSt<atomic_store_64>,  I64, addrgp, S2_storerdgp>;
 
   def: Stoream_pat<truncstorei8,  I64, addrgp, LoReg,    S2_storerbgp>;
   def: Stoream_pat<truncstorei16, I64, addrgp, LoReg,    S2_storerhgp>;
@@ -2280,10 +2284,10 @@ let AddedComplexity = 110 in {
   def: Storea_pat<store,                    I64, anyimm3, PS_storerdabs>;
   def: Storea_pat<store,                    F32, anyimm2, PS_storeriabs>;
   def: Storea_pat<store,                    F64, anyimm3, PS_storerdabs>;
-  def: Storea_pat<SwapSt<atomic_store_8>,   I32, anyimm0, PS_storerbabs>;
-  def: Storea_pat<SwapSt<atomic_store_16>,  I32, anyimm1, PS_storerhabs>;
-  def: Storea_pat<SwapSt<atomic_store_32>,  I32, anyimm2, PS_storeriabs>;
-  def: Storea_pat<SwapSt<atomic_store_64>,  I64, anyimm3, PS_storerdabs>;
+  def: Storea_pat<AtomSt<atomic_store_8>,   I32, anyimm0, PS_storerbabs>;
+  def: Storea_pat<AtomSt<atomic_store_16>,  I32, anyimm1, PS_storerhabs>;
+  def: Storea_pat<AtomSt<atomic_store_32>,  I32, anyimm2, PS_storeriabs>;
+  def: Storea_pat<AtomSt<atomic_store_64>,  I64, anyimm3, PS_storerdabs>;
 
   def: Stoream_pat<truncstorei8,  I64, anyimm0, LoReg,    PS_storerbabs>;
   def: Stoream_pat<truncstorei16, I64, anyimm1, LoReg,    PS_storerhabs>;
@@ -2413,10 +2417,10 @@ let AddedComplexity = 40 in {
   defm: Storexim_pat<truncstorei32, I64, anyimm2, LoReg,   S2_storeri_io>;
   defm: Storexim_pat<store,         I1,  anyimm0, I1toI32, S2_storerb_io>;
 
-  defm: Storexi_pat<SwapSt<atomic_store_8>,  I32, anyimm0, S2_storerb_io>;
-  defm: Storexi_pat<SwapSt<atomic_store_16>, I32, anyimm1, S2_storerh_io>;
-  defm: Storexi_pat<SwapSt<atomic_store_32>, I32, anyimm2, S2_storeri_io>;
-  defm: Storexi_pat<SwapSt<atomic_store_64>, I64, anyimm3, S2_storerd_io>;
+  defm: Storexi_pat<AtomSt<atomic_store_8>,  I32, anyimm0, S2_storerb_io>;
+  defm: Storexi_pat<AtomSt<atomic_store_16>, I32, anyimm1, S2_storerh_io>;
+  defm: Storexi_pat<AtomSt<atomic_store_32>, I32, anyimm2, S2_storeri_io>;
+  defm: Storexi_pat<AtomSt<atomic_store_64>, I64, anyimm3, S2_storerd_io>;
 }
 
 // Reg+Reg
@@ -2457,10 +2461,10 @@ let AddedComplexity = 10 in {
   def: Storexim_base_pat<truncstorei32, I64, LoReg,   S2_storeri_io>;
   def: Storexim_base_pat<store,         I1,  I1toI32, S2_storerb_io>;
 
-  def: Storexi_base_pat<SwapSt<atomic_store_8>,   I32, S2_storerb_io>;
-  def: Storexi_base_pat<SwapSt<atomic_store_16>,  I32, S2_storerh_io>;
-  def: Storexi_base_pat<SwapSt<atomic_store_32>,  I32, S2_storeri_io>;
-  def: Storexi_base_pat<SwapSt<atomic_store_64>,  I64, S2_storerd_io>;
+  def: Storexi_base_pat<AtomSt<atomic_store_8>,   I32, S2_storerb_io>;
+  def: Storexi_base_pat<AtomSt<atomic_store_16>,  I32, S2_storerh_io>;
+  def: Storexi_base_pat<AtomSt<atomic_store_32>,  I32, S2_storeri_io>;
+  def: Storexi_base_pat<AtomSt<atomic_store_64>,  I64, S2_storerd_io>;
 }
 
 // HVX stores

Added: llvm/trunk/test/CodeGen/Hexagon/intrinsics/atomic_store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/intrinsics/atomic_store.ll?rev=320855&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/intrinsics/atomic_store.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/intrinsics/atomic_store.ll Fri Dec 15 12:13:57 2017
@@ -0,0 +1,68 @@
+; RUN: sed -e "s/ORDER/unordered/" %s | llc -march=hexagon | FileCheck %s
+; RUN: sed -e "s/ORDER/monotonic/" %s | llc -march=hexagon | FileCheck %s
+; RUN: sed -e "s/ORDER/release/" %s | llc -march=hexagon | FileCheck %s
+; RUN: sed -e "s/ORDER/seq_cst/" %s | llc -march=hexagon | FileCheck %s
+
+%struct.Obj = type { [100 x i32] }
+
+@i8Src   = global i8 0,  align 1
+@i8Dest  = global i8 0,  align 1
+@i16Src  = global i16 0, align 2
+@i16Dest = global i16 0, align 2
+@i32Src  = global i32 0, align 4
+@i32Dest = global i32 0, align 4
+@i64Src  = global i64 0, align 8
+@i64Dest = global i64 0, align 8
+@ptrSrc  = global %struct.Obj* null, align 4
+@ptrDest = global %struct.Obj* null, align 4
+
+define void @store_i8() #0 {
+entry:
+  %i8Tmp = load i8, i8* @i8Src, align 1
+  store atomic i8 %i8Tmp, i8* @i8Dest ORDER, align 1
+  ret void
+}
+; CHECK-LABEL: store_i8:
+; CHECK: [[TMP_REG:r[0-9]+]] = memub(gp+#i8Src)
+; CHECK: memb(gp+#i8Dest) = [[TMP_REG]]
+
+define void @store_i16() #0 {
+entry:
+  %i16Tmp = load i16, i16* @i16Src, align 2
+  store atomic i16 %i16Tmp, i16* @i16Dest ORDER, align 2
+  ret void
+}
+; CHECK-LABEL: store_i16:
+; CHECK: [[TMP_REG:r[0-9]+]] = memuh(gp+#i16Src)
+; CHECK: memh(gp+#i16Dest) = [[TMP_REG]]
+
+define void @store_i32() #0 {
+entry:
+  %i32Tmp = load i32, i32* @i32Src, align 4
+  store atomic i32 %i32Tmp, i32* @i32Dest ORDER, align 4
+  ret void
+}
+; CHECK-LABEL: store_i32:
+; CHECK: [[TMP_REG:r[0-9]+]] = memw(gp+#i32Src)
+; CHECK: memw(gp+#i32Dest) = [[TMP_REG]]
+
+define void @store_i64() #0 {
+entry:
+  %i64Tmp = load i64, i64* @i64Src, align 8
+  store atomic i64 %i64Tmp, i64* @i64Dest ORDER, align 8
+  ret void
+}
+; CHECK-LABEL: store_i64:
+; CHECK: [[TMP_REG:r[0-9]+:[0-9]+]] = memd(gp+#i64Src)
+; CHECK: memd(gp+#i64Dest) = [[TMP_REG]]
+
+define void @store_ptr() #0 {
+entry:
+  %ptrTmp = load i32, i32* bitcast (%struct.Obj** @ptrSrc to i32*), align 4
+  store atomic i32 %ptrTmp, i32* bitcast (%struct.Obj** @ptrDest to i32*) ORDER, align 4
+  ret void
+}
+; CHECK-LABEL: store_ptr:
+; CHECK: [[TMP_REG:r[0-9]+]] = memw(gp+#ptrSrc)
+; CHECK: memw(gp+#ptrDest) = [[TMP_REG]]
+



