[llvm] 47f3dc6 - [LoongArch] Add codegen support for atomic fence, atomic load and atomic store

Weining Lu via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 13 00:27:43 PDT 2022


Author: gonglingqin
Date: 2022-07-13T15:25:45+08:00
New Revision: 47f3dc6d49065bf244ab32d6937fb7e35c057db7

URL: https://github.com/llvm/llvm-project/commit/47f3dc6d49065bf244ab32d6937fb7e35c057db7
DIFF: https://github.com/llvm/llvm-project/commit/47f3dc6d49065bf244ab32d6937fb7e35c057db7.diff

LOG: [LoongArch] Add codegen support for atomic fence, atomic load and atomic store

Differential Revision: https://reviews.llvm.org/D128901
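
Summary of the approach (as exercised by the tests added below): the AtomicExpand pass is run before instruction selection, ordered atomic loads and stores are rewritten into monotonic accesses bracketed by explicit fences, and fences plus naturally aligned atomic loads/stores up to the GR length are then selected to plain dbar 0 / ld.* / st.* instructions, while wider accesses fall back to __atomic_* libcalls. A minimal IR sketch of code this patch now compiles with llc --mtriple=loongarch64 (the function name is illustrative; the expected instructions are taken from the added tests):

  define i32 @flag_update(ptr %p, i32 %v) nounwind {
    fence seq_cst                                   ; -> dbar 0
    %old = load atomic i32, ptr %p acquire, align 4 ; -> ld.w + dbar 0
    store atomic i32 %v, ptr %p release, align 4    ; -> dbar 0 + st.w
    ret i32 %old
  }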

Added: 
    llvm/test/CodeGen/LoongArch/ir-instruction/fence.ll
    llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll
    llvm/test/Transforms/AtomicExpand/LoongArch/lit.local.cfg
    llvm/test/Transforms/AtomicExpand/LoongArch/load-store-atomic.ll

Modified: 
    llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
    llvm/lib/Target/LoongArch/LoongArchISelLowering.h
    llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
    llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 93207eb78e4c..5c2652114375 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -95,6 +95,8 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
 
   setBooleanContents(ZeroOrOneBooleanContent);
 
+  setMaxAtomicSizeInBitsSupported(Subtarget.getGRLen());
+
   // Function alignments.
   const Align FunctionAlignment(4);
   setMinFunctionAlignment(FunctionAlignment);

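Setting the maximum supported atomic size to Subtarget.getGRLen() tells AtomicExpand that only accesses up to the native GR length (32 bits on LA32, 64 bits on LA64) are handled inline; wider atomic loads and stores are converted into __atomic_* libcalls. A small IR sketch of the LA32 fallback (the function name is illustrative; the expected expansion matches the added tests):

  define i64 @wide_acquire_load(ptr %p) {
    ; On loongarch32 an i64 atomic exceeds the 32-bit GR length, so
    ; AtomicExpand emits: call i64 @__atomic_load_8(ptr %p, i32 2)
    ; (2 encodes acquire). On loongarch64 it stays a native ld.d + dbar 0.
    %v = load atomic i64, ptr %p acquire, align 8
    ret i64 %v
  }
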
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
index 03afd1cb55f8..be58660893eb 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.h
@@ -109,6 +109,10 @@ class LoongArchTargetLowering : public TargetLowering {
 
   bool isFPImmLegal(const APFloat &Imm, EVT VT,
                     bool ForCodeSize) const override;
+
+  bool shouldInsertFencesForAtomic(const Instruction *I) const override {
+    return isa<LoadInst>(I) || isa<StoreInst>(I);
+  }
 };
 
 } // end namespace llvm

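Returning true from shouldInsertFencesForAtomic for loads and stores makes AtomicExpand rewrite ordered accesses into monotonic ones surrounded by explicit fence instructions before instruction selection, so the .td patterns below only need to match monotonic accesses plus atomic_fence. A sketch of the resulting IR for an acquire load (same shape as the AtomicExpand test added below; the function name is illustrative):

  define i8 @load_acquire_i8_expanded(ptr %p) {
    ; was: %v = load atomic i8, ptr %p acquire, align 1
    %v = load atomic i8, ptr %p monotonic, align 1
    fence acquire
    ret i8 %v
  }
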
diff --git a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
index fa0b20ea1172..e3286f6590cc 100644
--- a/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchInstrInfo.td
@@ -827,6 +827,23 @@ defm : StPat<truncstorei32, ST_W, GPR, i64>;
 defm : StPat<store, ST_D, GPR, i64>;
 } // Predicates = [IsLA64]
 
+/// Atomic loads and stores
+
+def : Pat<(atomic_fence timm, timm), (DBAR 0)>;
+
+defm : LdPat<atomic_load_8, LD_B>;
+defm : LdPat<atomic_load_16, LD_H>;
+defm : LdPat<atomic_load_32, LD_W>;
+
+defm : StPat<atomic_store_8, ST_B, GPR, GRLenVT>;
+defm : StPat<atomic_store_16, ST_H, GPR, GRLenVT>;
+defm : StPat<atomic_store_32, ST_W, GPR, i32>, Requires<[IsLA32]>;
+let Predicates = [IsLA64] in {
+defm : LdPat<atomic_load_64, LD_D>;
+defm : StPat<atomic_store_32, ST_W, GPR, i64>;
+defm : StPat<atomic_store_64, ST_D, GPR, i64>;
+} // Predicates = [IsLA64]
+
 /// Other pseudo-instructions
 
 // Pessimistically assume the stack pointer will be clobbered

diff --git a/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp b/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp
index 3a1a46a9e624..468c4f43cb90 100644
--- a/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchTargetMachine.cpp
@@ -102,6 +102,7 @@ class LoongArchPassConfig : public TargetPassConfig {
     return getTM<LoongArchTargetMachine>();
   }
 
+  void addIRPasses() override;
   bool addInstSelector() override;
 };
 } // namespace
@@ -111,6 +112,12 @@ LoongArchTargetMachine::createPassConfig(PassManagerBase &PM) {
   return new LoongArchPassConfig(*this, PM);
 }
 
+void LoongArchPassConfig::addIRPasses() {
+  addPass(createAtomicExpandPass());
+
+  TargetPassConfig::addIRPasses();
+}
+
 bool LoongArchPassConfig::addInstSelector() {
   addPass(createLoongArchISelDag(getLoongArchTargetMachine()));
 

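Overriding addIRPasses schedules AtomicExpandPass ahead of the common IR passes, so by the time instruction selection runs a release store has already been split into a fence plus a monotonic store. A sketch of the expanded form (mirrors the added AtomicExpand test; the function name is illustrative):

  define void @store_release_i32_expanded(ptr %p, i32 %v) {
    ; was: store atomic i32 %v, ptr %p release, align 4
    fence release
    store atomic i32 %v, ptr %p monotonic, align 4
    ret void
  }
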
diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/fence.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/fence.ll
new file mode 100644
index 000000000000..f8c98bbc7138
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/fence.ll
@@ -0,0 +1,58 @@
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+define void @fence_acquire() nounwind {
+; LA32-LABEL: fence_acquire:
+; LA32:       # %bb.0:
+; LA32-NEXT:    dbar 0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fence_acquire:
+; LA64:       # %bb.0:
+; LA64-NEXT:    dbar 0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  fence acquire
+  ret void
+}
+
+define void @fence_release() nounwind {
+; LA32-LABEL: fence_release:
+; LA32:       # %bb.0:
+; LA32-NEXT:    dbar 0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fence_release:
+; LA64:       # %bb.0:
+; LA64-NEXT:    dbar 0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  fence release
+  ret void
+}
+
+define void @fence_acq_rel() nounwind {
+; LA32-LABEL: fence_acq_rel:
+; LA32:       # %bb.0:
+; LA32-NEXT:    dbar 0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fence_acq_rel:
+; LA64:       # %bb.0:
+; LA64-NEXT:    dbar 0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  fence acq_rel
+  ret void
+}
+
+define void @fence_seq_cst() nounwind {
+; LA32-LABEL: fence_seq_cst:
+; LA32:       # %bb.0:
+; LA32-NEXT:    dbar 0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: fence_seq_cst:
+; LA64:       # %bb.0:
+; LA64-NEXT:    dbar 0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  fence seq_cst
+  ret void
+}

diff --git a/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll b/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll
new file mode 100644
index 000000000000..1f06c818acf2
--- /dev/null
+++ b/llvm/test/CodeGen/LoongArch/ir-instruction/load-store-atomic.ll
@@ -0,0 +1,143 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
+; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
+
+define i8 @load_acquire_i8(ptr %ptr) {
+; LA32-LABEL: load_acquire_i8:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ld.b $a0, $a0, 0
+; LA32-NEXT:    dbar 0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: load_acquire_i8:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ld.b $a0, $a0, 0
+; LA64-NEXT:    dbar 0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %val = load atomic i8, ptr %ptr acquire, align 1
+  ret i8 %val
+}
+
+define i16 @load_acquire_i16(ptr %ptr) {
+; LA32-LABEL: load_acquire_i16:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ld.h $a0, $a0, 0
+; LA32-NEXT:    dbar 0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: load_acquire_i16:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ld.h $a0, $a0, 0
+; LA64-NEXT:    dbar 0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %val = load atomic i16, ptr %ptr acquire, align 2
+  ret i16 %val
+}
+
+define i32 @load_acquire_i32(ptr %ptr) {
+; LA32-LABEL: load_acquire_i32:
+; LA32:       # %bb.0:
+; LA32-NEXT:    ld.w $a0, $a0, 0
+; LA32-NEXT:    dbar 0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: load_acquire_i32:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ld.w $a0, $a0, 0
+; LA64-NEXT:    dbar 0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %val = load atomic i32, ptr %ptr acquire, align 4
+  ret i32 %val
+}
+
+define i64 @load_acquire_i64(ptr %ptr) {
+; LA32-LABEL: load_acquire_i64:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    .cfi_def_cfa_offset 16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    .cfi_offset 1, -4
+; LA32-NEXT:    ori $a1, $zero, 2
+; LA32-NEXT:    bl __atomic_load_8
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: load_acquire_i64:
+; LA64:       # %bb.0:
+; LA64-NEXT:    ld.d $a0, $a0, 0
+; LA64-NEXT:    dbar 0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  %val = load atomic i64, ptr %ptr acquire, align 8
+  ret i64 %val
+}
+
+define void @store_release_i8(ptr %ptr, i8 signext %v) {
+; LA32-LABEL: store_release_i8:
+; LA32:       # %bb.0:
+; LA32-NEXT:    dbar 0
+; LA32-NEXT:    st.b $a0, $a1, 0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: store_release_i8:
+; LA64:       # %bb.0:
+; LA64-NEXT:    dbar 0
+; LA64-NEXT:    st.b $a0, $a1, 0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  store atomic i8 %v, ptr %ptr release, align 1
+  ret void
+}
+
+define void @store_release_i16(ptr %ptr, i16 signext %v) {
+; LA32-LABEL: store_release_i16:
+; LA32:       # %bb.0:
+; LA32-NEXT:    dbar 0
+; LA32-NEXT:    st.h $a0, $a1, 0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: store_release_i16:
+; LA64:       # %bb.0:
+; LA64-NEXT:    dbar 0
+; LA64-NEXT:    st.h $a0, $a1, 0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  store atomic i16 %v, ptr %ptr release, align 2
+  ret void
+}
+
+define void @store_release_i32(ptr %ptr, i32 signext %v) {
+; LA32-LABEL: store_release_i32:
+; LA32:       # %bb.0:
+; LA32-NEXT:    dbar 0
+; LA32-NEXT:    st.w $a0, $a1, 0
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: store_release_i32:
+; LA64:       # %bb.0:
+; LA64-NEXT:    dbar 0
+; LA64-NEXT:    st.w $a0, $a1, 0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  store atomic i32 %v, ptr %ptr release, align 4
+  ret void
+}
+
+define void @store_release_i64(ptr %ptr, i64 %v) {
+; LA32-LABEL: store_release_i64:
+; LA32:       # %bb.0:
+; LA32-NEXT:    addi.w $sp, $sp, -16
+; LA32-NEXT:    .cfi_def_cfa_offset 16
+; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
+; LA32-NEXT:    .cfi_offset 1, -4
+; LA32-NEXT:    ori $a3, $zero, 3
+; LA32-NEXT:    bl __atomic_store_8
+; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
+; LA32-NEXT:    addi.w $sp, $sp, 16
+; LA32-NEXT:    jirl $zero, $ra, 0
+;
+; LA64-LABEL: store_release_i64:
+; LA64:       # %bb.0:
+; LA64-NEXT:    dbar 0
+; LA64-NEXT:    st.d $a0, $a1, 0
+; LA64-NEXT:    jirl $zero, $ra, 0
+  store atomic i64 %v, ptr %ptr release, align 8
+  ret void
+}

diff --git a/llvm/test/Transforms/AtomicExpand/LoongArch/lit.local.cfg b/llvm/test/Transforms/AtomicExpand/LoongArch/lit.local.cfg
new file mode 100644
index 000000000000..31902e060f32
--- /dev/null
+++ b/llvm/test/Transforms/AtomicExpand/LoongArch/lit.local.cfg
@@ -0,0 +1,5 @@
+config.suffixes = ['.ll']
+
+targets = set(config.root.targets_to_build.split())
+if not 'LoongArch' in targets:
+    config.unsupported = True

diff --git a/llvm/test/Transforms/AtomicExpand/LoongArch/load-store-atomic.ll b/llvm/test/Transforms/AtomicExpand/LoongArch/load-store-atomic.ll
new file mode 100644
index 000000000000..4acf9761421a
--- /dev/null
+++ b/llvm/test/Transforms/AtomicExpand/LoongArch/load-store-atomic.ll
@@ -0,0 +1,121 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S --mtriple=loongarch32 --atomic-expand %s | FileCheck %s --check-prefix=LA32
+; RUN: opt -S --mtriple=loongarch64 --atomic-expand %s | FileCheck %s --check-prefix=LA64
+
+define i8 @load_acquire_i8(ptr %ptr) {
+; LA32-LABEL: @load_acquire_i8(
+; LA32-NEXT:    [[VAL:%.*]] = load atomic i8, ptr [[PTR:%.*]] monotonic, align 1
+; LA32-NEXT:    fence acquire
+; LA32-NEXT:    ret i8 [[VAL]]
+;
+; LA64-LABEL: @load_acquire_i8(
+; LA64-NEXT:    [[VAL:%.*]] = load atomic i8, ptr [[PTR:%.*]] monotonic, align 1
+; LA64-NEXT:    fence acquire
+; LA64-NEXT:    ret i8 [[VAL]]
+;
+  %val = load atomic i8, ptr %ptr acquire, align 1
+  ret i8 %val
+}
+
+define i16 @load_acquire_i16(ptr %ptr) {
+; LA32-LABEL: @load_acquire_i16(
+; LA32-NEXT:    [[VAL:%.*]] = load atomic i16, ptr [[PTR:%.*]] monotonic, align 2
+; LA32-NEXT:    fence acquire
+; LA32-NEXT:    ret i16 [[VAL]]
+;
+; LA64-LABEL: @load_acquire_i16(
+; LA64-NEXT:    [[VAL:%.*]] = load atomic i16, ptr [[PTR:%.*]] monotonic, align 2
+; LA64-NEXT:    fence acquire
+; LA64-NEXT:    ret i16 [[VAL]]
+;
+  %val = load atomic i16, ptr %ptr acquire, align 2
+  ret i16 %val
+}
+
+define i32 @load_acquire_i32(ptr %ptr) {
+; LA32-LABEL: @load_acquire_i32(
+; LA32-NEXT:    [[VAL:%.*]] = load atomic i32, ptr [[PTR:%.*]] monotonic, align 4
+; LA32-NEXT:    fence acquire
+; LA32-NEXT:    ret i32 [[VAL]]
+;
+; LA64-LABEL: @load_acquire_i32(
+; LA64-NEXT:    [[VAL:%.*]] = load atomic i32, ptr [[PTR:%.*]] monotonic, align 4
+; LA64-NEXT:    fence acquire
+; LA64-NEXT:    ret i32 [[VAL]]
+;
+  %val = load atomic i32, ptr %ptr acquire, align 4
+  ret i32 %val
+}
+
+define i64 @load_acquire_i64(ptr %ptr) {
+; LA32-LABEL: @load_acquire_i64(
+; LA32-NEXT:    [[TMP1:%.*]] = call i64 @__atomic_load_8(ptr [[PTR:%.*]], i32 2)
+; LA32-NEXT:    ret i64 [[TMP1]]
+;
+; LA64-LABEL: @load_acquire_i64(
+; LA64-NEXT:    [[VAL:%.*]] = load atomic i64, ptr [[PTR:%.*]] monotonic, align 8
+; LA64-NEXT:    fence acquire
+; LA64-NEXT:    ret i64 [[VAL]]
+;
+  %val = load atomic i64, ptr %ptr acquire, align 8
+  ret i64 %val
+}
+
+define void @store_release_i8(ptr %ptr, i8 signext %v) {
+; LA32-LABEL: @store_release_i8(
+; LA32-NEXT:    fence release
+; LA32-NEXT:    store atomic i8 [[V:%.*]], ptr [[PTR:%.*]] monotonic, align 1
+; LA32-NEXT:    ret void
+;
+; LA64-LABEL: @store_release_i8(
+; LA64-NEXT:    fence release
+; LA64-NEXT:    store atomic i8 [[V:%.*]], ptr [[PTR:%.*]] monotonic, align 1
+; LA64-NEXT:    ret void
+;
+  store atomic i8 %v, ptr %ptr release, align 1
+  ret void
+}
+
+define void @store_release_i16(ptr %ptr, i16 signext %v) {
+; LA32-LABEL: @store_release_i16(
+; LA32-NEXT:    fence release
+; LA32-NEXT:    store atomic i16 [[V:%.*]], ptr [[PTR:%.*]] monotonic, align 2
+; LA32-NEXT:    ret void
+;
+; LA64-LABEL: @store_release_i16(
+; LA64-NEXT:    fence release
+; LA64-NEXT:    store atomic i16 [[V:%.*]], ptr [[PTR:%.*]] monotonic, align 2
+; LA64-NEXT:    ret void
+;
+  store atomic i16 %v, ptr %ptr release, align 2
+  ret void
+}
+
+define void @store_release_i32(ptr %ptr, i32 signext %v) {
+; LA32-LABEL: @store_release_i32(
+; LA32-NEXT:    fence release
+; LA32-NEXT:    store atomic i32 [[V:%.*]], ptr [[PTR:%.*]] monotonic, align 4
+; LA32-NEXT:    ret void
+;
+; LA64-LABEL: @store_release_i32(
+; LA64-NEXT:    fence release
+; LA64-NEXT:    store atomic i32 [[V:%.*]], ptr [[PTR:%.*]] monotonic, align 4
+; LA64-NEXT:    ret void
+;
+  store atomic i32 %v, ptr %ptr release, align 4
+  ret void
+}
+
+define void @store_release_i64(ptr %ptr, i64 %v) {
+; LA32-LABEL: @store_release_i64(
+; LA32-NEXT:    call void @__atomic_store_8(ptr [[PTR:%.*]], i64 [[V:%.*]], i32 3)
+; LA32-NEXT:    ret void
+;
+; LA64-LABEL: @store_release_i64(
+; LA64-NEXT:    fence release
+; LA64-NEXT:    store atomic i64 [[V:%.*]], ptr [[PTR:%.*]] monotonic, align 8
+; LA64-NEXT:    ret void
+;
+  store atomic i64 %v, ptr %ptr release, align 8
+  ret void
+}
