[llvm] b59f135 - Precommit tests from D119844, expanded with additional coverage
Philip Reames via llvm-commits
llvm-commits at lists.llvm.org
Wed Feb 16 07:55:55 PST 2022
Author: Philip Reames
Date: 2022-02-16T07:55:43-08:00
New Revision: b59f135f1602e52cb5cd57c03dd86fdd6b419f51
URL: https://github.com/llvm/llvm-project/commit/b59f135f1602e52cb5cd57c03dd86fdd6b419f51
DIFF: https://github.com/llvm/llvm-project/commit/b59f135f1602e52cb5cd57c03dd86fdd6b419f51.diff
LOG: Precommit tests from D119844, expanded with additional coverage
Added:
llvm/test/Analysis/MemoryDependenceAnalysis/reorder-over-store-atomic.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/Analysis/MemoryDependenceAnalysis/reorder-over-store-atomic.ll b/llvm/test/Analysis/MemoryDependenceAnalysis/reorder-over-store-atomic.ll
new file mode 100644
index 0000000000000..a06c7e0792792
--- /dev/null
+++ b/llvm/test/Analysis/MemoryDependenceAnalysis/reorder-over-store-atomic.ll
@@ -0,0 +1,142 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -gvn -basic-aa < %s | FileCheck %s
+
+@u = global i32 5, align 4
+@w = global i32 10, align 4
+
+; Unordered atomic loads of @w bracket a seq_cst atomic load of @u.
+; The autogenerated CHECK lines record that GVN (with basic-aa) currently
+; keeps both loads of @w -- %l1 is not forwarded to %l2 across the seq_cst
+; load -- presumably the behavior D119844 targets; confirm against that review.
+define i32 @test_load_seq_cst_unordered() {
+; CHECK-LABEL: @test_load_seq_cst_unordered(
+; CHECK-NEXT: [[L1:%.*]] = load atomic i32, i32* @w unordered, align 4
+; CHECK-NEXT: [[LV:%.*]] = load atomic i32, i32* @u seq_cst, align 4
+; CHECK-NEXT: [[L2:%.*]] = load atomic i32, i32* @w unordered, align 4
+; CHECK-NEXT: [[RES_1:%.*]] = sub i32 [[L1]], [[L2]]
+; CHECK-NEXT: [[RES:%.*]] = add i32 [[RES_1]], [[LV]]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %l1 = load atomic i32, i32* @w unordered, align 4
+ %lv = load atomic i32, i32* @u seq_cst, align 4
+ %l2 = load atomic i32, i32* @w unordered, align 4
+ %res.1 = sub i32 %l1, %l2
+ %res = add i32 %res.1, %lv
+ ret i32 %res
+}
+
+; Same shape as the seq_cst variant above, but the intervening load of @u is
+; only acquire.  CHECK lines record that both unordered loads of @w are still
+; kept (no forwarding of %l1 to %l2 across the acquire load).
+define i32 @test_load_acquire_unordered() {
+; CHECK-LABEL: @test_load_acquire_unordered(
+; CHECK-NEXT: [[L1:%.*]] = load atomic i32, i32* @w unordered, align 4
+; CHECK-NEXT: [[LV:%.*]] = load atomic i32, i32* @u acquire, align 4
+; CHECK-NEXT: [[L2:%.*]] = load atomic i32, i32* @w unordered, align 4
+; CHECK-NEXT: [[RES_1:%.*]] = sub i32 [[L1]], [[L2]]
+; CHECK-NEXT: [[RES:%.*]] = add i32 [[RES_1]], [[LV]]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %l1 = load atomic i32, i32* @w unordered, align 4
+ %lv = load atomic i32, i32* @u acquire, align 4
+ %l2 = load atomic i32, i32* @w unordered, align 4
+ %res.1 = sub i32 %l1, %l2
+ %res = add i32 %res.1, %lv
+ ret i32 %res
+}
+
+; Unordered loads of @w bracket a seq_cst store to the unrelated global @u.
+; CHECK lines record that GVN currently does not forward %l1 to %l2 across
+; the seq_cst store, so %res stays a sub of two distinct loads.
+; NOTE(review): name says "cst" but the ordering used is seq_cst -- likely a
+; shortened name rather than a mismatch; confirm against the original commit.
+define i32 @test_store_cst_unordered(i32 %x) {
+; CHECK-LABEL: @test_store_cst_unordered(
+; CHECK-NEXT: [[L1:%.*]] = load atomic i32, i32* @w unordered, align 4
+; CHECK-NEXT: store atomic i32 [[X:%.*]], i32* @u seq_cst, align 4
+; CHECK-NEXT: [[L2:%.*]] = load atomic i32, i32* @w unordered, align 4
+; CHECK-NEXT: [[RES:%.*]] = sub i32 [[L1]], [[L2]]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %l1 = load atomic i32, i32* @w unordered, align 4
+ store atomic i32 %x, i32* @u seq_cst, align 4
+ %l2 = load atomic i32, i32* @w unordered, align 4
+ %res = sub i32 %l1, %l2
+ ret i32 %res
+}
+
+; As above, but the intervening store to @u is only release.  CHECK lines
+; record that both unordered loads of @w still remain in the output.
+define i32 @test_store_release_unordered(i32 %x) {
+; CHECK-LABEL: @test_store_release_unordered(
+; CHECK-NEXT: [[L1:%.*]] = load atomic i32, i32* @w unordered, align 4
+; CHECK-NEXT: store atomic i32 [[X:%.*]], i32* @u release, align 4
+; CHECK-NEXT: [[L2:%.*]] = load atomic i32, i32* @w unordered, align 4
+; CHECK-NEXT: [[RES:%.*]] = sub i32 [[L1]], [[L2]]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %l1 = load atomic i32, i32* @w unordered, align 4
+ store atomic i32 %x, i32* @u release, align 4
+ %l2 = load atomic i32, i32* @w unordered, align 4
+ %res = sub i32 %l1, %l2
+ ret i32 %res
+}
+
+; Two unordered stores to @w separated by a seq_cst store to the unrelated
+; global @u.  CHECK lines record that the first store to @w is not removed
+; (i.e. it is not treated as dead across the seq_cst store).
+define i32 @test_stores_seq_cst_unordered(i32 %x) {
+; CHECK-LABEL: @test_stores_seq_cst_unordered(
+; CHECK-NEXT: store atomic i32 [[X:%.*]], i32* @w unordered, align 4
+; CHECK-NEXT: store atomic i32 [[X]], i32* @u seq_cst, align 4
+; CHECK-NEXT: store atomic i32 0, i32* @w unordered, align 4
+; CHECK-NEXT: ret i32 0
+;
+ store atomic i32 %x, i32* @w unordered, align 4
+ store atomic i32 %x, i32* @u seq_cst, align 4
+ store atomic i32 0, i32* @w unordered, align 4
+ ret i32 0
+}
+
+; As above, but the intervening store to @u is only release.  CHECK lines
+; record that both unordered stores to @w remain in the output.
+define i32 @test_stores_release_unordered(i32 %x) {
+; CHECK-LABEL: @test_stores_release_unordered(
+; CHECK-NEXT: store atomic i32 [[X:%.*]], i32* @w unordered, align 4
+; CHECK-NEXT: store atomic i32 [[X]], i32* @u release, align 4
+; CHECK-NEXT: store atomic i32 0, i32* @w unordered, align 4
+; CHECK-NEXT: ret i32 0
+;
+ store atomic i32 %x, i32* @w unordered, align 4
+ store atomic i32 %x, i32* @u release, align 4
+ store atomic i32 0, i32* @w unordered, align 4
+ ret i32 0
+}
+
+
+; Must respect total order for seq_cst even for unrelated addresses
+; Negative test: all three loads are seq_cst.  Per the comment above, the
+; seq_cst total order must be respected even though @w and @u are unrelated,
+; so no transform is legal here -- CHECK lines show the IR unchanged.
+define i32 @neg_load_seq_cst() {
+; CHECK-LABEL: @neg_load_seq_cst(
+; CHECK-NEXT: [[L1:%.*]] = load atomic i32, i32* @w seq_cst, align 4
+; CHECK-NEXT: [[LV:%.*]] = load atomic i32, i32* @u seq_cst, align 4
+; CHECK-NEXT: [[L2:%.*]] = load atomic i32, i32* @w seq_cst, align 4
+; CHECK-NEXT: [[RES_1:%.*]] = sub i32 [[L1]], [[L2]]
+; CHECK-NEXT: [[RES:%.*]] = add i32 [[RES_1]], [[LV]]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %l1 = load atomic i32, i32* @w seq_cst, align 4
+ %lv = load atomic i32, i32* @u seq_cst, align 4
+ %l2 = load atomic i32, i32* @w seq_cst, align 4
+ %res.1 = sub i32 %l1, %l2
+ %res = add i32 %res.1, %lv
+ ret i32 %res
+}
+
+; Negative test: seq_cst loads of @w around a seq_cst store to @u.  The
+; seq_cst total order forbids reordering even across unrelated addresses;
+; CHECK lines show the IR unchanged.
+define i32 @neg_store_seq_cst(i32 %x) {
+; CHECK-LABEL: @neg_store_seq_cst(
+; CHECK-NEXT: [[L1:%.*]] = load atomic i32, i32* @w seq_cst, align 4
+; CHECK-NEXT: store atomic i32 [[X:%.*]], i32* @u seq_cst, align 4
+; CHECK-NEXT: [[L2:%.*]] = load atomic i32, i32* @w seq_cst, align 4
+; CHECK-NEXT: [[RES:%.*]] = sub i32 [[L1]], [[L2]]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+ %l1 = load atomic i32, i32* @w seq_cst, align 4
+ store atomic i32 %x, i32* @u seq_cst, align 4
+ %l2 = load atomic i32, i32* @w seq_cst, align 4
+ %res = sub i32 %l1, %l2
+ ret i32 %res
+}
+
+; Negative test: all three stores are seq_cst.  The first store to @w may
+; not be eliminated across the seq_cst store to @u (total order must be
+; respected); CHECK lines show the IR unchanged.
+define i32 @neg_stores_seq_cst(i32 %x) {
+; CHECK-LABEL: @neg_stores_seq_cst(
+; CHECK-NEXT: store atomic i32 [[X:%.*]], i32* @w seq_cst, align 4
+; CHECK-NEXT: store atomic i32 [[X]], i32* @u seq_cst, align 4
+; CHECK-NEXT: store atomic i32 0, i32* @w seq_cst, align 4
+; CHECK-NEXT: ret i32 0
+;
+ store atomic i32 %x, i32* @w seq_cst, align 4
+ store atomic i32 %x, i32* @u seq_cst, align 4
+ store atomic i32 0, i32* @w seq_cst, align 4
+ ret i32 0
+}
More information about the llvm-commits
mailing list