[llvm] r353266 - [Test] Add codegen tests for unordered and monotonic integer operations

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 5 19:19:04 PST 2019


Author: reames
Date: Tue Feb  5 19:19:04 2019
New Revision: 353266

URL: http://llvm.org/viewvc/llvm-project?rev=353266&view=rev
Log:
[Test] Add codegen tests for unordered and monotonic integer operations


Added:
    llvm/trunk/test/CodeGen/X86/atomic-monotonic.ll
    llvm/trunk/test/CodeGen/X86/atomic-unordered.ll
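
Both files exercise the same i8/i16/i32/i64 load and store patterns at -O0 and
-O3, differing only in the atomic ordering (monotonic vs. unordered). As a rough
sketch, the RUN lines can be reproduced by hand; the build path below is an
assumption, adjust it to your own checkout and build:

    # Assumes an LLVM build in ./build; mirrors the -O3 RUN line.
    # Swap in -O0 to inspect the unoptimized lowering instead.
    ./build/bin/llc -O3 -mtriple=x86_64-linux-generic -verify-machineinstrs \
        -mattr=sse2 < llvm/trunk/test/CodeGen/X86/atomic-unordered.ll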

Added: llvm/trunk/test/CodeGen/X86/atomic-monotonic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic-monotonic.ll?rev=353266&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic-monotonic.ll (added)
+++ llvm/trunk/test/CodeGen/X86/atomic-monotonic.ll Tue Feb  5 19:19:04 2019
@@ -0,0 +1,118 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O0 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mattr=sse2 | FileCheck --check-prefix=CHECK-O0 %s
+; RUN: llc -O3 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mattr=sse2 | FileCheck --check-prefix=CHECK-O3 %s
+
+define i8 @load_i8(i8* %ptr) {
+; CHECK-O0-LABEL: load_i8:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movb (%rdi), %al
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: load_i8:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movb (%rdi), %al
+; CHECK-O3-NEXT:    retq
+  %v = load atomic i8, i8* %ptr monotonic, align 1
+  ret i8 %v
+}
+
+define void @store_i8(i8* %ptr, i8 %v) {
+; CHECK-O0-LABEL: store_i8:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movb %sil, %al
+; CHECK-O0-NEXT:    movb %al, (%rdi)
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: store_i8:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movb %sil, (%rdi)
+; CHECK-O3-NEXT:    retq
+  store atomic i8 %v, i8* %ptr monotonic, align 1
+  ret void
+}
+
+define i16 @load_i16(i16* %ptr) {
+; CHECK-O0-LABEL: load_i16:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movw (%rdi), %ax
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: load_i16:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movzwl (%rdi), %eax
+; CHECK-O3-NEXT:    retq
+  %v = load atomic i16, i16* %ptr monotonic, align 2
+  ret i16 %v
+}
+
+
+define void @store_i16(i16* %ptr, i16 %v) {
+; CHECK-O0-LABEL: store_i16:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movw %si, %ax
+; CHECK-O0-NEXT:    movw %ax, (%rdi)
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: store_i16:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movw %si, (%rdi)
+; CHECK-O3-NEXT:    retq
+  store atomic i16 %v, i16* %ptr monotonic, align 2
+  ret void
+}
+
+define i32 @load_i32(i32* %ptr) {
+; CHECK-O0-LABEL: load_i32:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movl (%rdi), %eax
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: load_i32:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movl (%rdi), %eax
+; CHECK-O3-NEXT:    retq
+  %v = load atomic i32, i32* %ptr monotonic, align 4
+  ret i32 %v
+}
+
+define void @store_i32(i32* %ptr, i32 %v) {
+; CHECK-O0-LABEL: store_i32:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movl %esi, (%rdi)
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: store_i32:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movl %esi, (%rdi)
+; CHECK-O3-NEXT:    retq
+  store atomic i32 %v, i32* %ptr monotonic, align 4
+  ret void
+}
+
+define i64 @load_i64(i64* %ptr) {
+; CHECK-O0-LABEL: load_i64:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: load_i64:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movq (%rdi), %rax
+; CHECK-O3-NEXT:    retq
+  %v = load atomic i64, i64* %ptr monotonic, align 8
+  ret i64 %v
+}
+
+define void @store_i64(i64* %ptr, i64 %v) {
+; CHECK-O0-LABEL: store_i64:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq %rsi, (%rdi)
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: store_i64:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movq %rsi, (%rdi)
+; CHECK-O3-NEXT:    retq
+  store atomic i64 %v, i64* %ptr monotonic, align 8
+  ret void
+}

Added: llvm/trunk/test/CodeGen/X86/atomic-unordered.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic-unordered.ll?rev=353266&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic-unordered.ll (added)
+++ llvm/trunk/test/CodeGen/X86/atomic-unordered.ll Tue Feb  5 19:19:04 2019
@@ -0,0 +1,118 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O0 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mattr=sse2 | FileCheck --check-prefix=CHECK-O0 %s
+; RUN: llc -O3 < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mattr=sse2 | FileCheck --check-prefix=CHECK-O3 %s
+
+define i8 @load_i8(i8* %ptr) {
+; CHECK-O0-LABEL: load_i8:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movb (%rdi), %al
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: load_i8:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movb (%rdi), %al
+; CHECK-O3-NEXT:    retq
+  %v = load atomic i8, i8* %ptr unordered, align 1
+  ret i8 %v
+}
+
+define void @store_i8(i8* %ptr, i8 %v) {
+; CHECK-O0-LABEL: store_i8:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movb %sil, %al
+; CHECK-O0-NEXT:    movb %al, (%rdi)
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: store_i8:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movb %sil, (%rdi)
+; CHECK-O3-NEXT:    retq
+  store atomic i8 %v, i8* %ptr unordered, align 1
+  ret void
+}
+
+define i16 @load_i16(i16* %ptr) {
+; CHECK-O0-LABEL: load_i16:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movw (%rdi), %ax
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: load_i16:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movzwl (%rdi), %eax
+; CHECK-O3-NEXT:    retq
+  %v = load atomic i16, i16* %ptr unordered, align 2
+  ret i16 %v
+}
+
+
+define void @store_i16(i16* %ptr, i16 %v) {
+; CHECK-O0-LABEL: store_i16:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movw %si, %ax
+; CHECK-O0-NEXT:    movw %ax, (%rdi)
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: store_i16:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movw %si, (%rdi)
+; CHECK-O3-NEXT:    retq
+  store atomic i16 %v, i16* %ptr unordered, align 2
+  ret void
+}
+
+define i32 @load_i32(i32* %ptr) {
+; CHECK-O0-LABEL: load_i32:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movl (%rdi), %eax
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: load_i32:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movl (%rdi), %eax
+; CHECK-O3-NEXT:    retq
+  %v = load atomic i32, i32* %ptr unordered, align 4
+  ret i32 %v
+}
+
+define void @store_i32(i32* %ptr, i32 %v) {
+; CHECK-O0-LABEL: store_i32:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movl %esi, (%rdi)
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: store_i32:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movl %esi, (%rdi)
+; CHECK-O3-NEXT:    retq
+  store atomic i32 %v, i32* %ptr unordered, align 4
+  ret void
+}
+
+define i64 @load_i64(i64* %ptr) {
+; CHECK-O0-LABEL: load_i64:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: load_i64:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movq (%rdi), %rax
+; CHECK-O3-NEXT:    retq
+  %v = load atomic i64, i64* %ptr unordered, align 8
+  ret i64 %v
+}
+
+define void @store_i64(i64* %ptr, i64 %v) {
+; CHECK-O0-LABEL: store_i64:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq %rsi, (%rdi)
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: store_i64:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movq %rsi, (%rdi)
+; CHECK-O3-NEXT:    retq
+  store atomic i64 %v, i64* %ptr unordered, align 8
+  ret void
+}
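
If the expected assembly changes later, the CHECK lines above are meant to be
regenerated rather than hand-edited, as the autogeneration NOTE indicates. A
minimal sketch, assuming the script is invoked from the top of the checkout
with llc taken from a local build (paths are assumptions):

    # Regenerate the autogenerated assertions for both new tests.
    llvm/trunk/utils/update_llc_test_checks.py --llc-binary ./build/bin/llc \
        llvm/trunk/test/CodeGen/X86/atomic-unordered.ll \
        llvm/trunk/test/CodeGen/X86/atomic-monotonic.ll

    # Run the new tests through lit.
    ./build/bin/llvm-lit -v llvm/trunk/test/CodeGen/X86/atomic-unordered.ll \
        llvm/trunk/test/CodeGen/X86/atomic-monotonic.ll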

More information about the llvm-commits mailing list