[llvm] r353966 - [Tests] RMW folding tests w/unordered atomic operations

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 13 10:41:54 PST 2019


Author: reames
Date: Wed Feb 13 10:41:54 2019
New Revision: 353966

URL: http://llvm.org/viewvc/llvm-project?rev=353966&view=rev
Log:
[Tests] RMW folding tests w/unordered atomic operations

We get a surprising number of these folded today, but some are missed. The main point of this is to strengthen the test set for D57601.
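
For reference, every new test below exercises the same three-instruction idiom: an unordered atomic load, a plain binary operation on the loaded value, and an unordered atomic store back to the same address. A minimal standalone sketch of that pattern (the @sketch_rmw_add name is illustrative; it simply mirrors @rmw_fold_add1 from the diff) is:

  define void @sketch_rmw_add(i64* %p) {
    %old = load atomic i64, i64* %p unordered, align 8    ; atomic read
    %new = add i64 %old, 15                               ; plain arithmetic on the loaded value
    store atomic i64 %new, i64* %p unordered, align 8     ; atomic write-back to the same address
    ret void
  }

Per the CHECK-O3 lines for rmw_fold_add1, the desired folded form at -O3 is a single memory-operand instruction such as `addq $15, (%rdi)`; the "Legal, as expected" comments reflect that unordered semantics only require each individual access to be atomic, not the read-modify-write as a whole.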


Modified:
    llvm/trunk/test/CodeGen/X86/atomic-unordered.ll

Modified: llvm/trunk/test/CodeGen/X86/atomic-unordered.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic-unordered.ll?rev=353966&r1=353965&r2=353966&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic-unordered.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atomic-unordered.ll Wed Feb 13 10:41:54 2019
@@ -1195,3 +1195,621 @@ define i1 @load_fold_icmp3(i64* %p1, i64
   %ret = icmp eq i64 %v, %v2
   ret i1 %ret
 }
+
+; Legal, as expected
+define void @rmw_fold_add1(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_add1:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    addq $15, %rax
+; CHECK-O0-NEXT:    movq %rax, (%rdi)
+; CHECK-O0-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_add1:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    addq $15, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = add i64 %prev, 15
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+; Legal, as expected
+define void @rmw_fold_add2(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_add2:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    addq %rsi, %rax
+; CHECK-O0-NEXT:    movq %rax, (%rdi)
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_add2:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    addq %rsi, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = add i64 %prev, %v
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+; Legal, as expected
+define void @rmw_fold_sub1(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_sub1:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    addq $-15, %rax
+; CHECK-O0-NEXT:    movq %rax, (%rdi)
+; CHECK-O0-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_sub1:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    addq $-15, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = sub i64 %prev, 15
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+; Legal, as expected
+define void @rmw_fold_sub2(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_sub2:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    subq %rsi, %rax
+; CHECK-O0-NEXT:    movq %rax, (%rdi)
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_sub2:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    subq %rsi, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = sub i64 %prev, %v
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+; Legal, as expected
+define void @rmw_fold_mul1(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_mul1:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    leaq (%rax,%rax,4), %rax
+; CHECK-O0-NEXT:    leaq (%rax,%rax,2), %rax
+; CHECK-O0-NEXT:    movq %rax, (%rdi)
+; CHECK-O0-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_mul1:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movq (%rdi), %rax
+; CHECK-O3-NEXT:    leaq (%rax,%rax,4), %rax
+; CHECK-O3-NEXT:    leaq (%rax,%rax,2), %rax
+; CHECK-O3-NEXT:    movq %rax, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = mul i64 %prev, 15
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+; Legal to fold (TODO)
+define void @rmw_fold_mul2(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_mul2:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    imulq %rsi, %rax
+; CHECK-O0-NEXT:    movq %rax, (%rdi)
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_mul2:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movq (%rdi), %rax
+; CHECK-O3-NEXT:    imulq %rsi, %rax
+; CHECK-O3-NEXT:    movq %rax, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = mul i64 %prev, %v
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+; Legal, as expected
+define void @rmw_fold_sdiv1(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_sdiv1:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    movabsq $-8608480567731124087, %rcx # imm = 0x8888888888888889
+; CHECK-O0-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-O0-NEXT:    imulq %rcx
+; CHECK-O0-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; CHECK-O0-NEXT:    addq %rax, %rdx
+; CHECK-O0-NEXT:    movq %rdx, %rcx
+; CHECK-O0-NEXT:    shrq $63, %rcx
+; CHECK-O0-NEXT:    sarq $3, %rdx
+; CHECK-O0-NEXT:    addq %rcx, %rdx
+; CHECK-O0-NEXT:    movq %rdx, (%rdi)
+; CHECK-O0-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_sdiv1:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movq (%rdi), %rcx
+; CHECK-O3-NEXT:    movabsq $-8608480567731124087, %rdx # imm = 0x8888888888888889
+; CHECK-O3-NEXT:    movq %rcx, %rax
+; CHECK-O3-NEXT:    imulq %rdx
+; CHECK-O3-NEXT:    addq %rcx, %rdx
+; CHECK-O3-NEXT:    movq %rdx, %rax
+; CHECK-O3-NEXT:    shrq $63, %rax
+; CHECK-O3-NEXT:    sarq $3, %rdx
+; CHECK-O3-NEXT:    addq %rax, %rdx
+; CHECK-O3-NEXT:    movq %rdx, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = sdiv i64 %prev, 15
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+; Legal, as expected
+define void @rmw_fold_sdiv2(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_sdiv2:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    cqto
+; CHECK-O0-NEXT:    idivq %rsi
+; CHECK-O0-NEXT:    movq %rax, (%rdi)
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_sdiv2:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movq (%rdi), %rax
+; CHECK-O3-NEXT:    cqto
+; CHECK-O3-NEXT:    idivq %rsi
+; CHECK-O3-NEXT:    movq %rax, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = sdiv i64 %prev, %v
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+; Legal, as expected
+define void @rmw_fold_udiv1(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_udiv1:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    movabsq $-8608480567731124087, %rcx # imm = 0x8888888888888889
+; CHECK-O0-NEXT:    mulq %rcx
+; CHECK-O0-NEXT:    shrq $3, %rdx
+; CHECK-O0-NEXT:    movq %rdx, (%rdi)
+; CHECK-O0-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_udiv1:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movq (%rdi), %rax
+; CHECK-O3-NEXT:    movabsq $-8608480567731124087, %rcx # imm = 0x8888888888888889
+; CHECK-O3-NEXT:    mulq %rcx
+; CHECK-O3-NEXT:    shrq $3, %rdx
+; CHECK-O3-NEXT:    movq %rdx, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = udiv i64 %prev, 15
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+; Legal, as expected
+define void @rmw_fold_udiv2(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_udiv2:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    xorl %ecx, %ecx
+; CHECK-O0-NEXT:    movl %ecx, %edx
+; CHECK-O0-NEXT:    divq %rsi
+; CHECK-O0-NEXT:    movq %rax, (%rdi)
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_udiv2:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movq (%rdi), %rax
+; CHECK-O3-NEXT:    xorl %edx, %edx
+; CHECK-O3-NEXT:    divq %rsi
+; CHECK-O3-NEXT:    movq %rax, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = udiv i64 %prev, %v
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+; Legal, as expected
+define void @rmw_fold_srem1(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_srem1:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    movabsq $-8608480567731124087, %rcx # imm = 0x8888888888888889
+; CHECK-O0-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-O0-NEXT:    imulq %rcx
+; CHECK-O0-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
+; CHECK-O0-NEXT:    addq %rax, %rdx
+; CHECK-O0-NEXT:    movq %rdx, %rcx
+; CHECK-O0-NEXT:    shrq $63, %rcx
+; CHECK-O0-NEXT:    sarq $3, %rdx
+; CHECK-O0-NEXT:    addq %rcx, %rdx
+; CHECK-O0-NEXT:    leaq (%rdx,%rdx,4), %rcx
+; CHECK-O0-NEXT:    leaq (%rcx,%rcx,2), %rcx
+; CHECK-O0-NEXT:    subq %rcx, %rax
+; CHECK-O0-NEXT:    movq %rax, (%rdi)
+; CHECK-O0-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_srem1:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movq (%rdi), %rcx
+; CHECK-O3-NEXT:    movabsq $-8608480567731124087, %rdx # imm = 0x8888888888888889
+; CHECK-O3-NEXT:    movq %rcx, %rax
+; CHECK-O3-NEXT:    imulq %rdx
+; CHECK-O3-NEXT:    addq %rcx, %rdx
+; CHECK-O3-NEXT:    movq %rdx, %rax
+; CHECK-O3-NEXT:    shrq $63, %rax
+; CHECK-O3-NEXT:    sarq $3, %rdx
+; CHECK-O3-NEXT:    addq %rax, %rdx
+; CHECK-O3-NEXT:    leaq (%rdx,%rdx,4), %rax
+; CHECK-O3-NEXT:    leaq (%rax,%rax,2), %rax
+; CHECK-O3-NEXT:    subq %rax, %rcx
+; CHECK-O3-NEXT:    movq %rcx, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = srem i64 %prev, 15
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+; Legal, as expected
+define void @rmw_fold_srem2(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_srem2:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    cqto
+; CHECK-O0-NEXT:    idivq %rsi
+; CHECK-O0-NEXT:    movq %rdx, (%rdi)
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_srem2:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movq (%rdi), %rax
+; CHECK-O3-NEXT:    cqto
+; CHECK-O3-NEXT:    idivq %rsi
+; CHECK-O3-NEXT:    movq %rdx, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = srem i64 %prev, %v
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+; Legal, as expected
+define void @rmw_fold_urem1(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_urem1:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    movabsq $-8608480567731124087, %rcx # imm = 0x8888888888888889
+; CHECK-O0-NEXT:    movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-O0-NEXT:    mulq %rcx
+; CHECK-O0-NEXT:    shrq $3, %rdx
+; CHECK-O0-NEXT:    leaq (%rdx,%rdx,4), %rax
+; CHECK-O0-NEXT:    leaq (%rax,%rax,2), %rax
+; CHECK-O0-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
+; CHECK-O0-NEXT:    subq %rax, %rcx
+; CHECK-O0-NEXT:    movq %rcx, (%rdi)
+; CHECK-O0-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_urem1:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movq (%rdi), %rcx
+; CHECK-O3-NEXT:    movabsq $-8608480567731124087, %rdx # imm = 0x8888888888888889
+; CHECK-O3-NEXT:    movq %rcx, %rax
+; CHECK-O3-NEXT:    mulq %rdx
+; CHECK-O3-NEXT:    shrq $3, %rdx
+; CHECK-O3-NEXT:    leaq (%rdx,%rdx,4), %rax
+; CHECK-O3-NEXT:    leaq (%rax,%rax,2), %rax
+; CHECK-O3-NEXT:    subq %rax, %rcx
+; CHECK-O3-NEXT:    movq %rcx, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = urem i64 %prev, 15
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+; Legal, as expected
+define void @rmw_fold_urem2(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_urem2:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    xorl %ecx, %ecx
+; CHECK-O0-NEXT:    movl %ecx, %edx
+; CHECK-O0-NEXT:    divq %rsi
+; CHECK-O0-NEXT:    movq %rdx, (%rdi)
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_urem2:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movq (%rdi), %rax
+; CHECK-O3-NEXT:    xorl %edx, %edx
+; CHECK-O3-NEXT:    divq %rsi
+; CHECK-O3-NEXT:    movq %rdx, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = urem i64 %prev, %v
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+; Legal to fold (TODO)
+define void @rmw_fold_shl1(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_shl1:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    shlq $15, %rax
+; CHECK-O0-NEXT:    movq %rax, (%rdi)
+; CHECK-O0-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_shl1:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movq (%rdi), %rax
+; CHECK-O3-NEXT:    shlq $15, %rax
+; CHECK-O3-NEXT:    movq %rax, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = shl i64 %prev, 15
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+; Legal to fold (TODO)
+define void @rmw_fold_shl2(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_shl2:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    movb %sil, %cl
+; CHECK-O0-NEXT:    shlq %cl, %rax
+; CHECK-O0-NEXT:    movq %rax, (%rdi)
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_shl2:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movq %rsi, %rcx
+; CHECK-O3-NEXT:    movq (%rdi), %rax
+; CHECK-O3-NEXT:    # kill: def $cl killed $cl killed $rcx
+; CHECK-O3-NEXT:    shlq %cl, %rax
+; CHECK-O3-NEXT:    movq %rax, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = shl i64 %prev, %v
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+; Legal to fold (TODO)
+define void @rmw_fold_lshr1(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_lshr1:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    shrq $15, %rax
+; CHECK-O0-NEXT:    movq %rax, (%rdi)
+; CHECK-O0-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_lshr1:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movq (%rdi), %rax
+; CHECK-O3-NEXT:    shrq $15, %rax
+; CHECK-O3-NEXT:    movq %rax, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = lshr i64 %prev, 15
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+; Legal to fold (TODO)
+define void @rmw_fold_lshr2(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_lshr2:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    movb %sil, %cl
+; CHECK-O0-NEXT:    shrq %cl, %rax
+; CHECK-O0-NEXT:    movq %rax, (%rdi)
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_lshr2:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movq %rsi, %rcx
+; CHECK-O3-NEXT:    movq (%rdi), %rax
+; CHECK-O3-NEXT:    # kill: def $cl killed $cl killed $rcx
+; CHECK-O3-NEXT:    shrq %cl, %rax
+; CHECK-O3-NEXT:    movq %rax, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = lshr i64 %prev, %v
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+; Legal to fold (TODO)
+define void @rmw_fold_ashr1(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_ashr1:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    sarq $15, %rax
+; CHECK-O0-NEXT:    movq %rax, (%rdi)
+; CHECK-O0-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_ashr1:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movq (%rdi), %rax
+; CHECK-O3-NEXT:    sarq $15, %rax
+; CHECK-O3-NEXT:    movq %rax, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = ashr i64 %prev, 15
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+; Legal to fold (TODO)
+define void @rmw_fold_ashr2(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_ashr2:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    movb %sil, %cl
+; CHECK-O0-NEXT:    sarq %cl, %rax
+; CHECK-O0-NEXT:    movq %rax, (%rdi)
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_ashr2:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    movq %rsi, %rcx
+; CHECK-O3-NEXT:    movq (%rdi), %rax
+; CHECK-O3-NEXT:    # kill: def $cl killed $cl killed $rcx
+; CHECK-O3-NEXT:    sarq %cl, %rax
+; CHECK-O3-NEXT:    movq %rax, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = ashr i64 %prev, %v
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+; Legal, as expected
+define void @rmw_fold_and1(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_and1:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    movl %eax, %ecx
+; CHECK-O0-NEXT:    andl $15, %ecx
+; CHECK-O0-NEXT:    movl %ecx, %eax
+; CHECK-O0-NEXT:    movq %rax, (%rdi)
+; CHECK-O0-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_and1:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    andq $15, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = and i64 %prev, 15
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+; Legal, as expected
+define void @rmw_fold_and2(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_and2:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    andq %rsi, %rax
+; CHECK-O0-NEXT:    movq %rax, (%rdi)
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_and2:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    andq %rsi, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = and i64 %prev, %v
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+; Legal, as expected
+define void @rmw_fold_or1(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_or1:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    orq $15, %rax
+; CHECK-O0-NEXT:    movq %rax, (%rdi)
+; CHECK-O0-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_or1:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    orq $15, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = or i64 %prev, 15
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+; Legal, as expected
+define void @rmw_fold_or2(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_or2:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    orq %rsi, %rax
+; CHECK-O0-NEXT:    movq %rax, (%rdi)
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_or2:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    orq %rsi, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = or i64 %prev, %v
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+; Legal, as expected
+define void @rmw_fold_xor1(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_xor1:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    xorq $15, %rax
+; CHECK-O0-NEXT:    movq %rax, (%rdi)
+; CHECK-O0-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_xor1:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    xorq $15, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = xor i64 %prev, 15
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+; Legal, as expected
+define void @rmw_fold_xor2(i64* %p, i64 %v) {
+; CHECK-O0-LABEL: rmw_fold_xor2:
+; CHECK-O0:       # %bb.0:
+; CHECK-O0-NEXT:    movq (%rdi), %rax
+; CHECK-O0-NEXT:    xorq %rsi, %rax
+; CHECK-O0-NEXT:    movq %rax, (%rdi)
+; CHECK-O0-NEXT:    retq
+;
+; CHECK-O3-LABEL: rmw_fold_xor2:
+; CHECK-O3:       # %bb.0:
+; CHECK-O3-NEXT:    xorq %rsi, (%rdi)
+; CHECK-O3-NEXT:    retq
+  %prev = load atomic i64, i64* %p unordered, align 8
+  %val = xor i64 %prev, %v
+  store atomic i64 %val, i64* %p unordered, align 8
+  ret void
+}
+
+
+