[llvm] bce7f94 - [x86] scrub less memory ops in test; NFC

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Fri Feb 11 12:00:21 PST 2022


Author: Sanjay Patel
Date: 2022-02-11T15:00:09-05:00
New Revision: bce7f942bc4cfa1bacca678451ddd5c65be011e7

URL: https://github.com/llvm/llvm-project/commit/bce7f942bc4cfa1bacca678451ddd5c65be011e7
DIFF: https://github.com/llvm/llvm-project/commit/bce7f942bc4cfa1bacca678451ddd5c65be011e7.diff

LOG: [x86] scrub less memory ops in test; NFC

The addresses matter - we want to verify the splitting
and order of the memops.

Added: 
    

Modified: 
    llvm/test/CodeGen/X86/merge-consecutive-stores-nt.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/X86/merge-consecutive-stores-nt.ll b/llvm/test/CodeGen/X86/merge-consecutive-stores-nt.ll
index 2883beb6b01d..07094e2b93d0 100644
--- a/llvm/test/CodeGen/X86/merge-consecutive-stores-nt.ll
+++ b/llvm/test/CodeGen/X86/merge-consecutive-stores-nt.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --no_x86_scrub_mem_shuffle
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X86,X86-SSE2
 ; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefixes=X86,X86-SSE4A
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=X64-SSE,X64-SSE2
@@ -332,10 +332,10 @@ define void @merge_2_v4f32_align1_ntstore(<4 x float>* %a0, <4 x float>* %a1) no
 ; X86-SSE4A:       # %bb.0:
 ; X86-SSE4A-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE4A-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SSE4A-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE4A-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; X86-SSE4A-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; X86-SSE4A-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; X86-SSE4A-NEXT:    movsd (%ecx), %xmm0 # xmm0 = mem[0],zero
+; X86-SSE4A-NEXT:    movsd 8(%ecx), %xmm1 # xmm1 = mem[0],zero
+; X86-SSE4A-NEXT:    movsd 16(%ecx), %xmm2 # xmm2 = mem[0],zero
+; X86-SSE4A-NEXT:    movsd 24(%ecx), %xmm3 # xmm3 = mem[0],zero
 ; X86-SSE4A-NEXT:    movntsd %xmm0, (%eax)
 ; X86-SSE4A-NEXT:    movntsd %xmm1, 8(%eax)
 ; X86-SSE4A-NEXT:    movntsd %xmm3, 24(%eax)
@@ -360,10 +360,10 @@ define void @merge_2_v4f32_align1_ntstore(<4 x float>* %a0, <4 x float>* %a1) no
 ;
 ; X64-SSE4A-LABEL: merge_2_v4f32_align1_ntstore:
 ; X64-SSE4A:       # %bb.0:
-; X64-SSE4A-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X64-SSE4A-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; X64-SSE4A-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; X64-SSE4A-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; X64-SSE4A-NEXT:    movsd (%rdi), %xmm0 # xmm0 = mem[0],zero
+; X64-SSE4A-NEXT:    movsd 8(%rdi), %xmm1 # xmm1 = mem[0],zero
+; X64-SSE4A-NEXT:    movsd 16(%rdi), %xmm2 # xmm2 = mem[0],zero
+; X64-SSE4A-NEXT:    movsd 24(%rdi), %xmm3 # xmm3 = mem[0],zero
 ; X64-SSE4A-NEXT:    movntsd %xmm0, (%rsi)
 ; X64-SSE4A-NEXT:    movntsd %xmm1, 8(%rsi)
 ; X64-SSE4A-NEXT:    movntsd %xmm3, 24(%rsi)
@@ -445,10 +445,10 @@ define void @merge_2_v4f32_align1(<4 x float>* %a0, <4 x float>* %a1) nounwind {
 ; X86-SSE4A:       # %bb.0:
 ; X86-SSE4A-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X86-SSE4A-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-SSE4A-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE4A-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; X86-SSE4A-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; X86-SSE4A-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; X86-SSE4A-NEXT:    movsd (%ecx), %xmm0 # xmm0 = mem[0],zero
+; X86-SSE4A-NEXT:    movsd 8(%ecx), %xmm1 # xmm1 = mem[0],zero
+; X86-SSE4A-NEXT:    movsd 16(%ecx), %xmm2 # xmm2 = mem[0],zero
+; X86-SSE4A-NEXT:    movsd 24(%ecx), %xmm3 # xmm3 = mem[0],zero
 ; X86-SSE4A-NEXT:    movntsd %xmm0, (%eax)
 ; X86-SSE4A-NEXT:    movntsd %xmm1, 8(%eax)
 ; X86-SSE4A-NEXT:    movntsd %xmm3, 24(%eax)
@@ -473,10 +473,10 @@ define void @merge_2_v4f32_align1(<4 x float>* %a0, <4 x float>* %a1) nounwind {
 ;
 ; X64-SSE4A-LABEL: merge_2_v4f32_align1:
 ; X64-SSE4A:       # %bb.0:
-; X64-SSE4A-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
-; X64-SSE4A-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
-; X64-SSE4A-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
-; X64-SSE4A-NEXT:    movsd {{.*#+}} xmm3 = mem[0],zero
+; X64-SSE4A-NEXT:    movsd (%rdi), %xmm0 # xmm0 = mem[0],zero
+; X64-SSE4A-NEXT:    movsd 8(%rdi), %xmm1 # xmm1 = mem[0],zero
+; X64-SSE4A-NEXT:    movsd 16(%rdi), %xmm2 # xmm2 = mem[0],zero
+; X64-SSE4A-NEXT:    movsd 24(%rdi), %xmm3 # xmm3 = mem[0],zero
 ; X64-SSE4A-NEXT:    movntsd %xmm0, (%rsi)
 ; X64-SSE4A-NEXT:    movntsd %xmm1, 8(%rsi)
 ; X64-SSE4A-NEXT:    movntsd %xmm3, 24(%rsi)


        


More information about the llvm-commits mailing list