[llvm] r363181 - [X86][SSE] Avoid unnecessary stack codegen in NT merge-consecutive-stores codegen tests.

Simon Pilgrim via llvm-commits <llvm-commits at lists.llvm.org>
Wed Jun 12 10:28:48 PDT 2019


Author: rksimon
Date: Wed Jun 12 10:28:48 2019
New Revision: 363181

URL: http://llvm.org/viewvc/llvm-project?rev=363181&view=rev
Log:
[X86][SSE] Avoid unnecessary stack codegen in NT merge-consecutive-stores codegen tests.
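
For context: without nounwind, llc must assume these functions can unwind, so
the emitted assembly (and therefore the FileCheck lines) can pick up frame and
CFI bookkeeping that has nothing to do with the load/store merging under test.
A minimal sketch of the attribute change, using hypothetical function names
that are not part of the test file:

  ; Without the attribute, checks may have to match .cfi_* directives and
  ; frame setup around any stack usage.
  define void @sketch_plain(<4 x float>* %p, <4 x float>* %q) {
    %v = load <4 x float>, <4 x float>* %p, align 32, !nontemporal !0
    store <4 x float> %v, <4 x float>* %q, align 32, !nontemporal !0
    ret void
  }

  ; With nounwind, no unwind information is required and the body stays minimal.
  define void @sketch_nounwind(<4 x float>* %p, <4 x float>* %q) nounwind {
    %v = load <4 x float>, <4 x float>* %p, align 32, !nontemporal !0
    store <4 x float> %v, <4 x float>* %q, align 32, !nontemporal !0
    ret void
  }

  !0 = !{i32 1}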

Modified:
    llvm/trunk/test/CodeGen/X86/merge-consecutive-stores-nt.ll

Modified: llvm/trunk/test/CodeGen/X86/merge-consecutive-stores-nt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/merge-consecutive-stores-nt.ll?rev=363181&r1=363180&r2=363181&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/merge-consecutive-stores-nt.ll (original)
+++ llvm/trunk/test/CodeGen/X86/merge-consecutive-stores-nt.ll Wed Jun 12 10:28:48 2019
@@ -10,7 +10,7 @@
 ; PR42123
 ;
 
-define void @merge_2_v4f32_align32(<4 x float>* %a0, <4 x float>* %a1)  {
+define void @merge_2_v4f32_align32(<4 x float>* %a0, <4 x float>* %a1) nounwind {
 ; X86-LABEL: merge_2_v4f32_align32:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -71,7 +71,7 @@ define void @merge_2_v4f32_align32(<4 x
 }
 
 ; Don't merge nt and non-nt loads even if aligned.
-define void @merge_2_v4f32_align32_mix_ntload(<4 x float>* %a0, <4 x float>* %a1)  {
+define void @merge_2_v4f32_align32_mix_ntload(<4 x float>* %a0, <4 x float>* %a1) nounwind {
 ; X86-LABEL: merge_2_v4f32_align32_mix_ntload:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -125,7 +125,7 @@ define void @merge_2_v4f32_align32_mix_n
 }
 
 ; Don't merge nt and non-nt stores even if aligned.
-define void @merge_2_v4f32_align32_mix_ntstore(<4 x float>* %a0, <4 x float>* %a1)  {
+define void @merge_2_v4f32_align32_mix_ntstore(<4 x float>* %a0, <4 x float>* %a1) nounwind {
 ; X86-LABEL: merge_2_v4f32_align32_mix_ntstore:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -164,7 +164,7 @@ define void @merge_2_v4f32_align32_mix_n
 
 ; FIXME: AVX2 can't perform NT-load-ymm on 16-byte aligned memory.
 ; Must be kept separate as VMOVNTDQA xmm.
-define void @merge_2_v4f32_align16_ntload(<4 x float>* %a0, <4 x float>* %a1)  {
+define void @merge_2_v4f32_align16_ntload(<4 x float>* %a0, <4 x float>* %a1) nounwind {
 ; X86-LABEL: merge_2_v4f32_align16_ntload:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -226,7 +226,7 @@ define void @merge_2_v4f32_align16_ntloa
 
 ; FIXME: AVX can't perform NT-store-ymm on 16-byte aligned memory.
 ; Must be kept separate as VMOVNTPS xmm.
-define void @merge_2_v4f32_align16_ntstore(<4 x float>* %a0, <4 x float>* %a1)  {
+define void @merge_2_v4f32_align16_ntstore(<4 x float>* %a0, <4 x float>* %a1) nounwind {
 ; X86-LABEL: merge_2_v4f32_align16_ntstore:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -264,7 +264,7 @@ define void @merge_2_v4f32_align16_ntsto
 
 ; FIXME: Nothing can perform NT-load-vector on 1-byte aligned memory.
 ; Just perform regular loads.
-define void @merge_2_v4f32_align1_ntload(<4 x float>* %a0, <4 x float>* %a1)  {
+define void @merge_2_v4f32_align1_ntload(<4 x float>* %a0, <4 x float>* %a1) nounwind {
 ; X86-LABEL: merge_2_v4f32_align1_ntload:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -302,7 +302,7 @@ define void @merge_2_v4f32_align1_ntload
 
 ; FIXME: Nothing can perform NT-store-vector on 1-byte aligned memory.
 ; Must be scalarized to use MOVNTI/MOVNTSD.
-define void @merge_2_v4f32_align1_ntstore(<4 x float>* %a0, <4 x float>* %a1)  {
+define void @merge_2_v4f32_align1_ntstore(<4 x float>* %a0, <4 x float>* %a1) nounwind {
 ; X86-LABEL: merge_2_v4f32_align1_ntstore:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
@@ -340,7 +340,7 @@ define void @merge_2_v4f32_align1_ntstor
 
 ; FIXME: Nothing can perform NT-load-vector on 1-byte aligned memory.
 ; Just perform regular loads and scalarize NT-stores.
-define void @merge_2_v4f32_align1(<4 x float>* %a0, <4 x float>* %a1)  {
+define void @merge_2_v4f32_align1(<4 x float>* %a0, <4 x float>* %a1) nounwind {
 ; X86-LABEL: merge_2_v4f32_align1:
 ; X86:       # %bb.0:
 ; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
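
Note that every hunk in this patch touches only the define line; the existing
CHECK blocks are left as-is. The payoff is presumably for future regenerations
of these assertions: with nounwind, a function that ends up needing stack
temporaries will not drag unwind bookkeeping into its checks. A rough,
hypothetical illustration of the kind of check-line noise this avoids (not
taken from this test file):

  ; X86-NEXT:    pushl %ebp
  ; X86-NEXT:    .cfi_def_cfa_offset 8
  ; X86-NEXT:    .cfi_offset %ebp, -8
  ;              ...function body...
  ; X86-NEXT:    popl %ebp
  ; X86-NEXT:    .cfi_def_cfa_offset 4
  ; X86-NEXT:    retl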



