[llvm] r271654 - [X86][SSE] Regenerated nontemporal vector store tests and added extra target types
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Fri Jun 3 03:24:24 PDT 2016
Author: rksimon
Date: Fri Jun 3 05:24:24 2016
New Revision: 271654
URL: http://llvm.org/viewvc/llvm-project?rev=271654&view=rev
Log:
[X86][SSE] Regenerated nontemporal vector store tests and added extra target types
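For anyone wanting to reproduce the regenerated assertions, a minimal sketch follows; it assumes llc and FileCheck from a local build are on PATH and that the working directory is the llvm trunk checkout (the script's option names may differ between LLVM versions):

  # Regenerate the FileCheck lines in-place with the autogen script noted in the test header.
  python utils/update_llc_test_checks.py test/CodeGen/X86/nontemporal-2.ll

  # Each RUN line is just an llc | FileCheck pipeline; e.g. the AVX2 configuration by hand:
  llc < test/CodeGen/X86/nontemporal-2.ll -mtriple=x86_64-unknown-unknown -mattr=+avx2 \
    | FileCheck test/CodeGen/X86/nontemporal-2.ll --check-prefix=AVX --check-prefix=AVX2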
Modified:
llvm/trunk/test/CodeGen/X86/nontemporal-2.ll
Modified: llvm/trunk/test/CodeGen/X86/nontemporal-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/nontemporal-2.ll?rev=271654&r1=271653&r2=271654&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/nontemporal-2.ll (original)
+++ llvm/trunk/test/CodeGen/X86/nontemporal-2.ll Fri Jun 3 05:24:24 2016
@@ -1,68 +1,143 @@
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s -check-prefix=CHECK -check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s -check-prefix=CHECK -check-prefix=AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s -check-prefix=CHECK -check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s -check-prefix=CHECK -check-prefix=VLX
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefix=SSE --check-prefix=SSE4A
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=VLX
; Make sure that we generate non-temporal stores for the test cases below.
; We use xorps for zeroing, so domain information isn't available anymore.
define void @test_zero_v4f32(<4 x float>* %dst) {
-; CHECK-LABEL: test_zero_v4f32:
-; SSE: movntps
-; AVX: vmovntps
-; AVX2: vmovntps
-; VLX: vmovntdq
+; SSE-LABEL: test_zero_v4f32:
+; SSE: # BB#0:
+; SSE-NEXT: xorps %xmm0, %xmm0
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_zero_v4f32:
+; AVX: # BB#0:
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vmovntps %xmm0, (%rdi)
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_zero_v4f32:
+; VLX: # BB#0:
+; VLX-NEXT: vpxord %xmm0, %xmm0, %xmm0
+; VLX-NEXT: vmovntdq %xmm0, (%rdi)
+; VLX-NEXT: retq
store <4 x float> zeroinitializer, <4 x float>* %dst, align 16, !nontemporal !1
ret void
}
define void @test_zero_v4i32(<4 x i32>* %dst) {
-; CHECK-LABEL: test_zero_v4i32:
-; SSE: movntps
-; AVX: vmovntps
-; AVX2: vmovntps
-; VLX: vmovntdq
+; SSE-LABEL: test_zero_v4i32:
+; SSE: # BB#0:
+; SSE-NEXT: xorps %xmm0, %xmm0
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_zero_v4i32:
+; AVX: # BB#0:
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vmovntps %xmm0, (%rdi)
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_zero_v4i32:
+; VLX: # BB#0:
+; VLX-NEXT: vpxord %xmm0, %xmm0, %xmm0
+; VLX-NEXT: vmovntdq %xmm0, (%rdi)
+; VLX-NEXT: retq
store <4 x i32> zeroinitializer, <4 x i32>* %dst, align 16, !nontemporal !1
store <4 x i32> zeroinitializer, <4 x i32>* %dst, align 16, !nontemporal !1
ret void
}
define void @test_zero_v2f64(<2 x double>* %dst) {
-; CHECK-LABEL: test_zero_v2f64:
-; SSE: movntps
-; AVX: vmovntps
-; AVX2: vmovntps
-; VLX: vmovntdq
+; SSE-LABEL: test_zero_v2f64:
+; SSE: # BB#0:
+; SSE-NEXT: xorps %xmm0, %xmm0
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_zero_v2f64:
+; AVX: # BB#0:
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vmovntps %xmm0, (%rdi)
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_zero_v2f64:
+; VLX: # BB#0:
+; VLX-NEXT: vpxord %xmm0, %xmm0, %xmm0
+; VLX-NEXT: vmovntdq %xmm0, (%rdi)
+; VLX-NEXT: retq
store <2 x double> zeroinitializer, <2 x double>* %dst, align 16, !nontemporal !1
ret void
}
define void @test_zero_v2i64(<2 x i64>* %dst) {
-; CHECK-LABEL: test_zero_v2i64:
-; SSE: movntps
-; AVX: vmovntps
-; AVX2: vmovntps
-; VLX: vmovntdq
+; SSE-LABEL: test_zero_v2i64:
+; SSE: # BB#0:
+; SSE-NEXT: xorps %xmm0, %xmm0
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_zero_v2i64:
+; AVX: # BB#0:
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vmovntps %xmm0, (%rdi)
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_zero_v2i64:
+; VLX: # BB#0:
+; VLX-NEXT: vpxord %xmm0, %xmm0, %xmm0
+; VLX-NEXT: vmovntdq %xmm0, (%rdi)
+; VLX-NEXT: retq
store <2 x i64> zeroinitializer, <2 x i64>* %dst, align 16, !nontemporal !1
ret void
}
define void @test_zero_v8i16(<8 x i16>* %dst) {
-; CHECK-LABEL: test_zero_v8i16:
-; SSE: movntps
-; AVX: vmovntps
-; AVX2: vmovntps
-; VLX: vmovntdq
+; SSE-LABEL: test_zero_v8i16:
+; SSE: # BB#0:
+; SSE-NEXT: xorps %xmm0, %xmm0
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_zero_v8i16:
+; AVX: # BB#0:
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vmovntps %xmm0, (%rdi)
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_zero_v8i16:
+; VLX: # BB#0:
+; VLX-NEXT: vpxord %xmm0, %xmm0, %xmm0
+; VLX-NEXT: vmovntdq %xmm0, (%rdi)
+; VLX-NEXT: retq
store <8 x i16> zeroinitializer, <8 x i16>* %dst, align 16, !nontemporal !1
ret void
}
define void @test_zero_v16i8(<16 x i8>* %dst) {
-; CHECK-LABEL: test_zero_v16i8:
-; SSE: movntps
-; AVX: vmovntps
-; AVX2: vmovntps
-; VLX: vmovntdq
+; SSE-LABEL: test_zero_v16i8:
+; SSE: # BB#0:
+; SSE-NEXT: xorps %xmm0, %xmm0
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_zero_v16i8:
+; AVX: # BB#0:
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vmovntps %xmm0, (%rdi)
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_zero_v16i8:
+; VLX: # BB#0:
+; VLX-NEXT: vpxord %xmm0, %xmm0, %xmm0
+; VLX-NEXT: vmovntdq %xmm0, (%rdi)
+; VLX-NEXT: retq
store <16 x i8> zeroinitializer, <16 x i8>* %dst, align 16, !nontemporal !1
ret void
}
@@ -70,55 +145,145 @@ define void @test_zero_v16i8(<16 x i8>*
; And now YMM versions.
define void @test_zero_v8f32(<8 x float>* %dst) {
-; CHECK-LABEL: test_zero_v8f32:
-; AVX: vmovntps %ymm
-; AVX2: vmovntps %ymm
-; VLX: vmovntdq %ymm
+; SSE-LABEL: test_zero_v8f32:
+; SSE: # BB#0:
+; SSE-NEXT: xorps %xmm0, %xmm0
+; SSE-NEXT: movntps %xmm0, 16(%rdi)
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_zero_v8f32:
+; AVX: # BB#0:
+; AVX-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; AVX-NEXT: vmovntps %ymm0, (%rdi)
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_zero_v8f32:
+; VLX: # BB#0:
+; VLX-NEXT: vpxord %ymm0, %ymm0, %ymm0
+; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: retq
store <8 x float> zeroinitializer, <8 x float>* %dst, align 32, !nontemporal !1
ret void
}
define void @test_zero_v8i32(<8 x i32>* %dst) {
-; CHECK-LABEL: test_zero_v8i32:
-; AVX: vmovntps %ymm
-; AVX2: vmovntps %ymm
-; VLX: vmovntdq %ymm
+; SSE-LABEL: test_zero_v8i32:
+; SSE: # BB#0:
+; SSE-NEXT: xorps %xmm0, %xmm0
+; SSE-NEXT: movntps %xmm0, 16(%rdi)
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_zero_v8i32:
+; AVX: # BB#0:
+; AVX-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; AVX-NEXT: vmovntps %ymm0, (%rdi)
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_zero_v8i32:
+; VLX: # BB#0:
+; VLX-NEXT: vpxord %ymm0, %ymm0, %ymm0
+; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: retq
store <8 x i32> zeroinitializer, <8 x i32>* %dst, align 32, !nontemporal !1
ret void
}
define void @test_zero_v4f64(<4 x double>* %dst) {
-; CHECK-LABEL: test_zero_v4f64:
-; AVX: vmovntps %ymm
-; AVX2: vmovntps %ymm
-; VLX: vmovntdq %ymm
+; SSE-LABEL: test_zero_v4f64:
+; SSE: # BB#0:
+; SSE-NEXT: xorps %xmm0, %xmm0
+; SSE-NEXT: movntps %xmm0, 16(%rdi)
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_zero_v4f64:
+; AVX: # BB#0:
+; AVX-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; AVX-NEXT: vmovntps %ymm0, (%rdi)
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_zero_v4f64:
+; VLX: # BB#0:
+; VLX-NEXT: vpxord %ymm0, %ymm0, %ymm0
+; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: retq
store <4 x double> zeroinitializer, <4 x double>* %dst, align 32, !nontemporal !1
ret void
}
define void @test_zero_v4i64(<4 x i64>* %dst) {
-; CHECK-LABEL: test_zero_v4i64:
-; AVX: vmovntps %ymm
-; AVX2: vmovntps %ymm
-; VLX: vmovntdq %ymm
+; SSE-LABEL: test_zero_v4i64:
+; SSE: # BB#0:
+; SSE-NEXT: xorps %xmm0, %xmm0
+; SSE-NEXT: movntps %xmm0, 16(%rdi)
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_zero_v4i64:
+; AVX: # BB#0:
+; AVX-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; AVX-NEXT: vmovntps %ymm0, (%rdi)
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_zero_v4i64:
+; VLX: # BB#0:
+; VLX-NEXT: vpxord %ymm0, %ymm0, %ymm0
+; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: retq
store <4 x i64> zeroinitializer, <4 x i64>* %dst, align 32, !nontemporal !1
ret void
}
define void @test_zero_v16i16(<16 x i16>* %dst) {
-; CHECK-LABEL: test_zero_v16i16:
-; AVX: vmovntps %ymm
-; AVX2: vmovntps %ymm
-; VLX: vmovntdq %ymm
+; SSE-LABEL: test_zero_v16i16:
+; SSE: # BB#0:
+; SSE-NEXT: xorps %xmm0, %xmm0
+; SSE-NEXT: movntps %xmm0, 16(%rdi)
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_zero_v16i16:
+; AVX: # BB#0:
+; AVX-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; AVX-NEXT: vmovntps %ymm0, (%rdi)
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_zero_v16i16:
+; VLX: # BB#0:
+; VLX-NEXT: vpxord %ymm0, %ymm0, %ymm0
+; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: retq
store <16 x i16> zeroinitializer, <16 x i16>* %dst, align 32, !nontemporal !1
ret void
}
define void @test_zero_v32i8(<32 x i8>* %dst) {
-; CHECK-LABEL: test_zero_v32i8:
-; AVX: vmovntps %ymm
-; AVX2: vmovntps %ymm
-; VLX: vmovntdq %ymm
+; SSE-LABEL: test_zero_v32i8:
+; SSE: # BB#0:
+; SSE-NEXT: xorps %xmm0, %xmm0
+; SSE-NEXT: movntps %xmm0, 16(%rdi)
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_zero_v32i8:
+; AVX: # BB#0:
+; AVX-NEXT: vxorps %ymm0, %ymm0, %ymm0
+; AVX-NEXT: vmovntps %ymm0, (%rdi)
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_zero_v32i8:
+; VLX: # BB#0:
+; VLX-NEXT: vpxord %ymm0, %ymm0, %ymm0
+; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: retq
store <32 x i8> zeroinitializer, <32 x i8>* %dst, align 32, !nontemporal !1
ret void
}
@@ -127,61 +292,115 @@ define void @test_zero_v32i8(<32 x i8>*
; Check that we also handle arguments. Here the type survives longer.
define void @test_arg_v4f32(<4 x float> %arg, <4 x float>* %dst) {
-; CHECK-LABEL: test_arg_v4f32:
-; SSE: movntps
-; AVX: vmovntps
-; AVX2: vmovntps
-; VLX: vmovntps
+; SSE-LABEL: test_arg_v4f32:
+; SSE: # BB#0:
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_arg_v4f32:
+; AVX: # BB#0:
+; AVX-NEXT: vmovntps %xmm0, (%rdi)
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_arg_v4f32:
+; VLX: # BB#0:
+; VLX-NEXT: vmovntps %xmm0, (%rdi)
+; VLX-NEXT: retq
store <4 x float> %arg, <4 x float>* %dst, align 16, !nontemporal !1
ret void
}
define void @test_arg_v4i32(<4 x i32> %arg, <4 x i32>* %dst) {
-; CHECK-LABEL: test_arg_v4i32:
-; SSE: movntps
-; AVX: vmovntps
-; AVX2: vmovntps
-; VLX: vmovntdq
+; SSE-LABEL: test_arg_v4i32:
+; SSE: # BB#0:
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_arg_v4i32:
+; AVX: # BB#0:
+; AVX-NEXT: vmovntps %xmm0, (%rdi)
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_arg_v4i32:
+; VLX: # BB#0:
+; VLX-NEXT: vmovntdq %xmm0, (%rdi)
+; VLX-NEXT: retq
store <4 x i32> %arg, <4 x i32>* %dst, align 16, !nontemporal !1
ret void
}
define void @test_arg_v2f64(<2 x double> %arg, <2 x double>* %dst) {
-; CHECK-LABEL: test_arg_v2f64:
-; SSE: movntps
-; AVX: vmovntps
-; AVX2: vmovntps
-; VLX: vmovntpd
+; SSE-LABEL: test_arg_v2f64:
+; SSE: # BB#0:
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_arg_v2f64:
+; AVX: # BB#0:
+; AVX-NEXT: vmovntps %xmm0, (%rdi)
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_arg_v2f64:
+; VLX: # BB#0:
+; VLX-NEXT: vmovntpd %xmm0, (%rdi)
+; VLX-NEXT: retq
store <2 x double> %arg, <2 x double>* %dst, align 16, !nontemporal !1
ret void
}
define void @test_arg_v2i64(<2 x i64> %arg, <2 x i64>* %dst) {
-; CHECK-LABEL: test_arg_v2i64:
-; SSE: movntps
-; AVX: vmovntps
-; AVX2: vmovntps
-; VLX: vmovntdq
+; SSE-LABEL: test_arg_v2i64:
+; SSE: # BB#0:
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_arg_v2i64:
+; AVX: # BB#0:
+; AVX-NEXT: vmovntps %xmm0, (%rdi)
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_arg_v2i64:
+; VLX: # BB#0:
+; VLX-NEXT: vmovntdq %xmm0, (%rdi)
+; VLX-NEXT: retq
store <2 x i64> %arg, <2 x i64>* %dst, align 16, !nontemporal !1
ret void
}
define void @test_arg_v8i16(<8 x i16> %arg, <8 x i16>* %dst) {
-; CHECK-LABEL: test_arg_v8i16:
-; SSE: movntps
-; AVX: vmovntps
-; AVX2: vmovntps
-; VLX: vmovntdq
+; SSE-LABEL: test_arg_v8i16:
+; SSE: # BB#0:
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_arg_v8i16:
+; AVX: # BB#0:
+; AVX-NEXT: vmovntps %xmm0, (%rdi)
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_arg_v8i16:
+; VLX: # BB#0:
+; VLX-NEXT: vmovntdq %xmm0, (%rdi)
+; VLX-NEXT: retq
store <8 x i16> %arg, <8 x i16>* %dst, align 16, !nontemporal !1
ret void
}
define void @test_arg_v16i8(<16 x i8> %arg, <16 x i8>* %dst) {
-; CHECK-LABEL: test_arg_v16i8:
-; SSE: movntps
-; AVX: vmovntps
-; AVX2: vmovntps
-; VLX: vmovntdq
+; SSE-LABEL: test_arg_v16i8:
+; SSE: # BB#0:
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_arg_v16i8:
+; AVX: # BB#0:
+; AVX-NEXT: vmovntps %xmm0, (%rdi)
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_arg_v16i8:
+; VLX: # BB#0:
+; VLX-NEXT: vmovntdq %xmm0, (%rdi)
+; VLX-NEXT: retq
store <16 x i8> %arg, <16 x i8>* %dst, align 16, !nontemporal !1
ret void
}
@@ -189,55 +408,127 @@ define void @test_arg_v16i8(<16 x i8> %a
; And now YMM versions.
define void @test_arg_v8f32(<8 x float> %arg, <8 x float>* %dst) {
-; CHECK-LABEL: test_arg_v8f32:
-; AVX: vmovntps %ymm
-; AVX2: vmovntps %ymm
-; VLX: vmovntps %ymm
+; SSE-LABEL: test_arg_v8f32:
+; SSE: # BB#0:
+; SSE-NEXT: movntps %xmm1, 16(%rdi)
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_arg_v8f32:
+; AVX: # BB#0:
+; AVX-NEXT: vmovntps %ymm0, (%rdi)
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_arg_v8f32:
+; VLX: # BB#0:
+; VLX-NEXT: vmovntps %ymm0, (%rdi)
+; VLX-NEXT: retq
store <8 x float> %arg, <8 x float>* %dst, align 32, !nontemporal !1
ret void
}
define void @test_arg_v8i32(<8 x i32> %arg, <8 x i32>* %dst) {
-; CHECK-LABEL: test_arg_v8i32:
-; AVX: vmovntps %ymm
-; AVX2: vmovntps %ymm
-; VLX: vmovntdq %ymm
+; SSE-LABEL: test_arg_v8i32:
+; SSE: # BB#0:
+; SSE-NEXT: movntps %xmm1, 16(%rdi)
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_arg_v8i32:
+; AVX: # BB#0:
+; AVX-NEXT: vmovntps %ymm0, (%rdi)
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_arg_v8i32:
+; VLX: # BB#0:
+; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: retq
store <8 x i32> %arg, <8 x i32>* %dst, align 32, !nontemporal !1
ret void
}
define void @test_arg_v4f64(<4 x double> %arg, <4 x double>* %dst) {
-; CHECK-LABEL: test_arg_v4f64:
-; AVX: vmovntps %ymm
-; AVX2: vmovntps %ymm
-; VLX: vmovntpd %ymm
+; SSE-LABEL: test_arg_v4f64:
+; SSE: # BB#0:
+; SSE-NEXT: movntps %xmm1, 16(%rdi)
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_arg_v4f64:
+; AVX: # BB#0:
+; AVX-NEXT: vmovntps %ymm0, (%rdi)
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_arg_v4f64:
+; VLX: # BB#0:
+; VLX-NEXT: vmovntpd %ymm0, (%rdi)
+; VLX-NEXT: retq
store <4 x double> %arg, <4 x double>* %dst, align 32, !nontemporal !1
ret void
}
define void @test_arg_v4i64(<4 x i64> %arg, <4 x i64>* %dst) {
-; CHECK-LABEL: test_arg_v4i64:
-; AVX: vmovntps %ymm
-; AVX2: vmovntps %ymm
-; VLX: vmovntdq %ymm
+; SSE-LABEL: test_arg_v4i64:
+; SSE: # BB#0:
+; SSE-NEXT: movntps %xmm1, 16(%rdi)
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_arg_v4i64:
+; AVX: # BB#0:
+; AVX-NEXT: vmovntps %ymm0, (%rdi)
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_arg_v4i64:
+; VLX: # BB#0:
+; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: retq
store <4 x i64> %arg, <4 x i64>* %dst, align 32, !nontemporal !1
ret void
}
define void @test_arg_v16i16(<16 x i16> %arg, <16 x i16>* %dst) {
-; CHECK-LABEL: test_arg_v16i16:
-; AVX: vmovntps %ymm
-; AVX2: vmovntps %ymm
-; VLX: vmovntdq %ymm
+; SSE-LABEL: test_arg_v16i16:
+; SSE: # BB#0:
+; SSE-NEXT: movntps %xmm1, 16(%rdi)
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_arg_v16i16:
+; AVX: # BB#0:
+; AVX-NEXT: vmovntps %ymm0, (%rdi)
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_arg_v16i16:
+; VLX: # BB#0:
+; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: retq
store <16 x i16> %arg, <16 x i16>* %dst, align 32, !nontemporal !1
ret void
}
define void @test_arg_v32i8(<32 x i8> %arg, <32 x i8>* %dst) {
-; CHECK-LABEL: test_arg_v32i8:
-; AVX: vmovntps %ymm
-; AVX2: vmovntps %ymm
-; VLX: vmovntdq %ymm
+; SSE-LABEL: test_arg_v32i8:
+; SSE: # BB#0:
+; SSE-NEXT: movntps %xmm1, 16(%rdi)
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_arg_v32i8:
+; AVX: # BB#0:
+; AVX-NEXT: vmovntps %ymm0, (%rdi)
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_arg_v32i8:
+; VLX: # BB#0:
+; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: retq
store <32 x i8> %arg, <32 x i8>* %dst, align 32, !nontemporal !1
ret void
}
@@ -247,66 +538,138 @@ define void @test_arg_v32i8(<32 x i8> %a
; We use an add to make the type survive all the way to the MOVNT.
define void @test_op_v4f32(<4 x float> %a, <4 x float> %b, <4 x float>* %dst) {
-; CHECK-LABEL: test_op_v4f32:
-; SSE: movntps
-; AVX: vmovntps
-; AVX2: vmovntps
-; VLX: vmovntps
+; SSE-LABEL: test_op_v4f32:
+; SSE: # BB#0:
+; SSE-NEXT: addps %xmm1, %xmm0
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_op_v4f32:
+; AVX: # BB#0:
+; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovntps %xmm0, (%rdi)
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_op_v4f32:
+; VLX: # BB#0:
+; VLX-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; VLX-NEXT: vmovntps %xmm0, (%rdi)
+; VLX-NEXT: retq
%r = fadd <4 x float> %a, %b
store <4 x float> %r, <4 x float>* %dst, align 16, !nontemporal !1
ret void
}
define void @test_op_v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32>* %dst) {
-; CHECK-LABEL: test_op_v4i32:
-; SSE: movntdq
-; AVX: vmovntdq
-; AVX2: vmovntdq
-; VLX: vmovntdq
+; SSE-LABEL: test_op_v4i32:
+; SSE: # BB#0:
+; SSE-NEXT: paddd %xmm1, %xmm0
+; SSE-NEXT: movntdq %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_op_v4i32:
+; AVX: # BB#0:
+; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovntdq %xmm0, (%rdi)
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_op_v4i32:
+; VLX: # BB#0:
+; VLX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; VLX-NEXT: vmovntdq %xmm0, (%rdi)
+; VLX-NEXT: retq
%r = add <4 x i32> %a, %b
store <4 x i32> %r, <4 x i32>* %dst, align 16, !nontemporal !1
ret void
}
define void @test_op_v2f64(<2 x double> %a, <2 x double> %b, <2 x double>* %dst) {
-; CHECK-LABEL: test_op_v2f64:
-; SSE: movntpd
-; AVX: vmovntpd
-; AVX2: vmovntpd
-; VLX: vmovntpd
+; SSE-LABEL: test_op_v2f64:
+; SSE: # BB#0:
+; SSE-NEXT: addpd %xmm1, %xmm0
+; SSE-NEXT: movntpd %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_op_v2f64:
+; AVX: # BB#0:
+; AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovntpd %xmm0, (%rdi)
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_op_v2f64:
+; VLX: # BB#0:
+; VLX-NEXT: vaddpd %xmm1, %xmm0, %xmm0
+; VLX-NEXT: vmovntpd %xmm0, (%rdi)
+; VLX-NEXT: retq
%r = fadd <2 x double> %a, %b
store <2 x double> %r, <2 x double>* %dst, align 16, !nontemporal !1
ret void
}
define void @test_op_v2i64(<2 x i64> %a, <2 x i64> %b, <2 x i64>* %dst) {
-; CHECK-LABEL: test_op_v2i64:
-; SSE: movntdq
-; AVX: vmovntdq
-; AVX2: vmovntdq
-; VLX: vmovntdq
+; SSE-LABEL: test_op_v2i64:
+; SSE: # BB#0:
+; SSE-NEXT: paddq %xmm1, %xmm0
+; SSE-NEXT: movntdq %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_op_v2i64:
+; AVX: # BB#0:
+; AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovntdq %xmm0, (%rdi)
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_op_v2i64:
+; VLX: # BB#0:
+; VLX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; VLX-NEXT: vmovntdq %xmm0, (%rdi)
+; VLX-NEXT: retq
%r = add <2 x i64> %a, %b
store <2 x i64> %r, <2 x i64>* %dst, align 16, !nontemporal !1
ret void
}
define void @test_op_v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16>* %dst) {
-; CHECK-LABEL: test_op_v8i16:
-; SSE: movntdq
-; AVX: vmovntdq
-; AVX2: vmovntdq
-; VLX: vmovntdq
+; SSE-LABEL: test_op_v8i16:
+; SSE: # BB#0:
+; SSE-NEXT: paddw %xmm1, %xmm0
+; SSE-NEXT: movntdq %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_op_v8i16:
+; AVX: # BB#0:
+; AVX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovntdq %xmm0, (%rdi)
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_op_v8i16:
+; VLX: # BB#0:
+; VLX-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; VLX-NEXT: vmovntdq %xmm0, (%rdi)
+; VLX-NEXT: retq
%r = add <8 x i16> %a, %b
store <8 x i16> %r, <8 x i16>* %dst, align 16, !nontemporal !1
ret void
}
define void @test_op_v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8>* %dst) {
-; CHECK-LABEL: test_op_v16i8:
-; SSE: movntdq
-; AVX: vmovntdq
-; AVX2: vmovntdq
-; VLX: vmovntdq
+; SSE-LABEL: test_op_v16i8:
+; SSE: # BB#0:
+; SSE-NEXT: paddb %xmm1, %xmm0
+; SSE-NEXT: movntdq %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_op_v16i8:
+; AVX: # BB#0:
+; AVX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmovntdq %xmm0, (%rdi)
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_op_v16i8:
+; VLX: # BB#0:
+; VLX-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; VLX-NEXT: vmovntdq %xmm0, (%rdi)
+; VLX-NEXT: retq
%r = add <16 x i8> %a, %b
store <16 x i8> %r, <16 x i8>* %dst, align 16, !nontemporal !1
ret void
@@ -315,60 +678,200 @@ define void @test_op_v16i8(<16 x i8> %a,
; And now YMM versions.
define void @test_op_v8f32(<8 x float> %a, <8 x float> %b, <8 x float>* %dst) {
-; CHECK-LABEL: test_op_v8f32:
-; AVX: vmovntps %ymm
-; AVX2: vmovntps %ymm
-; VLX: vmovntps %ymm
+; SSE-LABEL: test_op_v8f32:
+; SSE: # BB#0:
+; SSE-NEXT: addps %xmm2, %xmm0
+; SSE-NEXT: addps %xmm3, %xmm1
+; SSE-NEXT: movntps %xmm1, 16(%rdi)
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_op_v8f32:
+; AVX: # BB#0:
+; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vmovntps %ymm0, (%rdi)
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_op_v8f32:
+; VLX: # BB#0:
+; VLX-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; VLX-NEXT: vmovntps %ymm0, (%rdi)
+; VLX-NEXT: retq
%r = fadd <8 x float> %a, %b
store <8 x float> %r, <8 x float>* %dst, align 32, !nontemporal !1
ret void
}
define void @test_op_v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32>* %dst) {
-; CHECK-LABEL: test_op_v8i32:
-; AVX: vmovntps %ymm
-; AVX2: vmovntdq %ymm
-; VLX: vmovntdq %ymm
+; SSE-LABEL: test_op_v8i32:
+; SSE: # BB#0:
+; SSE-NEXT: paddd %xmm2, %xmm0
+; SSE-NEXT: paddd %xmm3, %xmm1
+; SSE-NEXT: movntdq %xmm1, 16(%rdi)
+; SSE-NEXT: movntdq %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_op_v8i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpaddd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vmovntps %ymm0, (%rdi)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_op_v8i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vmovntdq %ymm0, (%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; VLX-LABEL: test_op_v8i32:
+; VLX: # BB#0:
+; VLX-NEXT: vpaddd %ymm1, %ymm0, %ymm0
+; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: retq
%r = add <8 x i32> %a, %b
store <8 x i32> %r, <8 x i32>* %dst, align 32, !nontemporal !1
ret void
}
define void @test_op_v4f64(<4 x double> %a, <4 x double> %b, <4 x double>* %dst) {
-; CHECK-LABEL: test_op_v4f64:
-; AVX: vmovntpd %ymm
-; AVX2: vmovntpd %ymm
-; VLX: vmovntpd %ymm
+; SSE-LABEL: test_op_v4f64:
+; SSE: # BB#0:
+; SSE-NEXT: addpd %xmm2, %xmm0
+; SSE-NEXT: addpd %xmm3, %xmm1
+; SSE-NEXT: movntpd %xmm1, 16(%rdi)
+; SSE-NEXT: movntpd %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_op_v4f64:
+; AVX: # BB#0:
+; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vmovntpd %ymm0, (%rdi)
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_op_v4f64:
+; VLX: # BB#0:
+; VLX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; VLX-NEXT: vmovntpd %ymm0, (%rdi)
+; VLX-NEXT: retq
%r = fadd <4 x double> %a, %b
store <4 x double> %r, <4 x double>* %dst, align 32, !nontemporal !1
ret void
}
define void @test_op_v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64>* %dst) {
-; CHECK-LABEL: test_op_v4i64:
-; AVX: vmovntps %ymm
-; AVX2: vmovntdq %ymm
-; VLX: vmovntdq %ymm
+; SSE-LABEL: test_op_v4i64:
+; SSE: # BB#0:
+; SSE-NEXT: paddq %xmm2, %xmm0
+; SSE-NEXT: paddq %xmm3, %xmm1
+; SSE-NEXT: movntdq %xmm1, 16(%rdi)
+; SSE-NEXT: movntdq %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_op_v4i64:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpaddq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vmovntps %ymm0, (%rdi)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_op_v4i64:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vmovntdq %ymm0, (%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; VLX-LABEL: test_op_v4i64:
+; VLX: # BB#0:
+; VLX-NEXT: vpaddq %ymm1, %ymm0, %ymm0
+; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: retq
%r = add <4 x i64> %a, %b
store <4 x i64> %r, <4 x i64>* %dst, align 32, !nontemporal !1
ret void
}
define void @test_op_v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16>* %dst) {
-; CHECK-LABEL: test_op_v16i16:
-; AVX: vmovntps %ymm
-; AVX2: vmovntdq %ymm
-; VLX: vmovntdq %ymm
+; SSE-LABEL: test_op_v16i16:
+; SSE: # BB#0:
+; SSE-NEXT: paddw %xmm2, %xmm0
+; SSE-NEXT: paddw %xmm3, %xmm1
+; SSE-NEXT: movntdq %xmm1, 16(%rdi)
+; SSE-NEXT: movntdq %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_op_v16i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpaddw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpaddw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vmovntps %ymm0, (%rdi)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_op_v16i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vmovntdq %ymm0, (%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; VLX-LABEL: test_op_v16i16:
+; VLX: # BB#0:
+; VLX-NEXT: vpaddw %ymm1, %ymm0, %ymm0
+; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: retq
%r = add <16 x i16> %a, %b
store <16 x i16> %r, <16 x i16>* %dst, align 32, !nontemporal !1
ret void
}
define void @test_op_v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8>* %dst) {
-; CHECK-LABEL: test_op_v32i8:
-; AVX: vmovntps %ymm
-; AVX2: vmovntdq %ymm
-; VLX: vmovntdq %ymm
+; SSE-LABEL: test_op_v32i8:
+; SSE: # BB#0:
+; SSE-NEXT: paddb %xmm2, %xmm0
+; SSE-NEXT: paddb %xmm3, %xmm1
+; SSE-NEXT: movntdq %xmm1, 16(%rdi)
+; SSE-NEXT: movntdq %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_op_v32i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vpaddb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpaddb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vmovntps %ymm0, (%rdi)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_op_v32i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vmovntdq %ymm0, (%rdi)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; VLX-LABEL: test_op_v32i8:
+; VLX: # BB#0:
+; VLX-NEXT: vpaddb %ymm1, %ymm0, %ymm0
+; VLX-NEXT: vmovntdq %ymm0, (%rdi)
+; VLX-NEXT: retq
%r = add <32 x i8> %a, %b
store <32 x i8> %r, <32 x i8>* %dst, align 32, !nontemporal !1
ret void
@@ -379,15 +882,26 @@ define void @test_op_v32i8(<32 x i8> %a,
; could even scalarize to movnti when we have 1-alignment: nontemporal is
; probably always worth even some 20 instruction scalarization.
define void @test_unaligned_v8f32(<8 x float> %a, <8 x float> %b, <8 x float>* %dst) {
-; CHECK-LABEL: test_unaligned_v8f32:
-; SSE: movntps %xmm
-; SSE: movntps %xmm
-; AVX-NOT: movnt
-; AVX: vmovups %ymm
-; AVX2-NOT: movnt
-; AVX2: vmovups %ymm
-; VLX-NOT: movnt
-; VLX: vmovups %ymm
+; SSE-LABEL: test_unaligned_v8f32:
+; SSE: # BB#0:
+; SSE-NEXT: addps %xmm2, %xmm0
+; SSE-NEXT: addps %xmm3, %xmm1
+; SSE-NEXT: movntps %xmm1, 16(%rdi)
+; SSE-NEXT: movntps %xmm0, (%rdi)
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_unaligned_v8f32:
+; AVX: # BB#0:
+; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vmovups %ymm0, (%rdi)
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: retq
+;
+; VLX-LABEL: test_unaligned_v8f32:
+; VLX: # BB#0:
+; VLX-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; VLX-NEXT: vmovups %ymm0, (%rdi)
+; VLX-NEXT: retq
%r = fadd <8 x float> %a, %b
store <8 x float> %r, <8 x float>* %dst, align 16, !nontemporal !1
ret void