[llvm] 3bae2a4 - [X86] Precommit new tests from D68757. NFC
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Wed Oct 30 14:35:33 PDT 2019
Author: Craig Topper
Date: 2019-10-30T14:34:08-07:00
New Revision: 3bae2a4cf7f3ca3382c62f6008d540bf658024e3
URL: https://github.com/llvm/llvm-project/commit/3bae2a4cf7f3ca3382c62f6008d540bf658024e3
DIFF: https://github.com/llvm/llvm-project/commit/3bae2a4cf7f3ca3382c62f6008d540bf658024e3.diff
LOG: [X86] Precommit new tests from D68757. NFC
Added:
llvm/test/CodeGen/X86/fp-strict-scalar.ll
llvm/test/CodeGen/X86/vec-strict-128.ll
llvm/test/CodeGen/X86/vec-strict-256.ll
llvm/test/CodeGen/X86/vec-strict-512.ll
Modified:
Removed:
################################################################################
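All four added files follow the same pattern: each test function calls one of the
llvm.experimental.constrained.* intrinsics with dynamic rounding and strict
exception semantics, under the strictfp attribute. As a minimal standalone sketch
of that pattern, distilled from the tests below (the function name @strict_add is
illustrative, not from the commit):

    declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)

    define double @strict_add(double %a, double %b) #0 {
      ; The metadata operands request the dynamic rounding mode and make
      ; FP exceptions observable, so the backend may not reorder or fold
      ; this operation the way it could an ordinary fadd.
      %r = call double @llvm.experimental.constrained.fadd.f64(
                    double %a, double %b,
                    metadata !"round.dynamic",
                    metadata !"fpexcept.strict") #0
      ret double %r
    }

    attributes #0 = { strictfp }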
diff --git a/llvm/test/CodeGen/X86/fp-strict-scalar.ll b/llvm/test/CodeGen/X86/fp-strict-scalar.ll
new file mode 100644
index 000000000000..f54b3ac67306
--- /dev/null
+++ b/llvm/test/CodeGen/X86/fp-strict-scalar.ll
@@ -0,0 +1,195 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=-sse -O3 | FileCheck %s --check-prefixes=X87
+
+declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
+declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
+declare double @llvm.experimental.constrained.fsub.f64(double, double, metadata, metadata)
+declare float @llvm.experimental.constrained.fsub.f32(float, float, metadata, metadata)
+declare double @llvm.experimental.constrained.fmul.f64(double, double, metadata, metadata)
+declare float @llvm.experimental.constrained.fmul.f32(float, float, metadata, metadata)
+declare double @llvm.experimental.constrained.fdiv.f64(double, double, metadata, metadata)
+declare float @llvm.experimental.constrained.fdiv.f32(float, float, metadata, metadata)
+
+define x86_regcallcc double @f1(double %a, double %b) #0 {
+; SSE-LABEL: f1:
+; SSE: # %bb.0:
+; SSE-NEXT: addsd %xmm1, %xmm0
+; SSE-NEXT: ret{{[l|q]}}
+;
+; AVX-LABEL: f1:
+; AVX: # %bb.0:
+; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: ret{{[l|q]}}
+;
+; X87-LABEL: f1:
+; X87: # %bb.0:
+; X87-NEXT: fldl {{[0-9]+}}(%esp)
+; X87-NEXT: faddl {{[0-9]+}}(%esp)
+; X87-NEXT: retl
+ %ret = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret double %ret
+}
+
+define x86_regcallcc float @f2(float %a, float %b) #0 {
+; SSE-LABEL: f2:
+; SSE: # %bb.0:
+; SSE-NEXT: addss %xmm1, %xmm0
+; SSE-NEXT: ret{{[l|q]}}
+;
+; AVX-LABEL: f2:
+; AVX: # %bb.0:
+; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: ret{{[l|q]}}
+;
+; X87-LABEL: f2:
+; X87: # %bb.0:
+; X87-NEXT: flds {{[0-9]+}}(%esp)
+; X87-NEXT: fadds {{[0-9]+}}(%esp)
+; X87-NEXT: retl
+ %ret = call float @llvm.experimental.constrained.fadd.f32(float %a, float %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret float %ret
+}
+
+define x86_regcallcc double @f3(double %a, double %b) #0 {
+; SSE-LABEL: f3:
+; SSE: # %bb.0:
+; SSE-NEXT: subsd %xmm1, %xmm0
+; SSE-NEXT: ret{{[l|q]}}
+;
+; AVX-LABEL: f3:
+; AVX: # %bb.0:
+; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: ret{{[l|q]}}
+;
+; X87-LABEL: f3:
+; X87: # %bb.0:
+; X87-NEXT: fldl {{[0-9]+}}(%esp)
+; X87-NEXT: fsubl {{[0-9]+}}(%esp)
+; X87-NEXT: retl
+ %ret = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret double %ret
+}
+
+define x86_regcallcc float @f4(float %a, float %b) #0 {
+; SSE-LABEL: f4:
+; SSE: # %bb.0:
+; SSE-NEXT: subss %xmm1, %xmm0
+; SSE-NEXT: ret{{[l|q]}}
+;
+; AVX-LABEL: f4:
+; AVX: # %bb.0:
+; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: ret{{[l|q]}}
+;
+; X87-LABEL: f4:
+; X87: # %bb.0:
+; X87-NEXT: flds {{[0-9]+}}(%esp)
+; X87-NEXT: fsubs {{[0-9]+}}(%esp)
+; X87-NEXT: retl
+ %ret = call float @llvm.experimental.constrained.fsub.f32(float %a, float %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret float %ret
+}
+
+define x86_regcallcc double @f5(double %a, double %b) #0 {
+; SSE-LABEL: f5:
+; SSE: # %bb.0:
+; SSE-NEXT: mulsd %xmm1, %xmm0
+; SSE-NEXT: ret{{[l|q]}}
+;
+; AVX-LABEL: f5:
+; AVX: # %bb.0:
+; AVX-NEXT: vmulsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: ret{{[l|q]}}
+;
+; X87-LABEL: f5:
+; X87: # %bb.0:
+; X87-NEXT: fldl {{[0-9]+}}(%esp)
+; X87-NEXT: fmull {{[0-9]+}}(%esp)
+; X87-NEXT: retl
+ %ret = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret double %ret
+}
+
+define x86_regcallcc float @f6(float %a, float %b) #0 {
+; SSE-LABEL: f6:
+; SSE: # %bb.0:
+; SSE-NEXT: mulss %xmm1, %xmm0
+; SSE-NEXT: ret{{[l|q]}}
+;
+; AVX-LABEL: f6:
+; AVX: # %bb.0:
+; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: ret{{[l|q]}}
+;
+; X87-LABEL: f6:
+; X87: # %bb.0:
+; X87-NEXT: flds {{[0-9]+}}(%esp)
+; X87-NEXT: fmuls {{[0-9]+}}(%esp)
+; X87-NEXT: retl
+ %ret = call float @llvm.experimental.constrained.fmul.f32(float %a, float %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret float %ret
+}
+
+define x86_regcallcc double @f7(double %a, double %b) #0 {
+; SSE-LABEL: f7:
+; SSE: # %bb.0:
+; SSE-NEXT: divsd %xmm1, %xmm0
+; SSE-NEXT: ret{{[l|q]}}
+;
+; AVX-LABEL: f7:
+; AVX: # %bb.0:
+; AVX-NEXT: vdivsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: ret{{[l|q]}}
+;
+; X87-LABEL: f7:
+; X87: # %bb.0:
+; X87-NEXT: fldl {{[0-9]+}}(%esp)
+; X87-NEXT: fdivl {{[0-9]+}}(%esp)
+; X87-NEXT: retl
+ %ret = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret double %ret
+}
+
+define x86_regcallcc float @f8(float %a, float %b) #0 {
+; SSE-LABEL: f8:
+; SSE: # %bb.0:
+; SSE-NEXT: divss %xmm1, %xmm0
+; SSE-NEXT: ret{{[l|q]}}
+;
+; AVX-LABEL: f8:
+; AVX: # %bb.0:
+; AVX-NEXT: vdivss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: ret{{[l|q]}}
+;
+; X87-LABEL: f8:
+; X87: # %bb.0:
+; X87-NEXT: flds {{[0-9]+}}(%esp)
+; X87-NEXT: fdivs {{[0-9]+}}(%esp)
+; X87-NEXT: retl
+ %ret = call float @llvm.experimental.constrained.fdiv.f32(float %a, float %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret float %ret
+}
+
+attributes #0 = { strictfp }
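As the NOTE line states, the CHECK assertions in this file (and the ones that
follow) were generated mechanically by utils/update_llc_test_checks.py, so when
the expected codegen changes they are regenerated rather than hand-edited. A
typical invocation looks like the following, assuming a build tree with llc at
build/bin/llc (the build path is hypothetical):

    llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
        llvm/test/CodeGen/X86/fp-strict-scalar.ll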
diff --git a/llvm/test/CodeGen/X86/vec-strict-128.ll b/llvm/test/CodeGen/X86/vec-strict-128.ll
new file mode 100644
index 000000000000..fea87eb16694
--- /dev/null
+++ b/llvm/test/CodeGen/X86/vec-strict-128.ll
@@ -0,0 +1,226 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefixes=CHECK,SSE
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefixes=CHECK,AVX
+
+declare <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double>, <2 x double>, metadata, metadata)
+declare <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float>, <4 x float>, metadata, metadata)
+declare <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double>, <2 x double>, metadata, metadata)
+declare <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float>, <4 x float>, metadata, metadata)
+declare <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double>, <2 x double>, metadata, metadata)
+declare <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float>, <4 x float>, metadata, metadata)
+declare <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double>, <2 x double>, metadata, metadata)
+declare <4 x float> @llvm.experimental.constrained.fdiv.v4f32(<4 x float>, <4 x float>, metadata, metadata)
+
+define <2 x double> @f1(<2 x double> %a, <2 x double> %b) #0 {
+; SSE-LABEL: f1:
+; SSE: # %bb.0:
+; SSE-NEXT: movapd %xmm0, %xmm2
+; SSE-NEXT: addsd %xmm1, %xmm2
+; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
+; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
+; SSE-NEXT: addsd %xmm1, %xmm0
+; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm0[0]
+; SSE-NEXT: movapd %xmm2, %xmm0
+; SSE-NEXT: ret{{[l|q]}}
+;
+; AVX-LABEL: f1:
+; AVX: # %bb.0:
+; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; AVX-NEXT: ret{{[l|q]}}
+ %ret = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double> %a, <2 x double> %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <2 x double> %ret
+}
+
+define <4 x float> @f2(<4 x float> %a, <4 x float> %b) #0 {
+; SSE-LABEL: f2:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm1, %xmm2
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1],xmm1[2,3]
+; SSE-NEXT: movaps %xmm0, %xmm3
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3]
+; SSE-NEXT: addss %xmm2, %xmm3
+; SSE-NEXT: movaps %xmm1, %xmm2
+; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
+; SSE-NEXT: movaps %xmm0, %xmm4
+; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm0[1]
+; SSE-NEXT: addss %xmm2, %xmm4
+; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; SSE-NEXT: movaps %xmm0, %xmm2
+; SSE-NEXT: addss %xmm1, %xmm2
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE-NEXT: addss %xmm1, %xmm0
+; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm4[0]
+; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: ret{{[l|q]}}
+;
+; AVX-LABEL: f2:
+; AVX: # %bb.0:
+; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; AVX-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX-NEXT: vaddss %xmm3, %xmm4, %xmm3
+; AVX-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
+; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
+; AVX-NEXT: vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
+; AVX-NEXT: vaddss %xmm3, %xmm4, %xmm3
+; AVX-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
+; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
+; AVX-NEXT: ret{{[l|q]}}
+ %ret = call <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float> %a, <4 x float> %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <4 x float> %ret
+}
+
+define <2 x double> @f3(<2 x double> %a, <2 x double> %b) #0 {
+; SSE-LABEL: f3:
+; SSE: # %bb.0:
+; SSE-NEXT: movapd %xmm0, %xmm2
+; SSE-NEXT: subsd %xmm1, %xmm2
+; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1,1]
+; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1,1]
+; SSE-NEXT: subsd %xmm1, %xmm0
+; SSE-NEXT: unpcklpd {{.*#+}} xmm2 = xmm2[0],xmm0[0]
+; SSE-NEXT: movapd %xmm2, %xmm0
+; SSE-NEXT: ret{{[l|q]}}
+;
+; AVX-LABEL: f3:
+; AVX: # %bb.0:
+; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; AVX-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; AVX-NEXT: vsubsd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm2[0],xmm0[0]
+; AVX-NEXT: ret{{[l|q]}}
+ %ret = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double> %a, <2 x double> %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <2 x double> %ret
+}
+
+define <4 x float> @f4(<4 x float> %a, <4 x float> %b) #0 {
+; SSE-LABEL: f4:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm1, %xmm2
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1],xmm1[2,3]
+; SSE-NEXT: movaps %xmm0, %xmm3
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm0[2,3]
+; SSE-NEXT: subss %xmm2, %xmm3
+; SSE-NEXT: movaps %xmm1, %xmm2
+; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm1[1]
+; SSE-NEXT: movaps %xmm0, %xmm4
+; SSE-NEXT: unpckhpd {{.*#+}} xmm4 = xmm4[1],xmm0[1]
+; SSE-NEXT: subss %xmm2, %xmm4
+; SSE-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
+; SSE-NEXT: movaps %xmm0, %xmm2
+; SSE-NEXT: subss %xmm1, %xmm2
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE-NEXT: subss %xmm1, %xmm0
+; SSE-NEXT: unpcklps {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm4[0]
+; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: ret{{[l|q]}}
+;
+; AVX-LABEL: f4:
+; AVX: # %bb.0:
+; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm2
+; AVX-NEXT: vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; AVX-NEXT: vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX-NEXT: vsubss %xmm3, %xmm4, %xmm3
+; AVX-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[2,3]
+; AVX-NEXT: vpermilpd {{.*#+}} xmm3 = xmm1[1,0]
+; AVX-NEXT: vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
+; AVX-NEXT: vsubss %xmm3, %xmm4, %xmm3
+; AVX-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
+; AVX-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; AVX-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; AVX-NEXT: vsubss %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
+; AVX-NEXT: ret{{[l|q]}}
+ %ret = call <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float> %a, <4 x float> %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <4 x float> %ret
+}
+
+define <2 x double> @f5(<2 x double> %a, <2 x double> %b) #0 {
+; SSE-LABEL: f5:
+; SSE: # %bb.0:
+; SSE-NEXT: mulpd %xmm1, %xmm0
+; SSE-NEXT: ret{{[l|q]}}
+;
+; AVX-LABEL: f5:
+; AVX: # %bb.0:
+; AVX-NEXT: vmulpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: ret{{[l|q]}}
+ %ret = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %a, <2 x double> %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <2 x double> %ret
+}
+
+define <4 x float> @f6(<4 x float> %a, <4 x float> %b) #0 {
+; SSE-LABEL: f6:
+; SSE: # %bb.0:
+; SSE-NEXT: mulps %xmm1, %xmm0
+; SSE-NEXT: ret{{[l|q]}}
+;
+; AVX-LABEL: f6:
+; AVX: # %bb.0:
+; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: ret{{[l|q]}}
+ %ret = call <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float> %a, <4 x float> %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <4 x float> %ret
+}
+
+define <2 x double> @f7(<2 x double> %a, <2 x double> %b) #0 {
+; SSE-LABEL: f7:
+; SSE: # %bb.0:
+; SSE-NEXT: divpd %xmm1, %xmm0
+; SSE-NEXT: ret{{[l|q]}}
+;
+; AVX-LABEL: f7:
+; AVX: # %bb.0:
+; AVX-NEXT: vdivpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: ret{{[l|q]}}
+ %ret = call <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double> %a, <2 x double> %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <2 x double> %ret
+}
+
+define <4 x float> @f8(<4 x float> %a, <4 x float> %b) #0 {
+; SSE-LABEL: f8:
+; SSE: # %bb.0:
+; SSE-NEXT: divps %xmm1, %xmm0
+; SSE-NEXT: ret{{[l|q]}}
+;
+; AVX-LABEL: f8:
+; AVX: # %bb.0:
+; AVX-NEXT: vdivps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: ret{{[l|q]}}
+ %ret = call <4 x float> @llvm.experimental.constrained.fdiv.v4f32(<4 x float> %a, <4 x float> %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <4 x float> %ret
+}
+
+attributes #0 = { strictfp }
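Worth noting in the 128-bit checks above: at this precommit stage the strict
fadd/fsub cases are fully scalarized (element shuffles plus addsd/addss or
subsd/subss per lane), while strict fmul/fdiv already select the packed
mulpd/mulps and divpd/divps forms. For contrast, an ordinary non-constrained
version of f1 would normally select a single packed add; a sketch (not part of
the commit):

    define <2 x double> @f1_default(<2 x double> %a, <2 x double> %b) {
      ; Without strict FP semantics this lowers to one addpd.
      %ret = fadd <2 x double> %a, %b
      ret <2 x double> %ret
    }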
diff --git a/llvm/test/CodeGen/X86/vec-strict-256.ll b/llvm/test/CodeGen/X86/vec-strict-256.ll
new file mode 100644
index 000000000000..23971a140c87
--- /dev/null
+++ b/llvm/test/CodeGen/X86/vec-strict-256.ll
@@ -0,0 +1,184 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -O3 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s
+
+declare <4 x double> @llvm.experimental.constrained.fadd.v4f64(<4 x double>, <4 x double>, metadata, metadata)
+declare <8 x float> @llvm.experimental.constrained.fadd.v8f32(<8 x float>, <8 x float>, metadata, metadata)
+declare <4 x double> @llvm.experimental.constrained.fsub.v4f64(<4 x double>, <4 x double>, metadata, metadata)
+declare <8 x float> @llvm.experimental.constrained.fsub.v8f32(<8 x float>, <8 x float>, metadata, metadata)
+declare <4 x double> @llvm.experimental.constrained.fmul.v4f64(<4 x double>, <4 x double>, metadata, metadata)
+declare <8 x float> @llvm.experimental.constrained.fmul.v8f32(<8 x float>, <8 x float>, metadata, metadata)
+declare <4 x double> @llvm.experimental.constrained.fdiv.v4f64(<4 x double>, <4 x double>, metadata, metadata)
+declare <8 x float> @llvm.experimental.constrained.fdiv.v8f32(<8 x float>, <8 x float>, metadata, metadata)
+
+define <4 x double> @f1(<4 x double> %a, <4 x double> %b) #0 {
+; CHECK-LABEL: f1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
+; CHECK-NEXT: vaddsd %xmm2, %xmm3, %xmm4
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
+; CHECK-NEXT: vaddsd %xmm2, %xmm3, %xmm2
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm4[0],xmm2[0]
+; CHECK-NEXT: vaddsd %xmm1, %xmm0, %xmm3
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; CHECK-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm3[0],xmm0[0]
+; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT: ret{{[l|q]}}
+ %ret = call <4 x double> @llvm.experimental.constrained.fadd.v4f64(<4 x double> %a, <4 x double> %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <4 x double> %ret
+}
+
+define <8 x float> @f2(<8 x float> %a, <8 x float> %b) #0 {
+; CHECK-LABEL: f2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
+; CHECK-NEXT: vaddss %xmm2, %xmm3, %xmm4
+; CHECK-NEXT: vmovshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
+; CHECK-NEXT: vmovshdup {{.*#+}} xmm6 = xmm3[1,1,3,3]
+; CHECK-NEXT: vaddss %xmm5, %xmm6, %xmm5
+; CHECK-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm5 = xmm2[1,0]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm6 = xmm3[1,0]
+; CHECK-NEXT: vaddss %xmm5, %xmm6, %xmm5
+; CHECK-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm5[0],xmm4[3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; CHECK-NEXT: vaddss %xmm2, %xmm3, %xmm2
+; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm4[0,1,2],xmm2[0]
+; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm3
+; CHECK-NEXT: vmovshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; CHECK-NEXT: vmovshdup {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; CHECK-NEXT: vaddss %xmm4, %xmm5, %xmm4
+; CHECK-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[2,3]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm5 = xmm0[1,0]
+; CHECK-NEXT: vaddss %xmm4, %xmm5, %xmm4
+; CHECK-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0]
+; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT: ret{{[l|q]}}
+ %ret = call <8 x float> @llvm.experimental.constrained.fadd.v8f32(<8 x float> %a, <8 x float> %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <8 x float> %ret
+}
+
+define <4 x double> @f3(<4 x double> %a, <4 x double> %b) #0 {
+; CHECK-LABEL: f3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
+; CHECK-NEXT: vsubsd %xmm2, %xmm3, %xmm4
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
+; CHECK-NEXT: vsubsd %xmm2, %xmm3, %xmm2
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm4[0],xmm2[0]
+; CHECK-NEXT: vsubsd %xmm1, %xmm0, %xmm3
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; CHECK-NEXT: vsubsd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm3[0],xmm0[0]
+; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT: ret{{[l|q]}}
+ %ret = call <4 x double> @llvm.experimental.constrained.fsub.v4f64(<4 x double> %a, <4 x double> %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <4 x double> %ret
+}
+
+define <8 x float> @f4(<8 x float> %a, <8 x float> %b) #0 {
+; CHECK-LABEL: f4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
+; CHECK-NEXT: vsubss %xmm2, %xmm3, %xmm4
+; CHECK-NEXT: vmovshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
+; CHECK-NEXT: vmovshdup {{.*#+}} xmm6 = xmm3[1,1,3,3]
+; CHECK-NEXT: vsubss %xmm5, %xmm6, %xmm5
+; CHECK-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm5 = xmm2[1,0]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm6 = xmm3[1,0]
+; CHECK-NEXT: vsubss %xmm5, %xmm6, %xmm5
+; CHECK-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm5[0],xmm4[3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; CHECK-NEXT: vsubss %xmm2, %xmm3, %xmm2
+; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm4[0,1,2],xmm2[0]
+; CHECK-NEXT: vsubss %xmm1, %xmm0, %xmm3
+; CHECK-NEXT: vmovshdup {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; CHECK-NEXT: vmovshdup {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; CHECK-NEXT: vsubss %xmm4, %xmm5, %xmm4
+; CHECK-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[2,3]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm5 = xmm0[1,0]
+; CHECK-NEXT: vsubss %xmm4, %xmm5, %xmm4
+; CHECK-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; CHECK-NEXT: vsubss %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0]
+; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; CHECK-NEXT: ret{{[l|q]}}
+ %ret = call <8 x float> @llvm.experimental.constrained.fsub.v8f32(<8 x float> %a, <8 x float> %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <8 x float> %ret
+}
+
+define <4 x double> @f5(<4 x double> %a, <4 x double> %b) #0 {
+; CHECK-LABEL: f5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmulpd %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: ret{{[l|q]}}
+ %ret = call <4 x double> @llvm.experimental.constrained.fmul.v4f64(<4 x double> %a, <4 x double> %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <4 x double> %ret
+}
+
+define <8 x float> @f6(<8 x float> %a, <8 x float> %b) #0 {
+; CHECK-LABEL: f6:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: ret{{[l|q]}}
+ %ret = call <8 x float> @llvm.experimental.constrained.fmul.v8f32(<8 x float> %a, <8 x float> %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <8 x float> %ret
+}
+
+define <4 x double> @f7(<4 x double> %a, <4 x double> %b) #0 {
+; CHECK-LABEL: f7:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vdivpd %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: ret{{[l|q]}}
+ %ret = call <4 x double> @llvm.experimental.constrained.fdiv.v4f64(<4 x double> %a, <4 x double> %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <4 x double> %ret
+}
+
+define <8 x float> @f8(<8 x float> %a, <8 x float> %b) #0 {
+; CHECK-LABEL: f8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vdivps %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: ret{{[l|q]}}
+ %ret = call <8 x float> @llvm.experimental.constrained.fdiv.v8f32(<8 x float> %a, <8 x float> %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <8 x float> %ret
+}
+
+attributes #0 = { strictfp }
diff --git a/llvm/test/CodeGen/X86/vec-strict-512.ll b/llvm/test/CodeGen/X86/vec-strict-512.ll
new file mode 100644
index 000000000000..d4d9538f8705
--- /dev/null
+++ b/llvm/test/CodeGen/X86/vec-strict-512.ll
@@ -0,0 +1,278 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s
+
+declare <8 x double> @llvm.experimental.constrained.fadd.v8f64(<8 x double>, <8 x double>, metadata, metadata)
+declare <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float>, <16 x float>, metadata, metadata)
+declare <8 x double> @llvm.experimental.constrained.fsub.v8f64(<8 x double>, <8 x double>, metadata, metadata)
+declare <16 x float> @llvm.experimental.constrained.fsub.v16f32(<16 x float>, <16 x float>, metadata, metadata)
+declare <8 x double> @llvm.experimental.constrained.fmul.v8f64(<8 x double>, <8 x double>, metadata, metadata)
+declare <16 x float> @llvm.experimental.constrained.fmul.v16f32(<16 x float>, <16 x float>, metadata, metadata)
+declare <8 x double> @llvm.experimental.constrained.fdiv.v8f64(<8 x double>, <8 x double>, metadata, metadata)
+declare <16 x float> @llvm.experimental.constrained.fdiv.v16f32(<16 x float>, <16 x float>, metadata, metadata)
+
+define <8 x double> @f1(<8 x double> %a, <8 x double> %b) #0 {
+; CHECK-LABEL: f1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vextractf32x4 $3, %zmm1, %xmm2
+; CHECK-NEXT: vextractf32x4 $3, %zmm0, %xmm3
+; CHECK-NEXT: vaddsd %xmm2, %xmm3, %xmm4
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
+; CHECK-NEXT: vaddsd %xmm2, %xmm3, %xmm2
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm4[0],xmm2[0]
+; CHECK-NEXT: vextractf32x4 $2, %zmm1, %xmm3
+; CHECK-NEXT: vextractf32x4 $2, %zmm0, %xmm4
+; CHECK-NEXT: vaddsd %xmm3, %xmm4, %xmm5
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; CHECK-NEXT: vaddsd %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm3 = xmm5[0],xmm3[0]
+; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm3
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm4
+; CHECK-NEXT: vaddsd %xmm3, %xmm4, %xmm5
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; CHECK-NEXT: vaddsd %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm3 = xmm5[0],xmm3[0]
+; CHECK-NEXT: vaddsd %xmm1, %xmm0, %xmm4
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; CHECK-NEXT: vaddsd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm4[0],xmm0[0]
+; CHECK-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; CHECK-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; CHECK-NEXT: ret{{[l|q]}}
+ %ret = call <8 x double> @llvm.experimental.constrained.fadd.v8f64(<8 x double> %a, <8 x double> %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <8 x double> %ret
+}
+
+define <16 x float> @f2(<16 x float> %a, <16 x float> %b) #0 {
+; CHECK-LABEL: f2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vextractf32x4 $3, %zmm1, %xmm2
+; CHECK-NEXT: vextractf32x4 $3, %zmm0, %xmm3
+; CHECK-NEXT: vaddss %xmm2, %xmm3, %xmm4
+; CHECK-NEXT: vmovshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
+; CHECK-NEXT: vmovshdup {{.*#+}} xmm6 = xmm3[1,1,3,3]
+; CHECK-NEXT: vaddss %xmm5, %xmm6, %xmm5
+; CHECK-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm5 = xmm2[1,0]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm6 = xmm3[1,0]
+; CHECK-NEXT: vaddss %xmm5, %xmm6, %xmm5
+; CHECK-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm5[0],xmm4[3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; CHECK-NEXT: vaddss %xmm2, %xmm3, %xmm2
+; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm4[0,1,2],xmm2[0]
+; CHECK-NEXT: vextractf32x4 $2, %zmm1, %xmm3
+; CHECK-NEXT: vextractf32x4 $2, %zmm0, %xmm4
+; CHECK-NEXT: vaddss %xmm3, %xmm4, %xmm5
+; CHECK-NEXT: vmovshdup {{.*#+}} xmm6 = xmm3[1,1,3,3]
+; CHECK-NEXT: vmovshdup {{.*#+}} xmm7 = xmm4[1,1,3,3]
+; CHECK-NEXT: vaddss %xmm6, %xmm7, %xmm6
+; CHECK-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[2,3]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm6 = xmm3[1,0]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm7 = xmm4[1,0]
+; CHECK-NEXT: vaddss %xmm6, %xmm7, %xmm6
+; CHECK-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0,1],xmm6[0],xmm5[3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm4 = xmm4[3,1,2,3]
+; CHECK-NEXT: vaddss %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vinsertps {{.*#+}} xmm3 = xmm5[0,1,2],xmm3[0]
+; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm3
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm4
+; CHECK-NEXT: vaddss %xmm3, %xmm4, %xmm5
+; CHECK-NEXT: vmovshdup {{.*#+}} xmm6 = xmm3[1,1,3,3]
+; CHECK-NEXT: vmovshdup {{.*#+}} xmm7 = xmm4[1,1,3,3]
+; CHECK-NEXT: vaddss %xmm6, %xmm7, %xmm6
+; CHECK-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[2,3]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm6 = xmm3[1,0]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm7 = xmm4[1,0]
+; CHECK-NEXT: vaddss %xmm6, %xmm7, %xmm6
+; CHECK-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0,1],xmm6[0],xmm5[3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm4 = xmm4[3,1,2,3]
+; CHECK-NEXT: vaddss %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vinsertps {{.*#+}} xmm3 = xmm5[0,1,2],xmm3[0]
+; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm4
+; CHECK-NEXT: vmovshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
+; CHECK-NEXT: vmovshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; CHECK-NEXT: vaddss %xmm5, %xmm6, %xmm5
+; CHECK-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm5 = xmm1[1,0]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
+; CHECK-NEXT: vaddss %xmm5, %xmm6, %xmm5
+; CHECK-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm5[0],xmm4[3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; CHECK-NEXT: vaddss %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm4[0,1,2],xmm0[0]
+; CHECK-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; CHECK-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; CHECK-NEXT: ret{{[l|q]}}
+ %ret = call <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float> %a, <16 x float> %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <16 x float> %ret
+}
+
+define <8 x double> @f3(<8 x double> %a, <8 x double> %b) #0 {
+; CHECK-LABEL: f3:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vextractf32x4 $3, %zmm1, %xmm2
+; CHECK-NEXT: vextractf32x4 $3, %zmm0, %xmm3
+; CHECK-NEXT: vsubsd %xmm2, %xmm3, %xmm4
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm2 = xmm2[1,0]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
+; CHECK-NEXT: vsubsd %xmm2, %xmm3, %xmm2
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm4[0],xmm2[0]
+; CHECK-NEXT: vextractf32x4 $2, %zmm1, %xmm3
+; CHECK-NEXT: vextractf32x4 $2, %zmm0, %xmm4
+; CHECK-NEXT: vsubsd %xmm3, %xmm4, %xmm5
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; CHECK-NEXT: vsubsd %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm3 = xmm5[0],xmm3[0]
+; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm3
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm4
+; CHECK-NEXT: vsubsd %xmm3, %xmm4, %xmm5
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
+; CHECK-NEXT: vsubsd %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm3 = xmm5[0],xmm3[0]
+; CHECK-NEXT: vsubsd %xmm1, %xmm0, %xmm4
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
+; CHECK-NEXT: vsubsd %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm4[0],xmm0[0]
+; CHECK-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; CHECK-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; CHECK-NEXT: ret{{[l|q]}}
+ %ret = call <8 x double> @llvm.experimental.constrained.fsub.v8f64(<8 x double> %a, <8 x double> %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <8 x double> %ret
+}
+
+define <16 x float> @f4(<16 x float> %a, <16 x float> %b) #0 {
+; CHECK-LABEL: f4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vextractf32x4 $3, %zmm1, %xmm2
+; CHECK-NEXT: vextractf32x4 $3, %zmm0, %xmm3
+; CHECK-NEXT: vsubss %xmm2, %xmm3, %xmm4
+; CHECK-NEXT: vmovshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
+; CHECK-NEXT: vmovshdup {{.*#+}} xmm6 = xmm3[1,1,3,3]
+; CHECK-NEXT: vsubss %xmm5, %xmm6, %xmm5
+; CHECK-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm5 = xmm2[1,0]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm6 = xmm3[1,0]
+; CHECK-NEXT: vsubss %xmm5, %xmm6, %xmm5
+; CHECK-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm5[0],xmm4[3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[3,1,2,3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; CHECK-NEXT: vsubss %xmm2, %xmm3, %xmm2
+; CHECK-NEXT: vinsertps {{.*#+}} xmm2 = xmm4[0,1,2],xmm2[0]
+; CHECK-NEXT: vextractf32x4 $2, %zmm1, %xmm3
+; CHECK-NEXT: vextractf32x4 $2, %zmm0, %xmm4
+; CHECK-NEXT: vsubss %xmm3, %xmm4, %xmm5
+; CHECK-NEXT: vmovshdup {{.*#+}} xmm6 = xmm3[1,1,3,3]
+; CHECK-NEXT: vmovshdup {{.*#+}} xmm7 = xmm4[1,1,3,3]
+; CHECK-NEXT: vsubss %xmm6, %xmm7, %xmm6
+; CHECK-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[2,3]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm6 = xmm3[1,0]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm7 = xmm4[1,0]
+; CHECK-NEXT: vsubss %xmm6, %xmm7, %xmm6
+; CHECK-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0,1],xmm6[0],xmm5[3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm4 = xmm4[3,1,2,3]
+; CHECK-NEXT: vsubss %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vinsertps {{.*#+}} xmm3 = xmm5[0,1,2],xmm3[0]
+; CHECK-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm3
+; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm4
+; CHECK-NEXT: vsubss %xmm3, %xmm4, %xmm5
+; CHECK-NEXT: vmovshdup {{.*#+}} xmm6 = xmm3[1,1,3,3]
+; CHECK-NEXT: vmovshdup {{.*#+}} xmm7 = xmm4[1,1,3,3]
+; CHECK-NEXT: vsubss %xmm6, %xmm7, %xmm6
+; CHECK-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[2,3]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm6 = xmm3[1,0]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm7 = xmm4[1,0]
+; CHECK-NEXT: vsubss %xmm6, %xmm7, %xmm6
+; CHECK-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0,1],xmm6[0],xmm5[3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm3 = xmm3[3,1,2,3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm4 = xmm4[3,1,2,3]
+; CHECK-NEXT: vsubss %xmm3, %xmm4, %xmm3
+; CHECK-NEXT: vinsertps {{.*#+}} xmm3 = xmm5[0,1,2],xmm3[0]
+; CHECK-NEXT: vsubss %xmm1, %xmm0, %xmm4
+; CHECK-NEXT: vmovshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
+; CHECK-NEXT: vmovshdup {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; CHECK-NEXT: vsubss %xmm5, %xmm6, %xmm5
+; CHECK-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[2,3]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm5 = xmm1[1,0]
+; CHECK-NEXT: vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
+; CHECK-NEXT: vsubss %xmm5, %xmm6, %xmm5
+; CHECK-NEXT: vinsertps {{.*#+}} xmm4 = xmm4[0,1],xmm5[0],xmm4[3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
+; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
+; CHECK-NEXT: vsubss %xmm1, %xmm0, %xmm0
+; CHECK-NEXT: vinsertps {{.*#+}} xmm0 = xmm4[0,1,2],xmm0[0]
+; CHECK-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
+; CHECK-NEXT: vinsertf64x4 $1, %ymm2, %zmm0, %zmm0
+; CHECK-NEXT: ret{{[l|q]}}
+ %ret = call <16 x float> @llvm.experimental.constrained.fsub.v16f32(<16 x float> %a, <16 x float> %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <16 x float> %ret
+}
+
+define <8 x double> @f5(<8 x double> %a, <8 x double> %b) #0 {
+; CHECK-LABEL: f5:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmulpd %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: ret{{[l|q]}}
+ %ret = call <8 x double> @llvm.experimental.constrained.fmul.v8f64(<8 x double> %a, <8 x double> %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <8 x double> %ret
+}
+
+define <16 x float> @f6(<16 x float> %a, <16 x float> %b) #0 {
+; CHECK-LABEL: f6:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmulps %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: ret{{[l|q]}}
+ %ret = call <16 x float> @llvm.experimental.constrained.fmul.v16f32(<16 x float> %a, <16 x float> %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <16 x float> %ret
+}
+
+define <8 x double> @f7(<8 x double> %a, <8 x double> %b) #0 {
+; CHECK-LABEL: f7:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vdivpd %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: ret{{[l|q]}}
+ %ret = call <8 x double> @llvm.experimental.constrained.fdiv.v8f64(<8 x double> %a, <8 x double> %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <8 x double> %ret
+}
+
+define <16 x float> @f8(<16 x float> %a, <16 x float> %b) #0 {
+; CHECK-LABEL: f8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vdivps %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: ret{{[l|q]}}
+ %ret = call <16 x float> @llvm.experimental.constrained.fdiv.v16f32(<16 x float> %a, <16 x float> %b,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <16 x float> %ret
+}
+
+attributes #0 = { strictfp }