[llvm] r359461 - Add AVX support to this test.

Kevin P. Neal via llvm-commits
llvm-commits at lists.llvm.org
Mon Apr 29 09:06:05 PDT 2019

Author: kpn
Date: Mon Apr 29 09:06:04 2019
New Revision: 359461

URL: http://llvm.org/viewvc/llvm-project?rev=359461&view=rev
Log:
Add AVX support to this test.

Requested by Craig Topper and Andrew Kaylor as part of D55897.

Modified:
    llvm/trunk/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll

Modified: llvm/trunk/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll?rev=359461&r1=359460&r2=359461&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll Mon Apr 29 09:06:04 2019
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -O3 -mtriple=x86_64-pc-linux < %s | FileCheck %s
+; RUN: llc -O3 -mtriple=x86_64-pc-linux -mattr=+avx < %s | FileCheck --check-prefix=AVX %s
define <1 x float> @constrained_vector_fdiv_v1f32() {
; CHECK-LABEL: constrained_vector_fdiv_v1f32:
@@ -7,6 +8,12 @@ define <1 x float> @constrained_vector_f
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: divss {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fdiv_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vdivss {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
entry:
%div = call <1 x float> @llvm.experimental.constrained.fdiv.v1f32(
<1 x float> <float 1.000000e+00>,
@@ -22,6 +29,12 @@ define <2 x double> @constrained_vector_
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
; CHECK-NEXT: divpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fdiv_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovapd {{.*#+}} xmm0 = [1.0E+0,2.0E+0]
+; AVX-NEXT: vdivpd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
entry:
%div = call <2 x double> @llvm.experimental.constrained.fdiv.v2f64(
<2 x double> <double 1.000000e+00, double 2.000000e+00>,
@@ -44,6 +57,19 @@ define <3 x float> @constrained_vector_f
; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fdiv_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vdivss %xmm0, %xmm1, %xmm1
+; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-NEXT: vdivss %xmm0, %xmm2, %xmm2
+; AVX-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; AVX-NEXT: vdivss %xmm0, %xmm3, %xmm0
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[2,3]
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; AVX-NEXT: retq
entry:
%div = call <3 x float> @llvm.experimental.constrained.fdiv.v3f32(
<3 x float> <float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>,
@@ -65,6 +91,15 @@ define <3 x double> @constrained_vector_
; CHECK-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; CHECK-NEXT: fldl -{{[0-9]+}}(%rsp)
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fdiv_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vdivsd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vmovapd {{.*#+}} xmm1 = [1.0E+0,2.0E+0]
+; AVX-NEXT: vdivpd {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: retq
entry:
%div = call <3 x double> @llvm.experimental.constrained.fdiv.v3f64(
<3 x double> <double 1.000000e+00, double 2.000000e+00, double 3.000000e+00>,
@@ -83,6 +118,12 @@ define <4 x double> @constrained_vector_
; CHECK-NEXT: movapd {{.*#+}} xmm1 = [3.0E+0,4.0E+0]
; CHECK-NEXT: divpd %xmm2, %xmm1
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fdiv_v4f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovapd {{.*#+}} ymm0 = [1.0E+0,2.0E+0,3.0E+0,4.0E+0]
+; AVX-NEXT: vdivpd {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT: retq
entry:
%div = call <4 x double> @llvm.experimental.constrained.fdiv.v4f64(
<4 x double> <double 1.000000e+00, double 2.000000e+00,
@@ -105,6 +146,17 @@ define <1 x float> @constrained_vector_f
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_frem_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: pushq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 16
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: callq fmodf
+; AVX-NEXT: popq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%rem = call <1 x float> @llvm.experimental.constrained.frem.v1f32(
<1 x float> <float 1.000000e+00>,
@@ -131,6 +183,23 @@ define <2 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_frem_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 32
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq fmod
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq fmod
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: addq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%rem = call <2 x double> @llvm.experimental.constrained.frem.v2f64(
<2 x double> <double 1.000000e+00, double 2.000000e+00>,
@@ -164,6 +233,29 @@ define <3 x float> @constrained_vector_f
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_frem_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: callq fmodf
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: callq fmodf
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: callq fmodf
+; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%rem = call <3 x float> @llvm.experimental.constrained.frem.v3f32(
<3 x float> <float 1.000000e+00, float 2.000000e+00, float 3.000000e+00>,
@@ -198,6 +290,30 @@ define <3 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_frem_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 64
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq fmod
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq fmod
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: callq fmod
+; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: addq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%rem = call <3 x double> @llvm.experimental.constrained.frem.v3f64(
<3 x double> <double 1.000000e+00, double 2.000000e+00, double 3.000000e+00>,
@@ -236,6 +352,34 @@ define <4 x double> @constrained_vector_
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_frem_v4f64:
+; AVX: # %bb.0:
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq fmod
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq fmod
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq fmod
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq fmod
+; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
%rem = call <4 x double> @llvm.experimental.constrained.frem.v4f64(
<4 x double> <double 1.000000e+00, double 2.000000e+00,
double 3.000000e+00, double 4.000000e+00>,
@@ -252,6 +396,12 @@ define <1 x float> @constrained_vector_f
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: mulss {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fmul_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
entry:
%mul = call <1 x float> @llvm.experimental.constrained.fmul.v1f32(
<1 x float> <float 0x7FF0000000000000>,
@@ -267,6 +417,12 @@ define <2 x double> @constrained_vector_
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
; CHECK-NEXT: mulpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fmul_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
+; AVX-NEXT: vmulpd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
entry:
%mul = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(
<2 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF>,
@@ -288,6 +444,16 @@ define <3 x float> @constrained_vector_f
; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fmul_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm1
+; AVX-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm2
+; AVX-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[2,3]
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0],xmm0[3]
+; AVX-NEXT: retq
entry:
%mul = call <3 x float> @llvm.experimental.constrained.fmul.v3f32(
<3 x float> <float 0x7FF0000000000000, float 0x7FF0000000000000,
@@ -310,6 +476,15 @@ define <3 x double> @constrained_vector_
; CHECK-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; CHECK-NEXT: fldl -{{[0-9]+}}(%rsp)
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fmul_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmulsd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vmovapd {{.*#+}} xmm1 = [1.7976931348623157E+308,1.7976931348623157E+308]
+; AVX-NEXT: vmulpd {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: retq
entry:
%mul = call <3 x double> @llvm.experimental.constrained.fmul.v3f64(
<3 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF,
@@ -328,6 +503,12 @@ define <4 x double> @constrained_vector_
; CHECK-NEXT: mulpd %xmm1, %xmm0
; CHECK-NEXT: mulpd {{.*}}(%rip), %xmm1
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fmul_v4f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovapd {{.*#+}} ymm0 = [1.7976931348623157E+308,1.7976931348623157E+308,1.7976931348623157E+308,1.7976931348623157E+308]
+; AVX-NEXT: vmulpd {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT: retq
entry:
%mul = call <4 x double> @llvm.experimental.constrained.fmul.v4f64(
<4 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF,
@@ -345,6 +526,12 @@ define <1 x float> @constrained_vector_f
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: addss {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fadd_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vaddss {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
entry:
%add = call <1 x float> @llvm.experimental.constrained.fadd.v1f32(
<1 x float> <float 0x7FF0000000000000>,
@@ -360,6 +547,12 @@ define <2 x double> @constrained_vector_
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
; CHECK-NEXT: addpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fadd_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovapd {{.*#+}} xmm0 = [1.7976931348623157E+308,1.7976931348623157E+308]
+; AVX-NEXT: vaddpd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
entry:
%add = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(
<2 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF>,
@@ -381,6 +574,17 @@ define <3 x float> @constrained_vector_f
; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fadd_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vaddss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vaddss {{.*}}(%rip), %xmm1, %xmm2
+; AVX-NEXT: vaddss {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; AVX-NEXT: retq
entry:
%add = call <3 x float> @llvm.experimental.constrained.fadd.v3f32(
<3 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000,
@@ -403,6 +607,15 @@ define <3 x double> @constrained_vector_
; CHECK-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; CHECK-NEXT: fldl -{{[0-9]+}}(%rsp)
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fadd_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vxorpd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vaddsd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vmovapd {{.*#+}} xmm1 = [1.7976931348623157E+308,1.7976931348623157E+308]
+; AVX-NEXT: vaddpd {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: retq
entry:
%add = call <3 x double> @llvm.experimental.constrained.fadd.v3f64(
<3 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF,
@@ -421,6 +634,12 @@ define <4 x double> @constrained_vector_
; CHECK-NEXT: addpd %xmm1, %xmm0
; CHECK-NEXT: addpd {{.*}}(%rip), %xmm1
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fadd_v4f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovapd {{.*#+}} ymm0 = [1.7976931348623157E+308,1.7976931348623157E+308,1.7976931348623157E+308,1.7976931348623157E+308]
+; AVX-NEXT: vaddpd {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT: retq
entry:
%add = call <4 x double> @llvm.experimental.constrained.fadd.v4f64(
<4 x double> <double 0x7FEFFFFFFFFFFFFF, double 0x7FEFFFFFFFFFFFFF,
@@ -438,6 +657,12 @@ define <1 x float> @constrained_vector_f
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: subss {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fsub_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vsubss {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
entry:
%sub = call <1 x float> @llvm.experimental.constrained.fsub.v1f32(
<1 x float> <float 0x7FF0000000000000>,
@@ -453,6 +678,12 @@ define <2 x double> @constrained_vector_
; CHECK-NEXT: movapd {{.*#+}} xmm0 = [-1.7976931348623157E+308,-1.7976931348623157E+308]
; CHECK-NEXT: subpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fsub_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovapd {{.*#+}} xmm0 = [-1.7976931348623157E+308,-1.7976931348623157E+308]
+; AVX-NEXT: vsubpd {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
entry:
%sub = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(
<2 x double> <double 0xFFEFFFFFFFFFFFFF, double 0xFFEFFFFFFFFFFFFF>,
@@ -475,6 +706,17 @@ define <3 x float> @constrained_vector_f
; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fsub_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vsubss {{.*}}(%rip), %xmm1, %xmm2
+; AVX-NEXT: vsubss {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[2,3]
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; AVX-NEXT: retq
entry:
%sub = call <3 x float> @llvm.experimental.constrained.fsub.v3f32(
<3 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000,
@@ -498,6 +740,16 @@ define <3 x double> @constrained_vector_
; CHECK-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; CHECK-NEXT: fldl -{{[0-9]+}}(%rsp)
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fsub_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vxorpd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vmovapd {{.*#+}} xmm1 = [-1.7976931348623157E+308,-1.7976931348623157E+308]
+; AVX-NEXT: vsubpd {{.*}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: retq
entry:
%sub = call <3 x double> @llvm.experimental.constrained.fsub.v3f64(
<3 x double> <double 0xFFEFFFFFFFFFFFFF, double 0xFFEFFFFFFFFFFFFF,
@@ -516,6 +768,12 @@ define <4 x double> @constrained_vector_
; CHECK-NEXT: subpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: subpd {{.*}}(%rip), %xmm1
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_fsub_v4f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovapd {{.*#+}} ymm0 = [-1.7976931348623157E+308,-1.7976931348623157E+308,-1.7976931348623157E+308,-1.7976931348623157E+308]
+; AVX-NEXT: vsubpd {{.*}}(%rip), %ymm0, %ymm0
+; AVX-NEXT: retq
entry:
%sub = call <4 x double> @llvm.experimental.constrained.fsub.v4f64(
<4 x double> <double 0xFFEFFFFFFFFFFFFF, double 0xFFEFFFFFFFFFFFFF,
@@ -533,6 +791,12 @@ define <1 x float> @constrained_vector_s
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: sqrtss %xmm0, %xmm0
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_sqrt_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
entry:
%sqrt = call <1 x float> @llvm.experimental.constrained.sqrt.v1f32(
<1 x float> <float 42.0>,
@@ -546,6 +810,11 @@ define <2 x double> @constrained_vector_
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: sqrtpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_sqrt_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vsqrtpd {{.*}}(%rip), %xmm0
+; AVX-NEXT: retq
entry:
%sqrt = call <2 x double> @llvm.experimental.constrained.sqrt.v2f64(
<2 x double> <double 42.0, double 42.1>,
@@ -566,6 +835,18 @@ define <3 x float> @constrained_vector_s
; CHECK-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_sqrt_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vsqrtss %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-NEXT: vsqrtss %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; AVX-NEXT: retq
entry:
%sqrt = call <3 x float> @llvm.experimental.constrained.sqrt.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
@@ -585,6 +866,14 @@ define <3 x double> @constrained_vector_
; CHECK-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm0[1]
; CHECK-NEXT: fldl -{{[0-9]+}}(%rsp)
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_sqrt_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vsqrtpd {{.*}}(%rip), %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: retq
entry:
%sqrt = call <3 x double> @llvm.experimental.constrained.sqrt.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
@@ -599,6 +888,11 @@ define <4 x double> @constrained_vector_
; CHECK-NEXT: sqrtpd {{.*}}(%rip), %xmm0
; CHECK-NEXT: sqrtpd {{.*}}(%rip), %xmm1
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_sqrt_v4f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vsqrtpd {{.*}}(%rip), %ymm0
+; AVX-NEXT: retq
entry:
%sqrt = call <4 x double> @llvm.experimental.constrained.sqrt.v4f64(
<4 x double> <double 42.0, double 42.1,
@@ -619,6 +913,17 @@ define <1 x float> @constrained_vector_p
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_pow_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: pushq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 16
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: callq powf
+; AVX-NEXT: popq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%pow = call <1 x float> @llvm.experimental.constrained.pow.v1f32(
<1 x float> <float 42.0>,
@@ -645,6 +950,23 @@ define <2 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_pow_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 32
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq pow
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq pow
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: addq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%pow = call <2 x double> @llvm.experimental.constrained.pow.v2f64(
<2 x double> <double 42.1, double 42.2>,
@@ -678,6 +1000,29 @@ define <3 x float> @constrained_vector_p
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_pow_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: callq powf
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: callq powf
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: callq powf
+; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%pow = call <3 x float> @llvm.experimental.constrained.pow.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
@@ -712,6 +1057,30 @@ define <3 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_pow_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 64
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq pow
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq pow
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: callq pow
+; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: addq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%pow = call <3 x double> @llvm.experimental.constrained.pow.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
@@ -750,6 +1119,34 @@ define <4 x double> @constrained_vector_
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_pow_v4f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq pow
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq pow
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq pow
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq pow
+; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%pow = call <4 x double> @llvm.experimental.constrained.pow.v4f64(
<4 x double> <double 42.1, double 42.2,
@@ -772,6 +1169,17 @@ define <1 x float> @constrained_vector_p
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_powi_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: pushq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 16
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: movl $3, %edi
+; AVX-NEXT: callq __powisf2
+; AVX-NEXT: popq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%powi = call <1 x float> @llvm.experimental.constrained.powi.v1f32(
<1 x float> <float 42.0>,
@@ -798,6 +1206,23 @@ define <2 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_powi_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 32
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: movl $3, %edi
+; AVX-NEXT: callq __powidf2
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: movl $3, %edi
+; AVX-NEXT: callq __powidf2
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: addq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%powi = call <2 x double> @llvm.experimental.constrained.powi.v2f64(
<2 x double> <double 42.1, double 42.2>,
@@ -831,6 +1256,29 @@ define <3 x float> @constrained_vector_p
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_powi_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: movl $3, %edi
+; AVX-NEXT: callq __powisf2
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: movl $3, %edi
+; AVX-NEXT: callq __powisf2
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: movl $3, %edi
+; AVX-NEXT: callq __powisf2
+; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%powi = call <3 x float> @llvm.experimental.constrained.powi.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
@@ -865,6 +1313,30 @@ define <3 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_powi_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 64
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: movl $3, %edi
+; AVX-NEXT: callq __powidf2
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: movl $3, %edi
+; AVX-NEXT: callq __powidf2
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: movl $3, %edi
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: callq __powidf2
+; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: addq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%powi = call <3 x double> @llvm.experimental.constrained.powi.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
@@ -903,6 +1375,34 @@ define <4 x double> @constrained_vector_
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_powi_v4f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: movl $3, %edi
+; AVX-NEXT: callq __powidf2
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: movl $3, %edi
+; AVX-NEXT: callq __powidf2
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: movl $3, %edi
+; AVX-NEXT: callq __powidf2
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: movl $3, %edi
+; AVX-NEXT: callq __powidf2
+; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%powi = call <4 x double> @llvm.experimental.constrained.powi.v4f64(
<4 x double> <double 42.1, double 42.2,
@@ -923,6 +1423,16 @@ define <1 x float> @constrained_vector_s
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_sin_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: pushq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 16
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq sinf
+; AVX-NEXT: popq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%sin = call <1 x float> @llvm.experimental.constrained.sin.v1f32(
<1 x float> <float 42.0>,
@@ -946,6 +1456,21 @@ define <2 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_sin_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 32
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq sin
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq sin
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: addq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%sin = call <2 x double> @llvm.experimental.constrained.sin.v2f64(
<2 x double> <double 42.0, double 42.1>,
@@ -975,6 +1500,26 @@ define <3 x float> @constrained_vector_s
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_sin_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq sinf
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq sinf
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq sinf
+; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%sin = call <3 x float> @llvm.experimental.constrained.sin.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
@@ -1005,6 +1550,27 @@ define <3 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_sin_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 64
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq sin
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq sin
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: callq sin
+; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: addq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%sin = call <3 x double> @llvm.experimental.constrained.sin.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
@@ -1038,6 +1604,30 @@ define <4 x double> @constrained_vector_
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_sin_v4f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq sin
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq sin
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq sin
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq sin
+; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%sin = call <4 x double> @llvm.experimental.constrained.sin.v4f64(
<4 x double> <double 42.0, double 42.1,
@@ -1057,6 +1647,16 @@ define <1 x float> @constrained_vector_c
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_cos_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: pushq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 16
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq cosf
+; AVX-NEXT: popq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%cos = call <1 x float> @llvm.experimental.constrained.cos.v1f32(
<1 x float> <float 42.0>,
@@ -1080,6 +1680,21 @@ define <2 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_cos_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 32
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq cos
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq cos
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: addq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%cos = call <2 x double> @llvm.experimental.constrained.cos.v2f64(
<2 x double> <double 42.0, double 42.1>,
@@ -1109,6 +1724,26 @@ define <3 x float> @constrained_vector_c
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_cos_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq cosf
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq cosf
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq cosf
+; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%cos = call <3 x float> @llvm.experimental.constrained.cos.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
@@ -1139,6 +1774,27 @@ define <3 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_cos_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 64
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq cos
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq cos
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: callq cos
+; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: addq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%cos = call <3 x double> @llvm.experimental.constrained.cos.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
@@ -1172,6 +1828,30 @@ define <4 x double> @constrained_vector_
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_cos_v4f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq cos
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq cos
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq cos
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq cos
+; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%cos = call <4 x double> @llvm.experimental.constrained.cos.v4f64(
<4 x double> <double 42.0, double 42.1,
@@ -1191,6 +1871,16 @@ define <1 x float> @constrained_vector_e
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_exp_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: pushq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 16
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq expf
+; AVX-NEXT: popq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%exp = call <1 x float> @llvm.experimental.constrained.exp.v1f32(
<1 x float> <float 42.0>,
@@ -1214,6 +1904,21 @@ define <2 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_exp_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 32
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq exp
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq exp
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: addq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%exp = call <2 x double> @llvm.experimental.constrained.exp.v2f64(
<2 x double> <double 42.0, double 42.1>,
@@ -1243,6 +1948,26 @@ define <3 x float> @constrained_vector_e
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_exp_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq expf
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq expf
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq expf
+; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%exp = call <3 x float> @llvm.experimental.constrained.exp.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
@@ -1273,6 +1998,27 @@ define <3 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_exp_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 64
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq exp
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq exp
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: callq exp
+; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: addq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%exp = call <3 x double> @llvm.experimental.constrained.exp.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
@@ -1306,6 +2052,30 @@ define <4 x double> @constrained_vector_
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_exp_v4f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq exp
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq exp
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq exp
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq exp
+; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%exp = call <4 x double> @llvm.experimental.constrained.exp.v4f64(
<4 x double> <double 42.0, double 42.1,
@@ -1325,6 +2095,16 @@ define <1 x float> @constrained_vector_e
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_exp2_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: pushq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 16
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq exp2f
+; AVX-NEXT: popq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%exp2 = call <1 x float> @llvm.experimental.constrained.exp2.v1f32(
<1 x float> <float 42.0>,
@@ -1348,6 +2128,21 @@ define <2 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_exp2_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 32
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq exp2
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq exp2
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: addq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%exp2 = call <2 x double> @llvm.experimental.constrained.exp2.v2f64(
<2 x double> <double 42.1, double 42.0>,
@@ -1377,6 +2172,26 @@ define <3 x float> @constrained_vector_e
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_exp2_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq exp2f
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq exp2f
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq exp2f
+; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%exp2 = call <3 x float> @llvm.experimental.constrained.exp2.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
@@ -1407,6 +2222,27 @@ define <3 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_exp2_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 64
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq exp2
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq exp2
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: callq exp2
+; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: addq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%exp2 = call <3 x double> @llvm.experimental.constrained.exp2.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
@@ -1440,6 +2276,30 @@ define <4 x double> @constrained_vector_
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_exp2_v4f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq exp2
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq exp2
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq exp2
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq exp2
+; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%exp2 = call <4 x double> @llvm.experimental.constrained.exp2.v4f64(
<4 x double> <double 42.1, double 42.2,
@@ -1459,6 +2319,16 @@ define <1 x float> @constrained_vector_l
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_log_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: pushq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 16
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq logf
+; AVX-NEXT: popq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%log = call <1 x float> @llvm.experimental.constrained.log.v1f32(
<1 x float> <float 42.0>,
@@ -1482,6 +2352,21 @@ define <2 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_log_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 32
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq log
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq log
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: addq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%log = call <2 x double> @llvm.experimental.constrained.log.v2f64(
<2 x double> <double 42.0, double 42.1>,
@@ -1511,6 +2396,26 @@ define <3 x float> @constrained_vector_l
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_log_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq logf
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq logf
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq logf
+; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%log = call <3 x float> @llvm.experimental.constrained.log.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
@@ -1541,6 +2446,27 @@ define <3 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_log_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 64
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq log
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq log
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: callq log
+; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: addq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%log = call <3 x double> @llvm.experimental.constrained.log.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
@@ -1574,6 +2500,30 @@ define <4 x double> @constrained_vector_
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_log_v4f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq log
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq log
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq log
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq log
+; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%log = call <4 x double> @llvm.experimental.constrained.log.v4f64(
<4 x double> <double 42.0, double 42.1,
@@ -1593,6 +2543,16 @@ define <1 x float> @constrained_vector_l
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_log10_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: pushq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 16
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq log10f
+; AVX-NEXT: popq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%log10 = call <1 x float> @llvm.experimental.constrained.log10.v1f32(
<1 x float> <float 42.0>,
@@ -1616,6 +2576,21 @@ define <2 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_log10_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 32
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq log10
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq log10
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: addq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%log10 = call <2 x double> @llvm.experimental.constrained.log10.v2f64(
<2 x double> <double 42.0, double 42.1>,
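This two-call sequence is the building block for all the f64 libcall cases: the first result is spilled, and after the second call a single folded vunpcklpd reassembles the vector with no separate reload instruction. The lane mapping in the CHECK comment implies the first call handled the high element:

  vunpcklpd (%rsp), %xmm0, %xmm0
    # lane 0 <- xmm0[0]  (result of the second call)
    # lane 1 <- mem[0]   (the 16-byte spill from the first call)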
@@ -1645,6 +2620,26 @@ define <3 x float> @constrained_vector_l
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_log10_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq log10f
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq log10f
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq log10f
+; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%log10 = call <3 x float> @llvm.experimental.constrained.log10.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
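The <3 x float> cases reassemble through vinsertps instead. The last insert is a memory-folded vinsertps $32; decoding the imm8 (bits[7:6] source element, bits[5:4] destination element, bits[3:0] zero mask) shows why it targets lane 2:

  $32 = 0b0010_0000   # src elt 0, dst elt 2, no lanes zeroed
                      # i.e. xmm0 = xmm0[0,1],mem[0],xmm0[3], as in the CHECK comment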
@@ -1675,6 +2670,27 @@ define <3 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_log10_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 64
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq log10
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq log10
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: callq log10
+; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: addq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%log10 = call <3 x double> @llvm.experimental.constrained.log10.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
@@ -1708,6 +2724,30 @@ define <4 x double> @constrained_vector_
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_log10_v4f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq log10
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq log10
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq log10
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq log10
+; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%log10 = call <4 x double> @llvm.experimental.constrained.log10.v4f64(
<4 x double> <double 42.0, double 42.1,
@@ -1727,6 +2767,16 @@ define <1 x float> @constrained_vector_l
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_log2_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: pushq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 16
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq log2f
+; AVX-NEXT: popq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%log2 = call <1 x float> @llvm.experimental.constrained.log2.v1f32(
<1 x float> <float 42.0>,
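In the <1 x float> cases the pushq %rax / popq %rax pair is not saving a value; it is simply the shortest way to move %rsp by 8 so the stack is 16-byte aligned at the call site:

  pushq %rax      # same effect here as: subq $8, %rsp
  callq log2f
  popq  %rax      # same effect here as: addq $8, %rsp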
@@ -1750,6 +2800,21 @@ define <2 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_log2_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 32
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq log2
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq log2
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: addq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%log2 = call <2 x double> @llvm.experimental.constrained.log2.v2f64(
<2 x double> <double 42.0, double 42.1>,
@@ -1779,6 +2844,26 @@ define <3 x float> @constrained_vector_l
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_log2_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq log2f
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq log2f
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq log2f
+; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%log2 = call <3 x float> @llvm.experimental.constrained.log2.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
@@ -1809,6 +2894,27 @@ define <3 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_log2_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 64
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq log2
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq log2
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: callq log2
+; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: addq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%log2 = call <3 x double> @llvm.experimental.constrained.log2.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
@@ -1842,6 +2948,30 @@ define <4 x double> @constrained_vector_
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_log2_v4f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq log2
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq log2
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq log2
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq log2
+; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%log2 = call <4 x double> @llvm.experimental.constrained.log2.v4f64(
<4 x double> <double 42.0, double 42.1,
@@ -1861,6 +2991,12 @@ define <1 x float> @constrained_vector_r
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_rint_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vroundss $4, %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
entry:
%rint = call <1 x float> @llvm.experimental.constrained.rint.v1f32(
<1 x float> <float 42.0>,
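From this point on, the immediate on (v)roundss/(v)roundsd/(v)roundpd carries the entire semantic difference between the intrinsics. The SSE4.1 ROUND* imm8 decodes as: bits[1:0] select the rounding mode (00 nearest-even, 01 toward -inf, 10 toward +inf, 11 toward zero), bit 2 overrides those bits with the current MXCSR.RC mode, and bit 3 suppresses the inexact (precision) exception. Hence the immediates seen in these checks:

  $4  = 0b0100   # MXCSR mode, inexact may signal   -> rint
  $12 = 0b1100   # MXCSR mode, inexact suppressed   -> nearbyint
  $9  = 0b1001   # toward -inf, inexact suppressed  -> floor
  $10 = 0b1010   # toward +inf, inexact suppressed  -> ceil
  $11 = 0b1011   # toward zero, inexact suppressed  -> trunc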
@@ -1884,6 +3020,11 @@ define <2 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_rint_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vroundpd $4, {{.*}}(%rip), %xmm0
+; AVX-NEXT: retq
entry:
%rint = call <2 x double> @llvm.experimental.constrained.rint.v2f64(
<2 x double> <double 42.1, double 42.0>,
@@ -1913,6 +3054,18 @@ define <3 x float> @constrained_vector_r
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_rint_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vroundss $4, %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vroundss $4, %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-NEXT: vroundss $4, %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; AVX-NEXT: retq
entry:
%rint = call <3 x float> @llvm.experimental.constrained.rint.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
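Since vroundss needs no libcalls, the <3 x float> case stays entirely in registers, with no stack frame; the two vinsertps steps splice the three scalar results back together per the lane comments:

  xmm1 = xmm1[0], xmm2[0], xmm1[2,3]   # lanes 0 and 1 assembled
  xmm0 = xmm1[0,1], xmm0[0], xmm1[3]   # lane 2 filled from xmm0[0]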
@@ -1943,6 +3096,14 @@ define <3 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_rint_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vroundsd $4, %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vroundpd $4, {{.*}}(%rip), %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: retq
entry:
%rint = call <3 x double> @llvm.experimental.constrained.rint.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
@@ -1976,6 +3137,11 @@ define <4 x double> @constrained_vector_
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_rint_v4f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vroundpd $4, {{.*}}(%rip), %ymm0
+; AVX-NEXT: retq
entry:
%rint = call <4 x double> @llvm.experimental.constrained.rint.v4f64(
<4 x double> <double 42.1, double 42.2,
@@ -1995,6 +3161,12 @@ define <1 x float> @constrained_vector_n
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_nearbyint_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vroundss $12, %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
entry:
%nearby = call <1 x float> @llvm.experimental.constrained.nearbyint.v1f32(
<1 x float> <float 42.0>,
@@ -2018,6 +3190,11 @@ define <2 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_nearbyint_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vroundpd $12, {{.*}}(%rip), %xmm0
+; AVX-NEXT: retq
entry:
%nearby = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(
<2 x double> <double 42.1, double 42.0>,
@@ -2047,6 +3224,18 @@ define <3 x float> @constrained_vector_n
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_nearbyint_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vroundss $12, %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vroundss $12, %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-NEXT: vroundss $12, %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; AVX-NEXT: retq
entry:
%nearby = call <3 x float> @llvm.experimental.constrained.nearbyint.v3f32(
<3 x float> <float 42.0, float 43.0, float 44.0>,
@@ -2077,6 +3266,14 @@ define <3 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_nearby_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vroundsd $12, %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vroundpd $12, {{.*}}(%rip), %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: retq
entry:
%nearby = call <3 x double> @llvm.experimental.constrained.nearbyint.v3f64(
<3 x double> <double 42.0, double 42.1, double 42.2>,
@@ -2110,6 +3307,11 @@ define <4 x double> @constrained_vector_
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_nearbyint_v4f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vroundpd $12, {{.*}}(%rip), %ymm0
+; AVX-NEXT: retq
entry:
%nearby = call <4 x double> @llvm.experimental.constrained.nearbyint.v4f64(
<4 x double> <double 42.1, double 42.2,
@@ -2130,6 +3332,17 @@ define <1 x float> @constrained_vector_m
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_maxnum_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: pushq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 16
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: callq fmaxf
+; AVX-NEXT: popq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%max = call <1 x float> @llvm.experimental.constrained.maxnum.v1f32(
<1 x float> <float 42.0>, <1 x float> <float 41.0>,
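maxnum (and minnum below) cannot use vmaxss/vmaxps even with AVX: the x86 max/min instructions return the second source operand whenever either input is a NaN, and they are operand-order sensitive for +/-0.0, which does not match the IEEE-754 maxNum semantics the intrinsic promises. The lowering therefore falls back to the libm fmax family. A sketch of the call under test, metadata assumed per the file's convention:

  %max = call <1 x float> @llvm.experimental.constrained.maxnum.v1f32(
           <1 x float> <float 42.0>, <1 x float> <float 41.0>,
           metadata !"round.dynamic",   ; metadata assumed
           metadata !"fpexcept.strict")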
@@ -2155,6 +3368,23 @@ define <2 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_maxnum_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 32
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq fmax
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq fmax
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: addq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%max = call <2 x double> @llvm.experimental.constrained.maxnum.v2f64(
<2 x double> <double 43.0, double 42.0>,
@@ -2188,6 +3418,29 @@ define <3 x float> @constrained_vector_m
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_maxnum_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: callq fmaxf
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq fmaxf
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: callq fmaxf
+; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%max = call <3 x float> @llvm.experimental.constrained.maxnum.v3f32(
<3 x float> <float 43.0, float 44.0, float 45.0>,
@@ -2222,6 +3475,30 @@ define <3 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_max_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 64
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq fmax
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq fmax
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: callq fmax
+; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: addq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%max = call <3 x double> @llvm.experimental.constrained.maxnum.v3f64(
<3 x double> <double 43.0, double 44.0, double 45.0>,
@@ -2260,6 +3537,34 @@ define <4 x double> @constrained_vector_
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_maxnum_v4f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq fmax
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq fmax
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq fmax
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq fmax
+; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%max = call <4 x double> @llvm.experimental.constrained.maxnum.v4f64(
<4 x double> <double 44.0, double 45.0,
@@ -2282,6 +3587,17 @@ define <1 x float> @constrained_vector_m
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_minnum_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: pushq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 16
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: callq fminf
+; AVX-NEXT: popq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%min = call <1 x float> @llvm.experimental.constrained.minnum.v1f32(
<1 x float> <float 42.0>, <1 x float> <float 41.0>,
@@ -2307,6 +3623,23 @@ define <2 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_minnum_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 32
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq fmin
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq fmin
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: addq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%min = call <2 x double> @llvm.experimental.constrained.minnum.v2f64(
<2 x double> <double 43.0, double 42.0>,
@@ -2340,6 +3673,29 @@ define <3 x float> @constrained_vector_m
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_minnum_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: callq fminf
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq fminf
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: callq fminf
+; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%min = call <3 x float> @llvm.experimental.constrained.minnum.v3f32(
<3 x float> <float 43.0, float 44.0, float 45.0>,
@@ -2374,6 +3730,30 @@ define <3 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_min_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 64
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq fmin
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq fmin
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: callq fmin
+; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: addq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%min = call <3 x double> @llvm.experimental.constrained.minnum.v3f64(
<3 x double> <double 43.0, double 44.0, double 45.0>,
@@ -2412,6 +3792,34 @@ define <4 x double> @constrained_vector_
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_minnum_v4f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq fmin
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq fmin
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq fmin
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX-NEXT: callq fmin
+; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%min = call <4 x double> @llvm.experimental.constrained.minnum.v4f64(
<4 x double> <double 44.0, double 45.0,
@@ -2433,6 +3841,12 @@ define <1 x float> @constrained_vector_c
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_ceil_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vroundss $10, %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
entry:
%ceil = call <1 x float> @llvm.experimental.constrained.ceil.v1f32(
<1 x float> <float 1.5>,
@@ -2456,6 +3870,11 @@ define <2 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_ceil_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vroundpd $10, {{.*}}(%rip), %xmm0
+; AVX-NEXT: retq
entry:
%ceil = call <2 x double> @llvm.experimental.constrained.ceil.v2f64(
<2 x double> <double 1.1, double 1.9>,
@@ -2485,6 +3904,18 @@ define <3 x float> @constrained_vector_c
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_ceil_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vroundss $10, %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vroundss $10, %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-NEXT: vroundss $10, %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; AVX-NEXT: retq
entry:
%ceil = call <3 x float> @llvm.experimental.constrained.ceil.v3f32(
<3 x float> <float 1.5, float 2.5, float 3.5>,
@@ -2515,6 +3946,14 @@ define <3 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_ceil_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vroundsd $10, %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vroundpd $10, {{.*}}(%rip), %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: retq
entry:
%ceil = call <3 x double> @llvm.experimental.constrained.ceil.v3f64(
<3 x double> <double 1.1, double 1.9, double 1.5>,
@@ -2533,6 +3972,12 @@ define <1 x float> @constrained_vector_f
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_floor_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vroundss $9, %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
entry:
%floor = call <1 x float> @llvm.experimental.constrained.floor.v1f32(
<1 x float> <float 1.5>,
@@ -2557,6 +4002,11 @@ define <2 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_floor_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vroundpd $9, {{.*}}(%rip), %xmm0
+; AVX-NEXT: retq
entry:
%floor = call <2 x double> @llvm.experimental.constrained.floor.v2f64(
<2 x double> <double 1.1, double 1.9>,
@@ -2586,6 +4036,18 @@ define <3 x float> @constrained_vector_f
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_floor_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vroundss $9, %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vroundss $9, %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-NEXT: vroundss $9, %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; AVX-NEXT: retq
entry:
%floor = call <3 x float> @llvm.experimental.constrained.floor.v3f32(
<3 x float> <float 1.5, float 2.5, float 3.5>,
@@ -2616,6 +4078,14 @@ define <3 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_floor_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vroundsd $9, %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vroundpd $9, {{.*}}(%rip), %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: retq
entry:
%floor = call <3 x double> @llvm.experimental.constrained.floor.v3f64(
<3 x double> <double 1.1, double 1.9, double 1.5>,
@@ -2634,6 +4104,16 @@ define <1 x float> @constrained_vector_r
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_round_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: pushq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 16
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq roundf
+; AVX-NEXT: popq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%round = call <1 x float> @llvm.experimental.constrained.round.v1f32(
<1 x float> <float 1.5>,
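Unlike ceil, floor, and trunc, round has no (v)roundss mapping: the ROUND* immediate offers no ties-away-from-zero mode, so llvm.experimental.constrained.round remains a roundf/round libcall even when AVX is available. The halfway cases show why the two cannot be conflated:

  roundf(2.5f) -> 3.0f   # ties away from zero
  rintf(2.5f)  -> 2.0f   # ties to even under the default rounding mode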
@@ -2657,6 +4137,21 @@ define <2 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_round_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 32
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq round
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq round
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: addq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%round = call <2 x double> @llvm.experimental.constrained.round.v2f64(
<2 x double> <double 1.1, double 1.9>,
@@ -2686,6 +4181,26 @@ define <3 x float> @constrained_vector_r
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_round_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq roundf
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq roundf
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: callq roundf
+; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%round = call <3 x float> @llvm.experimental.constrained.round.v3f32(
<3 x float> <float 1.5, float 2.5, float 3.5>,
@@ -2717,6 +4232,27 @@ define <3 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_round_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 64
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq round
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: callq round
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: callq round
+; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: addq $56, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
entry:
%round = call <3 x double> @llvm.experimental.constrained.round.v3f64(
<3 x double> <double 1.1, double 1.9, double 1.5>,
@@ -2735,6 +4271,12 @@ define <1 x float> @constrained_vector_t
; CHECK-NEXT: popq %rax
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_trunc_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
entry:
%trunc = call <1 x float> @llvm.experimental.constrained.trunc.v1f32(
<1 x float> <float 1.5>,
@@ -2758,6 +4300,11 @@ define <2 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_trunc_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vroundpd $11, {{.*}}(%rip), %xmm0
+; AVX-NEXT: retq
entry:
%trunc = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(
<2 x double> <double 1.1, double 1.9>,
@@ -2787,6 +4334,18 @@ define <3 x float> @constrained_vector_t
; CHECK-NEXT: addq $40, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_trunc_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX-NEXT: vroundss $11, %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; AVX-NEXT: vroundss $11, %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; AVX-NEXT: vroundss $11, %xmm2, %xmm2, %xmm2
+; AVX-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0,1],xmm0[0],xmm1[3]
+; AVX-NEXT: retq
entry:
%trunc = call <3 x float> @llvm.experimental.constrained.trunc.v3f32(
<3 x float> <float 1.5, float 2.5, float 3.5>,
@@ -2817,6 +4376,14 @@ define <3 x double> @constrained_vector_
; CHECK-NEXT: addq $24, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_trunc_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT: vroundsd $11, %xmm0, %xmm0, %xmm0
+; AVX-NEXT: vroundpd $11, {{.*}}(%rip), %xmm1
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: retq
entry:
%trunc = call <3 x double> @llvm.experimental.constrained.trunc.v3f64(
<3 x double> <double 1.1, double 1.9, double 1.5>,
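The trunc <3 x double> case mirrors the rint/nearbyint/ceil/floor v3f64 lowerings: one memory-folded vroundpd covers lanes 0 and 1, vroundsd handles lane 2, and vinsertf128 merges the halves; only the $11 immediate (toward zero, inexact suppressed) differs. A sketch of the presumed IR, metadata assumed as elsewhere in the file:

  %trunc = call <3 x double> @llvm.experimental.constrained.trunc.v3f64(
             <3 x double> <double 1.1, double 1.9, double 1.5>,
             metadata !"round.dynamic",   ; metadata assumed
             metadata !"fpexcept.strict")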
@@ -2948,4 +4515,3 @@ declare <4 x double> @llvm.experimental.
declare <4 x double> @llvm.experimental.constrained.floor.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.round.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.trunc.v4f64(<4 x double>, metadata, metadata)
-