[clang] [llvm] Reland: Add tanf16 builtin and support for tan constrained intrinsic (PR #94559)
Farzon Lotfi via cfe-commits
cfe-commits at lists.llvm.org
Fri Jun 7 13:19:41 PDT 2024
https://github.com/farzonl updated https://github.com/llvm/llvm-project/pull/94559
From 51247e430ad49c4729e2e3664104367b13fbad9e Mon Sep 17 00:00:00 2001
From: Farzon Lotfi <farzonlotfi at microsoft.com>
Date: Fri, 24 May 2024 10:01:52 -0400
Subject: [PATCH 1/3] [clang] Add tanf16 builtin and support for tan
constrained intrinsic
---
clang/include/clang/Basic/Builtins.td | 6 ++--
clang/lib/CodeGen/CGBuiltin.cpp | 12 +++++++
clang/test/CodeGen/X86/math-builtins.c | 8 ++---
.../test/CodeGen/constrained-math-builtins.c | 13 +++++++
clang/test/CodeGen/math-libcalls.c | 12 +++----
clang/test/CodeGenOpenCL/builtins-f16.cl | 3 ++
llvm/docs/LangRef.rst | 36 +++++++++++++++++++
llvm/include/llvm/IR/ConstrainedOps.def | 1 +
llvm/include/llvm/IR/Intrinsics.td | 4 +++
llvm/test/Assembler/fp-intrinsics-attr.ll | 8 +++++
llvm/test/Feature/fp-intrinsics.ll | 11 ++++++
11 files changed, 101 insertions(+), 13 deletions(-)
diff --git a/clang/include/clang/Basic/Builtins.td b/clang/include/clang/Basic/Builtins.td
index 11982af3fa609..7bef5fd7ad40f 100644
--- a/clang/include/clang/Basic/Builtins.td
+++ b/clang/include/clang/Basic/Builtins.td
@@ -482,11 +482,11 @@ def SqrtF16F128 : Builtin, F16F128MathTemplate {
let Prototype = "T(T)";
}
-def TanF128 : Builtin {
- let Spellings = ["__builtin_tanf128"];
+def TanF16F128 : Builtin, F16F128MathTemplate {
+ let Spellings = ["__builtin_tan"];
let Attributes = [FunctionWithBuiltinPrefix, NoThrow,
ConstIgnoringErrnoAndExceptions];
- let Prototype = "__float128(__float128)";
+ let Prototype = "T(T)";
}
def TanhF128 : Builtin {
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index c16b69ba87567..06e201fa71e6f 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -2923,6 +2923,18 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
SetSqrtFPAccuracy(Call);
return RValue::get(Call);
}
+
+ case Builtin::BItan:
+ case Builtin::BItanf:
+ case Builtin::BItanl:
+ case Builtin::BI__builtin_tan:
+ case Builtin::BI__builtin_tanf:
+ case Builtin::BI__builtin_tanf16:
+ case Builtin::BI__builtin_tanl:
+ case Builtin::BI__builtin_tanf128:
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::tan, Intrinsic::experimental_constrained_tan));
+
case Builtin::BItrunc:
case Builtin::BItruncf:
case Builtin::BItruncl:
diff --git a/clang/test/CodeGen/X86/math-builtins.c b/clang/test/CodeGen/X86/math-builtins.c
index 093239b448260..1e0f129b98610 100644
--- a/clang/test/CodeGen/X86/math-builtins.c
+++ b/clang/test/CodeGen/X86/math-builtins.c
@@ -674,10 +674,10 @@ __builtin_sqrt(f); __builtin_sqrtf(f); __builtin_sqrtl(f); __builtin_
__builtin_tan(f); __builtin_tanf(f); __builtin_tanl(f); __builtin_tanf128(f);
-// NO__ERRNO: declare double @tan(double noundef) [[READNONE]]
-// NO__ERRNO: declare float @tanf(float noundef) [[READNONE]]
-// NO__ERRNO: declare x86_fp80 @tanl(x86_fp80 noundef) [[READNONE]]
-// NO__ERRNO: declare fp128 @tanf128(fp128 noundef) [[READNONE]]
+// NO__ERRNO: declare double @llvm.tan.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.tan.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.tan.f80(x86_fp80) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare fp128 @llvm.tan.f128(fp128) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare double @tan(double noundef) [[NOT_READNONE]]
// HAS_ERRNO: declare float @tanf(float noundef) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @tanl(x86_fp80 noundef) [[NOT_READNONE]]
diff --git a/clang/test/CodeGen/constrained-math-builtins.c b/clang/test/CodeGen/constrained-math-builtins.c
index 2de832dd2b6ca..6cc3a10a1e794 100644
--- a/clang/test/CodeGen/constrained-math-builtins.c
+++ b/clang/test/CodeGen/constrained-math-builtins.c
@@ -183,6 +183,14 @@ void foo(double *d, float f, float *fp, long double *l, int *i, const char *c, _
// CHECK: call x86_fp80 @llvm.experimental.constrained.sqrt.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.sqrt.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+ __builtin_tan(f); __builtin_tanf(f); __builtin_tanl(f); __builtin_tanf128(f);
+
+// CHECK: call double @llvm.experimental.constrained.tan.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call float @llvm.experimental.constrained.tan.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call x86_fp80 @llvm.experimental.constrained.tan.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+// CHECK: call fp128 @llvm.experimental.constrained.tan.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
+
+
__builtin_trunc(f); __builtin_truncf(f); __builtin_truncl(f); __builtin_truncf128(f);
// CHECK: call double @llvm.experimental.constrained.trunc.f64(double %{{.*}}, metadata !"fpexcept.strict")
@@ -315,6 +323,11 @@ void foo(double *d, float f, float *fp, long double *l, int *i, const char *c, _
// CHECK: declare x86_fp80 @llvm.experimental.constrained.sqrt.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.sqrt.f128(fp128, metadata, metadata)
+// CHECK: declare double @llvm.experimental.constrained.tan.f64(double, metadata, metadata)
+// CHECK: declare float @llvm.experimental.constrained.tan.f32(float, metadata, metadata)
+// CHECK: declare x86_fp80 @llvm.experimental.constrained.tan.f80(x86_fp80, metadata, metadata)
+// CHECK: declare fp128 @llvm.experimental.constrained.tan.f128(fp128, metadata, metadata)
+
// CHECK: declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
// CHECK: declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80, metadata)
diff --git a/clang/test/CodeGen/math-libcalls.c b/clang/test/CodeGen/math-libcalls.c
index 29c312ba0ecac..a249182692762 100644
--- a/clang/test/CodeGen/math-libcalls.c
+++ b/clang/test/CodeGen/math-libcalls.c
@@ -662,15 +662,15 @@ void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {
tan(f); tanf(f); tanl(f);
-// NO__ERRNO: declare double @tan(double noundef) [[READNONE]]
-// NO__ERRNO: declare float @tanf(float noundef) [[READNONE]]
-// NO__ERRNO: declare x86_fp80 @tanl(x86_fp80 noundef) [[READNONE]]
+// NO__ERRNO: declare double @llvm.tan.f64(double) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare float @llvm.tan.f32(float) [[READNONE_INTRINSIC]]
+// NO__ERRNO: declare x86_fp80 @llvm.tan.f80(x86_fp80) [[READNONE_INTRINSIC]]
// HAS_ERRNO: declare double @tan(double noundef) [[NOT_READNONE]]
// HAS_ERRNO: declare float @tanf(float noundef) [[NOT_READNONE]]
// HAS_ERRNO: declare x86_fp80 @tanl(x86_fp80 noundef) [[NOT_READNONE]]
-// HAS_MAYTRAP: declare double @tan(double noundef) [[NOT_READNONE]]
-// HAS_MAYTRAP: declare float @tanf(float noundef) [[NOT_READNONE]]
-// HAS_MAYTRAP: declare x86_fp80 @tanl(x86_fp80 noundef) [[NOT_READNONE]]
+// HAS_MAYTRAP: declare double @llvm.experimental.constrained.tan.f64(
+// HAS_MAYTRAP: declare float @llvm.experimental.constrained.tan.f32(
+// HAS_MAYTRAP: declare x86_fp80 @llvm.experimental.constrained.tan.f80(
tanh(f); tanhf(f); tanhl(f);
diff --git a/clang/test/CodeGenOpenCL/builtins-f16.cl b/clang/test/CodeGenOpenCL/builtins-f16.cl
index adf7cdde154f5..d7bffdad5c548 100644
--- a/clang/test/CodeGenOpenCL/builtins-f16.cl
+++ b/clang/test/CodeGenOpenCL/builtins-f16.cl
@@ -66,6 +66,9 @@ void test_half_builtins(half h0, half h1, half h2, int i0) {
// CHECK: call half @llvm.sqrt.f16(half %h0)
res = __builtin_sqrtf16(h0);
+ // CHECK: call half @llvm.tan.f16(half %h0)
+ res = __builtin_tanf16(h0);
+
// CHECK: call half @llvm.trunc.f16(half %h0)
res = __builtin_truncf16(h0);
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 9fb2c048a5c86..c11a6627d81d3 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -26244,6 +26244,42 @@ same values as the libm ``cos`` functions would, and handles error
conditions in the same way.
+'``llvm.experimental.constrained.tan``' Intrinsic
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+::
+
+ declare <type>
+ @llvm.experimental.constrained.tan(<type> <op1>,
+ metadata <rounding mode>,
+ metadata <exception behavior>)
+
+Overview:
+"""""""""
+
+The '``llvm.experimental.constrained.tan``' intrinsic returns the tangent of the
+first operand.
+
+Arguments:
+""""""""""
+
+The first argument and the return type are floating-point numbers of the same
+type.
+
+The second and third arguments specify the rounding mode and exception
+behavior as described above.
+
+Semantics:
+""""""""""
+
+This function returns the tangent of the specified operand, returning the
+same values as the libm ``tan`` functions would, and handles error
+conditions in the same way.
+
+
'``llvm.experimental.constrained.exp``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/llvm/include/llvm/IR/ConstrainedOps.def b/llvm/include/llvm/IR/ConstrainedOps.def
index 41aa44de957f9..a7b37c5cb204d 100644
--- a/llvm/include/llvm/IR/ConstrainedOps.def
+++ b/llvm/include/llvm/IR/ConstrainedOps.def
@@ -95,6 +95,7 @@ DAG_FUNCTION(round, 1, 0, experimental_constrained_round, FROUND)
DAG_FUNCTION(roundeven, 1, 0, experimental_constrained_roundeven, FROUNDEVEN)
DAG_FUNCTION(sin, 1, 1, experimental_constrained_sin, FSIN)
DAG_FUNCTION(sqrt, 1, 1, experimental_constrained_sqrt, FSQRT)
+DAG_FUNCTION(tan, 1, 1, experimental_constrained_tan, FTAN)
DAG_FUNCTION(trunc, 1, 0, experimental_constrained_trunc, FTRUNC)
// This is definition for fmuladd intrinsic function, that is converted into
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 107442623ab7b..4c506a6ace23e 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1218,6 +1218,10 @@ let IntrProperties = [IntrInaccessibleMemOnly, IntrWillReturn, IntrStrictFP] in
[ LLVMMatchType<0>,
llvm_metadata_ty,
llvm_metadata_ty ]>;
+ def int_experimental_constrained_tan : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
+ [ LLVMMatchType<0>,
+ llvm_metadata_ty,
+ llvm_metadata_ty ]>;
def int_experimental_constrained_pow : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
[ LLVMMatchType<0>,
LLVMMatchType<0>,
diff --git a/llvm/test/Assembler/fp-intrinsics-attr.ll b/llvm/test/Assembler/fp-intrinsics-attr.ll
index 6546d1a275c99..613630e1a2b4d 100644
--- a/llvm/test/Assembler/fp-intrinsics-attr.ll
+++ b/llvm/test/Assembler/fp-intrinsics-attr.ll
@@ -85,6 +85,11 @@ define void @func(double %a, double %b, double %c, i32 %i) strictfp {
metadata !"round.dynamic",
metadata !"fpexcept.strict")
+ %tan = call double @llvm.experimental.constrained.tan.f64(
+ double %a,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict")
+
%pow = call double @llvm.experimental.constrained.pow.f64(
double %a, double %b,
metadata !"round.dynamic",
@@ -244,6 +249,9 @@ declare double @llvm.experimental.constrained.sin.f64(double, metadata, metadata
declare double @llvm.experimental.constrained.cos.f64(double, metadata, metadata)
; CHECK: @llvm.experimental.constrained.cos.f64({{.*}}) #[[ATTR1]]
+declare double @llvm.experimental.constrained.tan.f64(double, metadata, metadata)
+; CHECK: @llvm.experimental.constrained.tan.f64({{.*}}) #[[ATTR1]]
+
declare double @llvm.experimental.constrained.pow.f64(double, double, metadata, metadata)
; CHECK: @llvm.experimental.constrained.pow.f64({{.*}}) #[[ATTR1]]
diff --git a/llvm/test/Feature/fp-intrinsics.ll b/llvm/test/Feature/fp-intrinsics.ll
index b92408a1bf1cd..7759813dc2e11 100644
--- a/llvm/test/Feature/fp-intrinsics.ll
+++ b/llvm/test/Feature/fp-intrinsics.ll
@@ -151,6 +151,17 @@ entry:
ret double %result
}
+; Verify that tan(42.0) isn't simplified when the rounding mode is unknown.
+; CHECK-LABEL: ftan
+; CHECK: call double @llvm.experimental.constrained.tan
+define double @ftan() #0 {
+entry:
+ %result = call double @llvm.experimental.constrained.tan.f64(double 42.0,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret double %result
+}
+
; Verify that exp(42.0) isn't simplified when the rounding mode is unknown.
; CHECK-LABEL: f10
; CHECK: call double @llvm.experimental.constrained.exp
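For reference, a minimal usage sketch (not part of the patch; the wrapper names are mine) of the builtins this change wires up, assuming an x86-64 Linux target where _Float16 and __float128 are available. With -fno-math-errno, each call lowers to the corresponding llvm.tan.* intrinsic, matching the NO__ERRNO lines in math-builtins.c above:

// Hypothetical sketch, not from the patch: one wrapper per builtin width.
_Float16    tan_f16(_Float16 x)    { return __builtin_tanf16(x);  } // @llvm.tan.f16
float       tan_f32(float x)       { return __builtin_tanf(x);    } // @llvm.tan.f32
double      tan_f64(double x)      { return __builtin_tan(x);     } // @llvm.tan.f64
long double tan_f80(long double x) { return __builtin_tanl(x);    } // @llvm.tan.f80 on x86
__float128  tan_f128(__float128 x) { return __builtin_tanf128(x); } // @llvm.tan.f128

When math errno matters (the HAS_ERRNO run lines), the f32/f64/f80 cases keep calling the tan/tanf/tanl libm entry points instead.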
From 7772ca4f8ffe63cb4573f4d4edf2cd2895bff8b8 Mon Sep 17 00:00:00 2001
From: Farzon Lotfi <farzonlotfi at microsoft.com>
Date: Thu, 6 Jun 2024 13:07:48 -0400
Subject: [PATCH 2/3] add arm64 llvm.experimental.constrained.tan tests
---
llvm/test/CodeGen/AArch64/fp-intrinsics.ll | 31 ++++++++++++++++++++++
1 file changed, 31 insertions(+)
diff --git a/llvm/test/CodeGen/AArch64/fp-intrinsics.ll b/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
index 685efbb7cad43..67d0b63f4076f 100644
--- a/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/AArch64/fp-intrinsics.ll
@@ -146,6 +146,13 @@ define float @cos_f32(float %x) #0 {
ret float %val
}
+; CHECK-LABEL: tan_f32:
+; CHECK: bl tanf
+define float @tan_f32(float %x) #0 {
+ %val = call float @llvm.experimental.constrained.tan.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret float %val
+}
+
; CHECK-LABEL: pow_f32:
; CHECK: bl powf
define float @pow_f32(float %x, float %y) #0 {
@@ -630,6 +637,13 @@ define double @cos_f64(double %x) #0 {
ret double %val
}
+; CHECK-LABEL: tan_f64:
+; CHECK: bl tan
+define double @tan_f64(double %x) #0 {
+ %val = call double @llvm.experimental.constrained.tan.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret double %val
+}
+
; CHECK-LABEL: pow_f64:
; CHECK: bl pow
define double @pow_f64(double %x, double %y) #0 {
@@ -1114,6 +1128,13 @@ define fp128 @cos_f128(fp128 %x) #0 {
ret fp128 %val
}
+; CHECK-LABEL: tan_f128:
+; CHECK: bl tanl
+define fp128 @tan_f128(fp128 %x) #0 {
+ %val = call fp128 @llvm.experimental.constrained.tan.f128(fp128 %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret fp128 %val
+}
+
; CHECK-LABEL: pow_f128:
; CHECK: bl powl
define fp128 @pow_f128(fp128 %x, fp128 %y) #0 {
@@ -1491,6 +1512,13 @@ define <1 x double> @cos_v1f64(<1 x double> %x, <1 x double> %y) #0 {
ret <1 x double> %val
}
+; CHECK-LABEL: tan_v1f64:
+; CHECK: bl tan
+define <1 x double> @tan_v1f64(<1 x double> %x, <1 x double> %y) #0 {
+ %val = call <1 x double> @llvm.experimental.constrained.tan.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret <1 x double> %val
+}
+
; CHECK-LABEL: pow_v1f64:
; CHECK: bl pow
define <1 x double> @pow_v1f64(<1 x double> %x, <1 x double> %y) #0 {
@@ -1555,6 +1583,7 @@ declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata)
declare float @llvm.experimental.constrained.powi.f32(float, i32, metadata, metadata)
declare float @llvm.experimental.constrained.sin.f32(float, metadata, metadata)
declare float @llvm.experimental.constrained.cos.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.tan.f32(float, metadata, metadata)
declare float @llvm.experimental.constrained.pow.f32(float, float, metadata, metadata)
declare float @llvm.experimental.constrained.log.f32(float, metadata, metadata)
declare float @llvm.experimental.constrained.log10.f32(float, metadata, metadata)
@@ -1599,6 +1628,7 @@ declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadat
declare double @llvm.experimental.constrained.powi.f64(double, i32, metadata, metadata)
declare double @llvm.experimental.constrained.sin.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.cos.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.tan.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.pow.f64(double, double, metadata, metadata)
declare double @llvm.experimental.constrained.log.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.log10.f64(double, metadata, metadata)
@@ -1643,6 +1673,7 @@ declare fp128 @llvm.experimental.constrained.sqrt.f128(fp128, metadata, metadata
declare fp128 @llvm.experimental.constrained.powi.f128(fp128, i32, metadata, metadata)
declare fp128 @llvm.experimental.constrained.sin.f128(fp128, metadata, metadata)
declare fp128 @llvm.experimental.constrained.cos.f128(fp128, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.tan.f128(fp128, metadata, metadata)
declare fp128 @llvm.experimental.constrained.pow.f128(fp128, fp128, metadata, metadata)
declare fp128 @llvm.experimental.constrained.log.f128(fp128, metadata, metadata)
declare fp128 @llvm.experimental.constrained.log10.f128(fp128, metadata, metadata)
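The clang side of the constrained path can be sketched as follows (hypothetical example, not from the patch): in a strict floating-point environment clang emits llvm.experimental.constrained.tan.* instead of llvm.tan.*, and the AArch64 tests above verify that the strict form still lowers to the tan/tanf/tanl libcalls:

// Hypothetical sketch, not from the patch. With FENV_ACCESS on, the call
// below becomes
//   call double @llvm.experimental.constrained.tan.f64(double %x,
//        metadata !"round.dynamic", metadata !"fpexcept.strict")
// instead of @llvm.tan.f64.
#pragma STDC FENV_ACCESS ON

double tan_strict(double x) {
  return __builtin_tan(x);
}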
From 35bbeed2f989eca93771822b414e11e88e4bbc16 Mon Sep 17 00:00:00 2001
From: Farzon Lotfi <farzon at farzon.com>
Date: Fri, 7 Jun 2024 16:19:08 -0400
Subject: [PATCH 3/3] Now that x86 and arm64 backend support has landed, add
constrained tan tests for those backends
---
llvm/test/CodeGen/ARM/fp-intrinsics.ll | 16 ++
llvm/test/CodeGen/X86/fp-intrinsics.ll | 53 ++++
.../CodeGen/X86/fp-strict-libcalls-msvc32.ll | 18 ++
.../test/CodeGen/X86/fp128-libcalls-strict.ll | 41 +++
llvm/test/CodeGen/X86/fp80-strict-libcalls.ll | 26 ++
.../X86/vector-constrained-fp-intrinsics.ll | 232 +++++++++++++++++
6 files changed, 386 insertions(+)
diff --git a/llvm/test/CodeGen/ARM/fp-intrinsics.ll b/llvm/test/CodeGen/ARM/fp-intrinsics.ll
index 64b22a5cc71bc..e286eb3226e46 100644
--- a/llvm/test/CodeGen/ARM/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/ARM/fp-intrinsics.ll
@@ -139,6 +139,13 @@ define float @cos_f32(float %x) #0 {
ret float %val
}
+; CHECK-LABEL: tan_f32:
+; CHECK: bl tanf
+define float @tan_f32(float %x) #0 {
+ %val = call float @llvm.experimental.constrained.tan.f32(float %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret float %val
+}
+
; CHECK-LABEL: pow_f32:
; CHECK: bl powf
define float @pow_f32(float %x, float %y) #0 {
@@ -596,6 +603,13 @@ define double @cos_f64(double %x) #0 {
ret double %val
}
+; CHECK-LABEL: tan_f64:
+; CHECK: bl tan
+define double @tan_f64(double %x) #0 {
+ %val = call double @llvm.experimental.constrained.tan.f64(double %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
+ ret double %val
+}
+
; CHECK-LABEL: pow_f64:
; CHECK: bl pow
define double @pow_f64(double %x, double %y) #0 {
@@ -1023,6 +1037,7 @@ declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata)
declare float @llvm.experimental.constrained.powi.f32(float, i32, metadata, metadata)
declare float @llvm.experimental.constrained.sin.f32(float, metadata, metadata)
declare float @llvm.experimental.constrained.cos.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.tan.f32(float, metadata, metadata)
declare float @llvm.experimental.constrained.pow.f32(float, float, metadata, metadata)
declare float @llvm.experimental.constrained.log.f32(float, metadata, metadata)
declare float @llvm.experimental.constrained.log10.f32(float, metadata, metadata)
@@ -1056,6 +1071,7 @@ declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadat
declare double @llvm.experimental.constrained.powi.f64(double, i32, metadata, metadata)
declare double @llvm.experimental.constrained.sin.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.cos.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.tan.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.pow.f64(double, double, metadata, metadata)
declare double @llvm.experimental.constrained.log.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.log10.f64(double, metadata, metadata)
diff --git a/llvm/test/CodeGen/X86/fp-intrinsics.ll b/llvm/test/CodeGen/X86/fp-intrinsics.ll
index d2b45ee1e03e6..8c48e6f9da80a 100644
--- a/llvm/test/CodeGen/X86/fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/fp-intrinsics.ll
@@ -2758,6 +2758,58 @@ entry:
ret float %result
}
+; Verify that tan(42.0) isn't simplified when the rounding mode is unknown.
+define double @ftan() #0 {
+; X87-LABEL: ftan:
+; X87: # %bb.0: # %entry
+; X87-NEXT: subl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 16
+; X87-NEXT: flds {{\.?LCPI[0-9]+_[0-9]+}}
+; X87-NEXT: fstpl (%esp)
+; X87-NEXT: wait
+; X87-NEXT: calll tan
+; X87-NEXT: addl $12, %esp
+; X87-NEXT: .cfi_def_cfa_offset 4
+; X87-NEXT: retl
+;
+; X86-SSE-LABEL: ftan:
+; X86-SSE: # %bb.0: # %entry
+; X86-SSE-NEXT: subl $12, %esp
+; X86-SSE-NEXT: .cfi_def_cfa_offset 16
+; X86-SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
+; X86-SSE-NEXT: movsd %xmm0, (%esp)
+; X86-SSE-NEXT: calll tan
+; X86-SSE-NEXT: addl $12, %esp
+; X86-SSE-NEXT: .cfi_def_cfa_offset 4
+; X86-SSE-NEXT: retl
+;
+; SSE-LABEL: ftan:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: pushq %rax
+; SSE-NEXT: .cfi_def_cfa_offset 16
+; SSE-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
+; SSE-NEXT: callq tan@PLT
+; SSE-NEXT: popq %rax
+; SSE-NEXT: .cfi_def_cfa_offset 8
+; SSE-NEXT: retq
+;
+; AVX-LABEL: ftan:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: pushq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 16
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
+; AVX-NEXT: callq tan@PLT
+; AVX-NEXT: popq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
+entry:
+ %result = call double @llvm.experimental.constrained.tan.f64(double 42.0,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret double %result
+}
+
+
attributes #0 = { strictfp }
@llvm.fp.env = thread_local global i8 zeroinitializer, section "llvm.metadata"
@@ -2771,6 +2823,7 @@ declare double @llvm.experimental.constrained.pow.f64(double, double, metadata,
declare double @llvm.experimental.constrained.powi.f64(double, i32, metadata, metadata)
declare double @llvm.experimental.constrained.sin.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.cos.f64(double, metadata, metadata)
+declare double @llvm.experimental.constrained.tan.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.exp.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.exp2.f64(double, metadata, metadata)
declare double @llvm.experimental.constrained.log.f64(double, metadata, metadata)
diff --git a/llvm/test/CodeGen/X86/fp-strict-libcalls-msvc32.ll b/llvm/test/CodeGen/X86/fp-strict-libcalls-msvc32.ll
index 1bc308bef8ccc..cfec52c0e6886 100644
--- a/llvm/test/CodeGen/X86/fp-strict-libcalls-msvc32.ll
+++ b/llvm/test/CodeGen/X86/fp-strict-libcalls-msvc32.ll
@@ -160,6 +160,23 @@ define float @sin(float %x) #0 {
ret float %result
}
+define float @tan(float %x) #0 {
+; CHECK-LABEL: tan:
+; CHECK: # %bb.0:
+; CHECK-NEXT: subl $12, %esp
+; CHECK-NEXT: flds {{[0-9]+}}(%esp)
+; CHECK-NEXT: fstpl (%esp)
+; CHECK-NEXT: wait
+; CHECK-NEXT: calll _tan
+; CHECK-NEXT: fstps {{[0-9]+}}(%esp)
+; CHECK-NEXT: flds {{[0-9]+}}(%esp)
+; CHECK-NEXT: wait
+; CHECK-NEXT: addl $12, %esp
+; CHECK-NEXT: retl
+ %result = call float @llvm.experimental.constrained.tan.f32(float %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+ ret float %result
+}
+
attributes #0 = { strictfp }
declare float @llvm.experimental.constrained.ceil.f32(float, metadata)
@@ -171,3 +188,4 @@ declare float @llvm.experimental.constrained.log.f32(float, metadata, metadata)
declare float @llvm.experimental.constrained.log10.f32(float, metadata, metadata)
declare float @llvm.experimental.constrained.pow.f32(float, float, metadata, metadata)
declare float @llvm.experimental.constrained.sin.f32(float, metadata, metadata)
+declare float @llvm.experimental.constrained.tan.f32(float, metadata, metadata)
diff --git a/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll b/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll
index f1d473f81a9fa..bd51f553587db 100644
--- a/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll
+++ b/llvm/test/CodeGen/X86/fp128-libcalls-strict.ll
@@ -1047,6 +1047,46 @@ entry:
ret fp128 %sqrt
}
+define fp128 @tan(fp128 %x) nounwind strictfp {
+; ANDROID-LABEL: tan:
+; ANDROID: # %bb.0: # %entry
+; ANDROID-NEXT: pushq %rax
+; ANDROID-NEXT: callq tanl@PLT
+; ANDROID-NEXT: popq %rax
+; ANDROID-NEXT: retq
+;
+; GNU-LABEL: tan:
+; GNU: # %bb.0: # %entry
+; GNU-NEXT: pushq %rax
+; GNU-NEXT: callq tanf128@PLT
+; GNU-NEXT: popq %rax
+; GNU-NEXT: retq
+;
+; X86-LABEL: tan:
+; X86: # %bb.0: # %entry
+; X86-NEXT: pushl %esi
+; X86-NEXT: subl $24, %esp
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: leal {{[0-9]+}}(%esp), %eax
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl %eax
+; X86-NEXT: calll tanl
+; X86-NEXT: addl $28, %esp
+; X86-NEXT: movaps (%esp), %xmm0
+; X86-NEXT: movaps %xmm0, (%esi)
+; X86-NEXT: movl %esi, %eax
+; X86-NEXT: addl $24, %esp
+; X86-NEXT: popl %esi
+; X86-NEXT: retl $4
+entry:
+ %tan = call fp128 @llvm.experimental.constrained.tan.f128(fp128 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+ ret fp128 %tan
+}
+
define fp128 @trunc(fp128 %x) nounwind strictfp {
; ANDROID-LABEL: trunc:
; ANDROID: # %bb.0: # %entry
@@ -1663,6 +1703,7 @@ declare fp128 @llvm.experimental.constrained.round.f128(fp128, metadata)
declare fp128 @llvm.experimental.constrained.roundeven.f128(fp128, metadata)
declare fp128 @llvm.experimental.constrained.sin.f128(fp128, metadata, metadata)
declare fp128 @llvm.experimental.constrained.sqrt.f128(fp128, metadata, metadata)
+declare fp128 @llvm.experimental.constrained.tan.f128(fp128, metadata, metadata)
declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata)
declare i32 @llvm.experimental.constrained.lrint.i32.f128(fp128, metadata, metadata)
declare i64 @llvm.experimental.constrained.llrint.i64.f128(fp128, metadata, metadata)
diff --git a/llvm/test/CodeGen/X86/fp80-strict-libcalls.ll b/llvm/test/CodeGen/X86/fp80-strict-libcalls.ll
index 4d50b15e5c185..89729975cfd61 100644
--- a/llvm/test/CodeGen/X86/fp80-strict-libcalls.ll
+++ b/llvm/test/CodeGen/X86/fp80-strict-libcalls.ll
@@ -504,6 +504,31 @@ entry:
ret x86_fp80 %sin
}
+define x86_fp80 @tan(x86_fp80 %x) nounwind strictfp {
+; X86-LABEL: tan:
+; X86: # %bb.0: # %entry
+; X86-NEXT: subl $12, %esp
+; X86-NEXT: fldt {{[0-9]+}}(%esp)
+; X86-NEXT: fstpt (%esp)
+; X86-NEXT: wait
+; X86-NEXT: calll tanl
+; X86-NEXT: addl $12, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: tan:
+; X64: # %bb.0: # %entry
+; X64-NEXT: subq $24, %rsp
+; X64-NEXT: fldt {{[0-9]+}}(%rsp)
+; X64-NEXT: fstpt (%rsp)
+; X64-NEXT: wait
+; X64-NEXT: callq tanl@PLT
+; X64-NEXT: addq $24, %rsp
+; X64-NEXT: retq
+entry:
+ %tan = call x86_fp80 @llvm.experimental.constrained.tan.f80(x86_fp80 %x, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+ ret x86_fp80 %tan
+}
+
define x86_fp80 @trunc(x86_fp80 %x) nounwind strictfp {
; X86-LABEL: trunc:
; X86: # %bb.0: # %entry
@@ -650,6 +675,7 @@ declare x86_fp80 @llvm.experimental.constrained.rint.f80(x86_fp80, metadata, met
declare x86_fp80 @llvm.experimental.constrained.round.f80(x86_fp80, metadata)
declare x86_fp80 @llvm.experimental.constrained.roundeven.f80(x86_fp80, metadata)
declare x86_fp80 @llvm.experimental.constrained.sin.f80(x86_fp80, metadata, metadata)
+declare x86_fp80 @llvm.experimental.constrained.tan.f80(x86_fp80, metadata, metadata)
declare x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80, metadata)
declare i32 @llvm.experimental.constrained.lrint.i32.f80(x86_fp80, metadata, metadata)
declare i64 @llvm.experimental.constrained.llrint.i64.f80(x86_fp80, metadata, metadata)
diff --git a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
index 0adb9ddfc426a..c6f2dd64069e1 100644
--- a/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/vector-constrained-fp-intrinsics.ll
@@ -7771,6 +7771,234 @@ define <16 x float> @vpaddd_mask_test(<16 x float> %i, <16 x float> %j, <16 x i3
%r = select <16 x i1> %mask, <16 x float> %x, <16 x float> %i
ret <16 x float> %r
}
+
+define <1 x float> @constrained_vector_tan_v1f32() #0 {
+; CHECK-LABEL: constrained_vector_tan_v1f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: callq tanf@PLT
+; CHECK-NEXT: popq %rax
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_tan_v1f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: pushq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 16
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
+; AVX-NEXT: callq tanf@PLT
+; AVX-NEXT: popq %rax
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
+entry:
+ %tan = call <1 x float> @llvm.experimental.constrained.tan.v1f32(
+ <1 x float> <float 42.0>,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <1 x float> %tan
+}
+
+define <2 x double> @constrained_vector_tan_v2f64() #0 {
+; CHECK-LABEL: constrained_vector_tan_v2f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: subq $24, %rsp
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
+; CHECK-NEXT: callq tan@PLT
+; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
+; CHECK-NEXT: callq tan@PLT
+; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT: addq $24, %rsp
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_tan_v2f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 32
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
+; AVX-NEXT: callq tan@PLT
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
+; AVX-NEXT: callq tan@PLT
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: addq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
+entry:
+ %tan = call <2 x double> @llvm.experimental.constrained.tan.v2f64(
+ <2 x double> <double 42.0, double 42.1>,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <2 x double> %tan
+}
+
+define <3 x float> @constrained_vector_tan_v3f32() #0 {
+; CHECK-LABEL: constrained_vector_tan_v3f32:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: subq $40, %rsp
+; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: callq tanf@PLT
+; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: callq tanf@PLT
+; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: movss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
+; CHECK-NEXT: callq tanf@PLT
+; CHECK-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
+; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT: movaps %xmm1, %xmm0
+; CHECK-NEXT: addq $40, %rsp
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_tan_v3f32:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.4E+1,0.0E+0,0.0E+0,0.0E+0]
+; AVX-NEXT: callq tanf@PLT
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.2E+1,0.0E+0,0.0E+0,0.0E+0]
+; AVX-NEXT: callq tanf@PLT
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovss {{.*#+}} xmm0 = [4.3E+1,0.0E+0,0.0E+0,0.0E+0]
+; AVX-NEXT: callq tanf@PLT
+; AVX-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload
+; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[2,3]
+; AVX-NEXT: vinsertps $32, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0,1],mem[0],xmm0[3]
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
+entry:
+ %tan = call <3 x float> @llvm.experimental.constrained.tan.v3f32(
+ <3 x float> <float 42.0, float 43.0, float 44.0>,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <3 x float> %tan
+}
+
+define <3 x double> @constrained_vector_tan_v3f64() #0 {
+; CHECK-LABEL: constrained_vector_tan_v3f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: subq $24, %rsp
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
+; CHECK-NEXT: callq tan@PLT
+; CHECK-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
+; CHECK-NEXT: callq tan@PLT
+; CHECK-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
+; CHECK-NEXT: callq tan@PLT
+; CHECK-NEXT: movsd %xmm0, {{[0-9]+}}(%rsp)
+; CHECK-NEXT: fldl {{[0-9]+}}(%rsp)
+; CHECK-NEXT: wait
+; CHECK-NEXT: movsd (%rsp), %xmm0 # 8-byte Reload
+; CHECK-NEXT: # xmm0 = mem[0],zero
+; CHECK-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
+; CHECK-NEXT: # xmm1 = mem[0],zero
+; CHECK-NEXT: addq $24, %rsp
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_tan_v3f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
+; AVX-NEXT: callq tan@PLT
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
+; AVX-NEXT: callq tan@PLT
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
+; AVX-NEXT: vzeroupper
+; AVX-NEXT: callq tan@PLT
+; AVX-NEXT: vmovups (%rsp), %ymm1 # 32-byte Reload
+; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
+entry:
+ %tan = call <3 x double> @llvm.experimental.constrained.tan.v3f64(
+ <3 x double> <double 42.0, double 42.1, double 42.2>,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <3 x double> %tan
+}
+
+define <4 x double> @constrained_vector_tan_v4f64() #0 {
+; CHECK-LABEL: constrained_vector_tan_v4f64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: subq $40, %rsp
+; CHECK-NEXT: .cfi_def_cfa_offset 48
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
+; CHECK-NEXT: callq tan@PLT
+; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
+; CHECK-NEXT: callq tan@PLT
+; CHECK-NEXT: unpcklpd (%rsp), %xmm0 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm0 = xmm0[0],mem[0]
+; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2299999999999997E+1,0.0E+0]
+; CHECK-NEXT: callq tan@PLT
+; CHECK-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; CHECK-NEXT: movsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
+; CHECK-NEXT: callq tan@PLT
+; CHECK-NEXT: movaps %xmm0, %xmm1
+; CHECK-NEXT: unpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
+; CHECK-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
+; CHECK-NEXT: addq $40, %rsp
+; CHECK-NEXT: .cfi_def_cfa_offset 8
+; CHECK-NEXT: retq
+;
+; AVX-LABEL: constrained_vector_tan_v4f64:
+; AVX: # %bb.0: # %entry
+; AVX-NEXT: subq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 48
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2299999999999997E+1,0.0E+0]
+; AVX-NEXT: callq tan@PLT
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2200000000000003E+1,0.0E+0]
+; AVX-NEXT: callq tan@PLT
+; AVX-NEXT: vunpcklpd (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2100000000000001E+1,0.0E+0]
+; AVX-NEXT: callq tan@PLT
+; AVX-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX-NEXT: vmovsd {{.*#+}} xmm0 = [4.2E+1,0.0E+0]
+; AVX-NEXT: callq tan@PLT
+; AVX-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload
+; AVX-NEXT: # xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: vinsertf128 $1, (%rsp), %ymm0, %ymm0 # 16-byte Folded Reload
+; AVX-NEXT: addq $40, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
+entry:
+ %tan = call <4 x double> @llvm.experimental.constrained.tan.v4f64(
+ <4 x double> <double 42.0, double 42.1,
+ double 42.2, double 42.3>,
+ metadata !"round.dynamic",
+ metadata !"fpexcept.strict") #0
+ ret <4 x double> %tan
+}
+
+
+
declare <16 x float> @llvm.experimental.constrained.fadd.v16f32(<16 x float>, <16 x float>, metadata, metadata)
attributes #0 = { strictfp }
@@ -7786,6 +8014,7 @@ declare <2 x double> @llvm.experimental.constrained.pow.v2f64(<2 x double>, <2 x
declare <2 x double> @llvm.experimental.constrained.powi.v2f64(<2 x double>, i32, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.sin.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.cos.v2f64(<2 x double>, metadata, metadata)
+declare <2 x double> @llvm.experimental.constrained.tan.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.exp.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.exp2.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.log.v2f64(<2 x double>, metadata, metadata)
@@ -7829,6 +8058,7 @@ declare <1 x float> @llvm.experimental.constrained.pow.v1f32(<1 x float>, <1 x f
declare <1 x float> @llvm.experimental.constrained.powi.v1f32(<1 x float>, i32, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.sin.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.cos.v1f32(<1 x float>, metadata, metadata)
+declare <1 x float> @llvm.experimental.constrained.tan.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.exp.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.exp2.v1f32(<1 x float>, metadata, metadata)
declare <1 x float> @llvm.experimental.constrained.log.v1f32(<1 x float>, metadata, metadata)
@@ -7881,7 +8111,8 @@ declare <3 x double> @llvm.experimental.constrained.powi.v3f64(<3 x double>, i32
declare <3 x float> @llvm.experimental.constrained.sin.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.sin.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.cos.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.cos.v3f64(<3 x double>, metadata, metadata)
+declare <3 x double> @llvm.experimental.constrained.tan.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.exp.v3f32(<3 x float>, metadata, metadata)
declare <3 x double> @llvm.experimental.constrained.exp.v3f64(<3 x double>, metadata, metadata)
declare <3 x float> @llvm.experimental.constrained.exp2.v3f32(<3 x float>, metadata, metadata)
@@ -7938,6 +8168,7 @@ declare <4 x double> @llvm.experimental.constrained.pow.v4f64(<4 x double>, <4 x
declare <4 x double> @llvm.experimental.constrained.powi.v4f64(<4 x double>, i32, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.sin.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.cos.v4f64(<4 x double>, metadata, metadata)
+declare <4 x double> @llvm.experimental.constrained.tan.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.exp.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.exp2.v4f64(<4 x double>, metadata, metadata)
declare <4 x double> @llvm.experimental.constrained.log.v4f64(<4 x double>, metadata, metadata)
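There is no vector tan lowering here other than scalarization, so the CHECK lines above show each lane split out into a scalar tan/tanf libcall and the results repacked (the unpcklpd/vinsertf128 sequences). A per-lane sketch of the v2f64 case, assuming clang's vector extensions (hypothetical, not from the patch):

#include <math.h>

// Hypothetical sketch, not from the patch: what the scalarized lowering
// computes for constrained_vector_tan_v2f64, one tan libcall per element.
typedef double v2f64 __attribute__((vector_size(16)));

v2f64 tan_v2f64(v2f64 v) {
  v2f64 r;
  for (int i = 0; i < 2; ++i)
    r[i] = tan(v[i]);
  return r;
}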