[clang] [llvm] Implement operand bundles for floating-point operations (PR #109798)
Serge Pavlov via llvm-commits
llvm-commits at lists.llvm.org
Thu Jan 9 07:17:52 PST 2025
https://github.com/spavloff updated https://github.com/llvm/llvm-project/pull/109798
>From 28088b448bb3b25b4643ce4d120a9c0f831b2711 Mon Sep 17 00:00:00 2001
From: Serge Pavlov <sepavloff at gmail.com>
Date: Thu, 22 Aug 2024 17:33:20 +0700
Subject: [PATCH 1/7] Implement operand bundles for floating-point operations
Currently, floating-point operations in their general form (beyond the
default mode) are always represented by calls to constrained
intrinsics. In addition to modeling side effects, these calls carry
extra information in the form of metadata arguments. As noted in
https://discourse.llvm.org/t/thought-on-strictfp-support/71453, this
scheme is inefficient for intrinsic calls, because it requires defining
a separate intrinsic for the same operation whenever it is used in a
non-default FP environment. The solution proposed in the discussion
was "to move the complexity about the environment tracking
from the intrinsics themselves to the call instruction".
This change implements that approach using operand bundles
(https://llvm.org/docs/LangRef.html#operand-bundles). The same approach
was tried previously (https://reviews.llvm.org/D93455) but was not
finished.
This change does not add any new functionality; it only adds a new way
of keeping FP-related information in LLVM IR. Metadata arguments of
constrained intrinsics are preserved, but they are no longer consulted
by queries such as `getRoundingMode` or `getExceptionBehavior`.
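For illustration, a minimal sketch of the same operation in both forms
(the i32 encodings follow `llvm::RoundingMode` and `fp::ExceptionBehavior`,
so 1 = NearestTiesToEven and 2 = ebStrict; the value names %a, %b are only
illustrative):

    ; Metadata arguments only (current form):
    %sum = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.strict") #0

    ; Same call carrying the new operand bundles; the metadata arguments
    ; are preserved:
    %sum2 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 [ "fpe.round"(i32 1), "fpe.except"(i32 2) ]

    attributes #0 = { strictfp }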
---
clang/test/CodeGen/X86/strictfp_builtins.c | 6 +-
clang/test/CodeGen/strictfp_builtins.c | 34 ++++----
.../cl20-device-side-enqueue-attributes.cl | 4 +-
llvm/docs/LangRef.rst | 23 ++++++
llvm/include/llvm/ADT/FloatingPointMode.h | 9 +++
llvm/include/llvm/IR/AutoUpgrade.h | 2 +
llvm/include/llvm/IR/FPEnv.h | 9 +++
llvm/include/llvm/IR/IRBuilder.h | 48 ++++++++++++
llvm/include/llvm/IR/InstrTypes.h | 7 ++
llvm/include/llvm/IR/IntrinsicInst.h | 2 -
llvm/include/llvm/IR/LLVMContext.h | 2 +
llvm/lib/AsmParser/LLParser.cpp | 46 +++++++++++
llvm/lib/IR/AutoUpgrade.cpp | 78 +++++++++++++++++++
llvm/lib/IR/IRBuilder.cpp | 56 ++++++++++---
llvm/lib/IR/Instructions.cpp | 18 +++++
llvm/lib/IR/IntrinsicInst.cpp | 23 ------
llvm/lib/IR/LLVMContext.cpp | 10 +++
llvm/lib/IR/Verifier.cpp | 70 ++++++++++++++++-
.../Scalar/TailRecursionElimination.cpp | 10 ++-
llvm/lib/Transforms/Utils/CloneFunction.cpp | 16 +++-
.../Bitcode/operand-bundles-bc-analyzer.ll | 2 +
.../AMDGPU/amdgpu-simplify-libcall-pown.ll | 10 +--
22 files changed, 413 insertions(+), 72 deletions(-)
diff --git a/clang/test/CodeGen/X86/strictfp_builtins.c b/clang/test/CodeGen/X86/strictfp_builtins.c
index 43e4060bef259b5..75ed3a2555b3d7d 100644
--- a/clang/test/CodeGen/X86/strictfp_builtins.c
+++ b/clang/test/CodeGen/X86/strictfp_builtins.c
@@ -27,7 +27,7 @@ void p(char *str, int x) {
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca x86_fp80, align 16
// CHECK-NEXT: store x86_fp80 [[LD:%.*]], ptr [[LD_ADDR]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load x86_fp80, ptr [[LD_ADDR]], align 16
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 516) #[[ATTR3]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 516) #[[ATTR4:[0-9]+]]
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.1, i32 noundef [[TMP2]]) #[[ATTR3]]
// CHECK-NEXT: ret void
@@ -43,7 +43,7 @@ void test_long_double_isinf(long double ld) {
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca x86_fp80, align 16
// CHECK-NEXT: store x86_fp80 [[LD:%.*]], ptr [[LD_ADDR]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load x86_fp80, ptr [[LD_ADDR]], align 16
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 504) #[[ATTR3]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 504) #[[ATTR4]]
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.2, i32 noundef [[TMP2]]) #[[ATTR3]]
// CHECK-NEXT: ret void
@@ -59,7 +59,7 @@ void test_long_double_isfinite(long double ld) {
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca x86_fp80, align 16
// CHECK-NEXT: store x86_fp80 [[LD:%.*]], ptr [[LD_ADDR]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load x86_fp80, ptr [[LD_ADDR]], align 16
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 3) #[[ATTR3]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 3) #[[ATTR4]]
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.3, i32 noundef [[TMP2]]) #[[ATTR3]]
// CHECK-NEXT: ret void
diff --git a/clang/test/CodeGen/strictfp_builtins.c b/clang/test/CodeGen/strictfp_builtins.c
index 58815c7de4fa940..2e7581157797116 100644
--- a/clang/test/CodeGen/strictfp_builtins.c
+++ b/clang/test/CodeGen/strictfp_builtins.c
@@ -31,21 +31,21 @@ void p(char *str, int x) {
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[ISZERO:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double 0.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR4]]
+// CHECK-NEXT: [[ISZERO:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double 0.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5:[0-9]+]] [ "fpe.except"(i32 2) ]
// CHECK-NEXT: br i1 [[ISZERO]], label [[FPCLASSIFY_END:%.*]], label [[FPCLASSIFY_NOT_ZERO:%.*]]
// CHECK: fpclassify_end:
// CHECK-NEXT: [[FPCLASSIFY_RESULT:%.*]] = phi i32 [ 4, [[ENTRY:%.*]] ], [ 0, [[FPCLASSIFY_NOT_ZERO]] ], [ 1, [[FPCLASSIFY_NOT_NAN:%.*]] ], [ [[TMP2:%.*]], [[FPCLASSIFY_NOT_INF:%.*]] ]
// CHECK-NEXT: call void @p(ptr noundef @.str.1, i32 noundef [[FPCLASSIFY_RESULT]]) #[[ATTR4]]
// CHECK-NEXT: ret void
// CHECK: fpclassify_not_zero:
-// CHECK-NEXT: [[CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP0]], metadata !"uno", metadata !"fpexcept.strict") #[[ATTR4]]
+// CHECK-NEXT: [[CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP0]], metadata !"uno", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(i32 2) ]
// CHECK-NEXT: br i1 [[CMP]], label [[FPCLASSIFY_END]], label [[FPCLASSIFY_NOT_NAN]]
// CHECK: fpclassify_not_nan:
-// CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR5:[0-9]+]]
-// CHECK-NEXT: [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR6:[0-9]+]]
+// CHECK-NEXT: [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(i32 2) ]
// CHECK-NEXT: br i1 [[ISINF]], label [[FPCLASSIFY_END]], label [[FPCLASSIFY_NOT_INF]]
// CHECK: fpclassify_not_inf:
-// CHECK-NEXT: [[ISNORMAL:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x10000000000000, metadata !"uge", metadata !"fpexcept.strict") #[[ATTR4]]
+// CHECK-NEXT: [[ISNORMAL:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x10000000000000, metadata !"uge", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(i32 2) ]
// CHECK-NEXT: [[TMP2]] = select i1 [[ISNORMAL]], i32 2, i32 3
// CHECK-NEXT: br label [[FPCLASSIFY_END]]
//
@@ -60,7 +60,7 @@ void test_fpclassify(double d) {
// CHECK-NEXT: [[H_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store half [[H:%.*]], ptr [[H_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[H_ADDR]], align 2
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 516) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 516) #[[ATTR5]]
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.2, i32 noundef [[TMP2]]) #[[ATTR4]]
// CHECK-NEXT: ret void
@@ -76,7 +76,7 @@ void test_fp16_isinf(_Float16 h) {
// CHECK-NEXT: [[F_ADDR:%.*]] = alloca float, align 4
// CHECK-NEXT: store float [[F:%.*]], ptr [[F_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F_ADDR]], align 4
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 516) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 516) #[[ATTR5]]
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.3, i32 noundef [[TMP2]]) #[[ATTR4]]
// CHECK-NEXT: ret void
@@ -92,7 +92,7 @@ void test_float_isinf(float f) {
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 516) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 516) #[[ATTR5]]
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.4, i32 noundef [[TMP2]]) #[[ATTR4]]
// CHECK-NEXT: ret void
@@ -108,7 +108,7 @@ void test_double_isinf(double d) {
// CHECK-NEXT: [[H_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store half [[H:%.*]], ptr [[H_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[H_ADDR]], align 2
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 504) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 504) #[[ATTR5]]
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.5, i32 noundef [[TMP2]]) #[[ATTR4]]
// CHECK-NEXT: ret void
@@ -124,7 +124,7 @@ void test_fp16_isfinite(_Float16 h) {
// CHECK-NEXT: [[F_ADDR:%.*]] = alloca float, align 4
// CHECK-NEXT: store float [[F:%.*]], ptr [[F_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F_ADDR]], align 4
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 504) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 504) #[[ATTR5]]
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.6, i32 noundef [[TMP2]]) #[[ATTR4]]
// CHECK-NEXT: ret void
@@ -140,7 +140,7 @@ void test_float_isfinite(float f) {
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 504) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 504) #[[ATTR5]]
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.7, i32 noundef [[TMP2]]) #[[ATTR4]]
// CHECK-NEXT: ret void
@@ -156,8 +156,8 @@ void test_double_isfinite(double d) {
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR5]]
-// CHECK-NEXT: [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR6]]
+// CHECK-NEXT: [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(i32 2) ]
// CHECK-NEXT: [[TMP2:%.*]] = bitcast double [[TMP0]] to i64
// CHECK-NEXT: [[TMP3:%.*]] = icmp slt i64 [[TMP2]], 0
// CHECK-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 -1, i32 1
@@ -176,7 +176,7 @@ void test_isinf_sign(double d) {
// CHECK-NEXT: [[H_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store half [[H:%.*]], ptr [[H_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[H_ADDR]], align 2
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 3) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 3) #[[ATTR5]]
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.9, i32 noundef [[TMP2]]) #[[ATTR4]]
// CHECK-NEXT: ret void
@@ -192,7 +192,7 @@ void test_fp16_isnan(_Float16 h) {
// CHECK-NEXT: [[F_ADDR:%.*]] = alloca float, align 4
// CHECK-NEXT: store float [[F:%.*]], ptr [[F_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F_ADDR]], align 4
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 3) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 3) #[[ATTR5]]
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.10, i32 noundef [[TMP2]]) #[[ATTR4]]
// CHECK-NEXT: ret void
@@ -208,7 +208,7 @@ void test_float_isnan(float f) {
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 3) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 3) #[[ATTR5]]
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.11, i32 noundef [[TMP2]]) #[[ATTR4]]
// CHECK-NEXT: ret void
@@ -224,7 +224,7 @@ void test_double_isnan(double d) {
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 264) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 264) #[[ATTR5]]
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.12, i32 noundef [[TMP2]]) #[[ATTR4]]
// CHECK-NEXT: ret void
diff --git a/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl b/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
index 451d30b4d86f0ed..4d931b0e1051334 100644
--- a/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
+++ b/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
@@ -144,7 +144,7 @@ kernel void device_side_enqueue(global float *a, global float *b, int i) {
// STRICTFP-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(4) [[BLOCK_CAPTURE_ADDR1]], align 4
// STRICTFP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr addrspace(1) [[TMP0]], i32 [[TMP1]]
// STRICTFP-NEXT: [[TMP2:%.*]] = load float, ptr addrspace(1) [[ARRAYIDX]], align 4
-// STRICTFP-NEXT: [[TMP3:%.*]] = call float @llvm.experimental.constrained.fmuladd.f32(float 4.000000e+00, float [[TMP2]], float 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR5]]
+// STRICTFP-NEXT: [[TMP3:%.*]] = call float @llvm.experimental.constrained.fmuladd.f32(float 4.000000e+00, float [[TMP2]], float 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.round"(i32 1), "fpe.except"(i32 2) ]
// STRICTFP-NEXT: [[BLOCK_CAPTURE_ADDR2:%.*]] = getelementptr inbounds nuw <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr addrspace(4) [[DOTBLOCK_DESCRIPTOR]], i32 0, i32 3
// STRICTFP-NEXT: [[TMP4:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[BLOCK_CAPTURE_ADDR2]], align 4
// STRICTFP-NEXT: [[BLOCK_CAPTURE_ADDR3:%.*]] = getelementptr inbounds nuw <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr addrspace(4) [[DOTBLOCK_DESCRIPTOR]], i32 0, i32 4
@@ -173,7 +173,7 @@ kernel void device_side_enqueue(global float *a, global float *b, int i) {
// STRICTFP: attributes #[[ATTR2]] = { convergent noinline nounwind optnone strictfp "stack-protector-buffer-size"="8" }
// STRICTFP: attributes #[[ATTR3:[0-9]+]] = { nocallback nofree nosync nounwind strictfp willreturn memory(inaccessiblemem: readwrite) }
// STRICTFP: attributes #[[ATTR4]] = { convergent nounwind "stack-protector-buffer-size"="8" }
-// STRICTFP: attributes #[[ATTR5]] = { strictfp }
+// STRICTFP: attributes #[[ATTR5]] = { strictfp memory(inaccessiblemem: readwrite) }
//.
// SPIR32: [[META0:![0-9]+]] = !{i32 1, !"wchar_size", i32 4}
// SPIR32: [[META1:![0-9]+]] = !{i32 2, i32 0}
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 7e01331b20c5709..37eaf0acec6c974 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -3005,6 +3005,29 @@ A "convergencectrl" operand bundle is only valid on a ``convergent`` operation.
When present, the operand bundle must contain exactly one value of token type.
See the :doc:`ConvergentOperations` document for details.
+.. _ob_fpe:
+
+Floating-point Environment Operand Bundles
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+These operand bundles provide details on how the operation interacts with the
+:ref:`floating-point environment <floatenv>`. There are two kinds of such
+operand bundles, which characterize interaction with floating-point control
+modes and status bits.
+
+An operand bundle tagged with "fpe.round" may be associated with the operations
+that may depend on rounding mode. It has an integer value, which represents
+the rounding mode with the same encoding as ``llvm::RoundingMode`` uses. If it
+is present and is not equal to ``RoundingMode::Dynamic``, it specifies the
+rounding mode that will be used for evaluating the operation. The value
+``RoundingMode::Dynamic`` indicates that the rounding mode used by the
+operation is specified in a floating-point control register.
+
+An operand bundle tagged with "fpe.except" may be associated with the operations
+that may read or write floating-point exception flags. It has the same meaning
+and encoding as the corresponding argument in
+:ref:`constrained intrinsics <constrainedfp>`.
+
.. _moduleasm:
Module-Level Inline Assembly
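As an illustrative sketch (not part of the diff), a call that uses dynamic
rounding and strict exception semantics carries the bundles as follows;
i32 7 is the encoding of `RoundingMode::Dynamic` and i32 2 of `fp::ebStrict`,
matching the updated tests below (%y is a placeholder value):

    %v = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %y, metadata !"round.dynamic", metadata !"fpexcept.strict") #0 [ "fpe.round"(i32 7), "fpe.except"(i32 2) ]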
diff --git a/llvm/include/llvm/ADT/FloatingPointMode.h b/llvm/include/llvm/ADT/FloatingPointMode.h
index 639d931ef88fec5..970cc89093924ba 100644
--- a/llvm/include/llvm/ADT/FloatingPointMode.h
+++ b/llvm/include/llvm/ADT/FloatingPointMode.h
@@ -47,6 +47,15 @@ enum class RoundingMode : int8_t {
Invalid = -1 ///< Denotes invalid value.
};
+inline bool isValidRoundingMode(int X) {
+ return X >= 0 && X <= static_cast<int>(RoundingMode::Dynamic);
+}
+
+inline RoundingMode castToRoundingMode(int X) {
+ assert(isValidRoundingMode(X));
+ return static_cast<RoundingMode>(X);
+}
+
/// Returns text representation of the given rounding mode.
inline StringRef spell(RoundingMode RM) {
switch (RM) {
diff --git a/llvm/include/llvm/IR/AutoUpgrade.h b/llvm/include/llvm/IR/AutoUpgrade.h
index 97c3e4d7589d7bb..8bd005d73fba369 100644
--- a/llvm/include/llvm/IR/AutoUpgrade.h
+++ b/llvm/include/llvm/IR/AutoUpgrade.h
@@ -107,6 +107,8 @@ namespace llvm {
/// Upgrade operand bundles (without knowing about their user instruction).
void UpgradeOperandBundles(std::vector<OperandBundleDef> &OperandBundles);
+ CallBase *upgradeConstrainedFunctionCall(CallBase *CB);
+
} // End llvm namespace
#endif
diff --git a/llvm/include/llvm/IR/FPEnv.h b/llvm/include/llvm/IR/FPEnv.h
index a0197377759daf3..e4602bab6038e0a 100644
--- a/llvm/include/llvm/IR/FPEnv.h
+++ b/llvm/include/llvm/IR/FPEnv.h
@@ -43,6 +43,15 @@ enum ExceptionBehavior : uint8_t {
}
+inline bool isValidExceptionBehavior(unsigned X) {
+ return X <= fp::ExceptionBehavior::ebStrict;
+}
+
+inline fp::ExceptionBehavior castToExceptionBehavior(unsigned X) {
+ assert(isValidExceptionBehavior(X));
+ return static_cast<fp::ExceptionBehavior>(X);
+}
+
/// Returns a valid RoundingMode enumerator when given a string
/// that is valid as input in constrained intrinsic rounding mode
/// metadata.
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index b73309175f20d18..82c5a769ca44bcd 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -374,6 +374,9 @@ class IRBuilderBase {
void setConstrainedFPCallAttr(CallBase *I) {
I->addFnAttr(Attribute::StrictFP);
+ MemoryEffects ME = MemoryEffects::inaccessibleMemOnly();
+ auto A = Attribute::getWithMemoryEffects(getContext(), ME);
+ I->addFnAttr(A);
}
void setDefaultOperandBundles(ArrayRef<OperandBundleDef> OpBundles) {
@@ -991,6 +994,16 @@ class IRBuilderBase {
ArrayRef<Value *> Args, FMFSource FMFSource = {},
const Twine &Name = "");
+ /// Create a call to intrinsic \p ID with \p Args, mangled using \p Types and
+ /// with operand bundles.
+ /// If \p FMFSource is provided, copy fast-math-flags from that instruction to
+ /// the intrinsic.
+ CallInst *CreateIntrinsic(Intrinsic::ID ID, ArrayRef<Type *> Types,
+ ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> OpBundles,
+ Instruction *FMFSource = nullptr,
+ const Twine &Name = "");
+
/// Create a call to intrinsic \p ID with \p RetTy and \p Args. If
/// \p FMFSource is provided, copy fast-math-flags from that instruction to
/// the intrinsic.
@@ -1325,6 +1338,15 @@ class IRBuilderBase {
return I;
}
+ RoundingMode
+ getEffectiveRounding(std::optional<RoundingMode> Rounding = std::nullopt) {
+ RoundingMode RM = DefaultConstrainedRounding;
+
+ if (Rounding)
+ RM = *Rounding;
+ return RM;
+ }
+
Value *getConstrainedFPRounding(std::optional<RoundingMode> Rounding) {
RoundingMode UseRounding = DefaultConstrainedRounding;
@@ -1339,6 +1361,14 @@ class IRBuilderBase {
return MetadataAsValue::get(Context, RoundingMDS);
}
+ fp::ExceptionBehavior getEffectiveExceptionBehavior(
+ std::optional<fp::ExceptionBehavior> Except = std::nullopt) {
+ fp::ExceptionBehavior EB = DefaultConstrainedExcept;
+ if (Except)
+ EB = *Except;
+ return EB;
+ }
+
Value *getConstrainedFPExcept(std::optional<fp::ExceptionBehavior> Except) {
std::optional<StringRef> ExceptStr = convertExceptionBehaviorToStr(
Except.value_or(DefaultConstrainedExcept));
@@ -2479,6 +2509,10 @@ class IRBuilderBase {
Function *Callee, ArrayRef<Value *> Args, const Twine &Name = "",
std::optional<RoundingMode> Rounding = std::nullopt,
std::optional<fp::ExceptionBehavior> Except = std::nullopt);
+ CallInst *CreateConstrainedFPCall(
+ Intrinsic::ID ID, ArrayRef<Value *> Args, const Twine &Name = "",
+ std::optional<RoundingMode> Rounding = std::nullopt,
+ std::optional<fp::ExceptionBehavior> Except = std::nullopt);
Value *CreateSelect(Value *C, Value *True, Value *False,
const Twine &Name = "", Instruction *MDFrom = nullptr);
@@ -2678,6 +2712,20 @@ class IRBuilderBase {
CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
Value *Alignment,
Value *OffsetValue = nullptr);
+
+ void
+ createFPRoundingBundle(SmallVectorImpl<OperandBundleDef> &Bundles,
+ std::optional<RoundingMode> Rounding = std::nullopt) {
+ int RM = static_cast<int32_t>(getEffectiveRounding(Rounding));
+ Bundles.emplace_back("fpe.round", getInt32(RM));
+ }
+
+ void createFPExceptionBundle(
+ SmallVectorImpl<OperandBundleDef> &Bundles,
+ std::optional<fp::ExceptionBehavior> Except = std::nullopt) {
+ int EB = getEffectiveExceptionBehavior(Except);
+ Bundles.emplace_back("fpe.except", getInt32(EB));
+ }
};
/// This provides a uniform API for creating instructions and inserting
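With these helpers, the constrained-FP builder methods attach the bundles
automatically. A sketch of the IR that `CreateConstrainedFPCmp` is expected
to emit after this change; compares get only the exception bundle, since
they have no rounding operand (%x and %y are placeholders):

    %cmp = call i1 @llvm.experimental.constrained.fcmp.f64(double %x, double %y, metadata !"oeq", metadata !"fpexcept.strict") #0 [ "fpe.except"(i32 2) ]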
diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h
index e6332a16df7d5f5..b2a20a231182ad8 100644
--- a/llvm/include/llvm/IR/InstrTypes.h
+++ b/llvm/include/llvm/IR/InstrTypes.h
@@ -25,6 +25,7 @@
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FMF.h"
+#include "llvm/IR/FPEnv.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
@@ -2131,6 +2132,12 @@ class CallBase : public Instruction {
return false;
}
+ /// Return rounding mode specified by operand bundles.
+ std::optional<RoundingMode> getRoundingMode() const;
+
+ /// Return exception behavior specified by operand bundles.
+ std::optional<fp::ExceptionBehavior> getExceptionBehavior() const;
+
/// Used to keep track of an operand bundle. See the main comment on
/// OperandBundleUser above.
struct BundleOpInfo {
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index 3436216d478e381..b5134a9ee900c5e 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -723,8 +723,6 @@ class VPBinOpIntrinsic : public VPIntrinsic {
class ConstrainedFPIntrinsic : public IntrinsicInst {
public:
unsigned getNonMetadataArgCount() const;
- std::optional<RoundingMode> getRoundingMode() const;
- std::optional<fp::ExceptionBehavior> getExceptionBehavior() const;
bool isDefaultFPEnvironment() const;
// Methods for support type inquiry through isa, cast, and dyn_cast:
diff --git a/llvm/include/llvm/IR/LLVMContext.h b/llvm/include/llvm/IR/LLVMContext.h
index bbd125fd38cf1a4..cc7f66f17e2eb75 100644
--- a/llvm/include/llvm/IR/LLVMContext.h
+++ b/llvm/include/llvm/IR/LLVMContext.h
@@ -96,6 +96,8 @@ class LLVMContext {
OB_ptrauth = 7, // "ptrauth"
OB_kcfi = 8, // "kcfi"
OB_convergencectrl = 9, // "convergencectrl"
+ OB_fpe_round = 10, // "fpe.round"
+ OB_fpe_except = 11, // "fpe.except"
};
/// getMDKindID - Return a unique non-zero ID for the specified metadata kind.
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index 52d48a69f0eb533..56becdcea896c92 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -6317,6 +6317,50 @@ bool isOldDbgFormatIntrinsic(StringRef Name) {
FnID == Intrinsic::dbg_assign;
}
+bool updateConstrainedIntrinsic(StringRef Name, ArrayRef<Value *> Args,
+ SmallVectorImpl<OperandBundleDef> &Bundles,
+ LLVMContext &C) {
+ if (Args.empty())
+ return false;
+ if (!Name.starts_with("llvm.experimental.constrained."))
+ return false;
+ for (auto &B : Bundles) {
+ if (B.getTag().starts_with("fpe."))
+ return false;
+ }
+
+ const auto getMetadataArgumentValue = [](Value *Arg) -> StringRef {
+ if (auto *MAV = dyn_cast<MetadataAsValue>(Arg)) {
+ if (const auto *MD = MAV->getMetadata()) {
+ if (auto MDStr = dyn_cast<MDString>(MD))
+ return MDStr->getString();
+ }
+ }
+ return StringRef();
+ };
+
+ if (Args.size() > 1) {
+ Value *V = Args.take_back(2).front();
+ if (StringRef VStr = getMetadataArgumentValue(V); !VStr.empty()) {
+ if (auto RM = convertStrToRoundingMode(VStr)) {
+ int RMVal = static_cast<int>(*RM);
+ Bundles.emplace_back("fpe.round",
+ ConstantInt::get(Type::getInt32Ty(C), RMVal));
+ }
+ }
+ }
+
+ Value *V = Args.back();
+ if (StringRef VStr = getMetadataArgumentValue(V); !VStr.empty()) {
+ if (auto EB = convertStrToExceptionBehavior(VStr)) {
+ Bundles.emplace_back("fpe.except",
+ ConstantInt::get(Type::getInt32Ty(C), *EB));
+ }
+ }
+
+ return true;
+}
+
/// FunctionHeader
/// ::= OptionalLinkage OptionalPreemptionSpecifier OptionalVisibility
/// OptionalCallingConv OptRetAttrs OptUnnamedAddr Type GlobalName
@@ -8090,6 +8134,8 @@ bool LLParser::parseCall(Instruction *&Inst, PerFunctionState &PFS,
AttributeList::get(Context, AttributeSet::get(Context, FnAttrs),
AttributeSet::get(Context, RetAttrs), Attrs);
+ updateConstrainedIntrinsic(CalleeID.StrVal, Args, BundleList, Context);
+
CallInst *CI = CallInst::Create(Ty, Callee, Args, BundleList);
CI->setTailCallKind(TCK);
CI->setCallingConv(CC);
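The effect is that textual IR written without the bundles is parsed as if
they were present. A sketch, assuming `round.downward` encodes to
`RoundingMode::TowardNegative` (3) and `fpexcept.maytrap` to
`fp::ebMayTrap` (1):

    ; As written in the source .ll file:
    %d = call double @llvm.experimental.constrained.fdiv.f64(double %x, double %y, metadata !"round.downward", metadata !"fpexcept.maytrap") #0

    ; As materialized by the parser:
    %d = call double @llvm.experimental.constrained.fdiv.f64(double %x, double %y, metadata !"round.downward", metadata !"fpexcept.maytrap") #0 [ "fpe.round"(i32 3), "fpe.except"(i32 1) ]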
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 06e62bf7f9f7576..cc2b112e9ec858e 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -4324,6 +4324,64 @@ static void upgradeDbgIntrinsicToDbgRecord(StringRef Name, CallBase *CI) {
CI->getParent()->insertDbgRecordBefore(DR, CI->getIterator());
}
+static CallBase *upgradeConstrainedIntrinsicCall(CallBase *CB, Function *F,
+ IRBuilder<> &Builder) {
+ if (CB->getOperandBundle(LLVMContext::OB_fpe_round))
+ return nullptr;
+
+ auto *CFPI = cast<ConstrainedFPIntrinsic>(F);
+ SmallVector<OperandBundleDef, 2> NewBundles;
+ LLVMContext &C = CB->getContext();
+
+ auto RM = CFPI->getRoundingMode();
+ if (RM) {
+ auto CurrentRM = CB->getRoundingMode();
+ if (CurrentRM) {
+ assert(*RM == *CurrentRM);
+ } else {
+ int RMValue = static_cast<int>(*RM);
+ NewBundles.emplace_back("fpe.round",
+ ConstantInt::get(Type::getInt32Ty(C), RMValue));
+ }
+ }
+
+ auto EB = CFPI->getExceptionBehavior();
+ if (EB) {
+ auto CurrentEB = CB->getExceptionBehavior();
+ if (CurrentEB) {
+ assert(*EB == *CurrentEB);
+ } else {
+ NewBundles.emplace_back("fpe.except",
+ ConstantInt::get(Type::getInt32Ty(C), *EB));
+ }
+ }
+
+ CallInst *NewCB = nullptr;
+ if (!NewBundles.empty()) {
+ SmallVector<Value *, 4> Args(CB->args());
+ SmallVector<OperandBundleDef, 2> Bundles;
+ CB->getOperandBundlesAsDefs(Bundles);
+ Bundles.append(NewBundles);
+
+ Builder.SetInsertPoint(CB->getParent(), CB->getIterator());
+ MDNode *FPMath = CB->getMetadata(LLVMContext::MD_fpmath);
+ NewCB = Builder.CreateCall(F, Args, Bundles, CB->getName(), FPMath);
+
+ NewCB->copyMetadata(*CB);
+ AttributeList Attrs = CB->getAttributes();
+ NewCB->setAttributes(Attrs);
+ if (isa<FPMathOperator>(CB)) {
+ FastMathFlags FMF = CB->getFastMathFlags();
+ NewCB->setFastMathFlags(FMF);
+ }
+ MemoryEffects ME = MemoryEffects::inaccessibleMemOnly();
+ auto A = Attribute::getWithMemoryEffects(C, ME);
+ NewCB->addFnAttr(A);
+ }
+
+ return NewCB;
+}
+
/// Upgrade a call to an old intrinsic. All argument and return casting must be
/// provided to seamlessly integrate with existing context.
void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
@@ -4352,6 +4410,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
bool IsARM = Name.consume_front("arm.");
bool IsAMDGCN = Name.consume_front("amdgcn.");
bool IsDbg = Name.consume_front("dbg.");
+ bool IsConstrained = Name.starts_with("experimental.constrained.");
Value *Rep = nullptr;
if (!IsX86 && Name == "stackprotectorcheck") {
@@ -4380,6 +4439,8 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
} else {
upgradeDbgIntrinsicToDbgRecord(Name, CI);
}
+ } else if (IsConstrained) {
+ Rep = upgradeConstrainedIntrinsicCall(CI, F, Builder);
} else {
llvm_unreachable("Unknown function for CallBase upgrade.");
}
@@ -5644,3 +5705,20 @@ void llvm::UpgradeOperandBundles(std::vector<OperandBundleDef> &Bundles) {
OBD.inputs().empty();
});
}
+
+CallBase *llvm::upgradeConstrainedFunctionCall(CallBase *CB) {
+ Function *F = dyn_cast<Function>(CB->getCalledOperand());
+ if (!F)
+ return nullptr;
+
+ if (CB->getNumOperands() < 1)
+ return nullptr;
+
+ StringRef Name = F->getName();
+  if (!Name.starts_with("llvm.experimental.constrained."))
+ return nullptr;
+
+ LLVMContext &C = CB->getContext();
+ IRBuilder<> Builder(C);
+ return upgradeConstrainedIntrinsicCall(CB, F, Builder);
+}
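A call loaded from old bitcode is thus upgraded in place. A sketch of the
before/after IR; the added memory attribute matches what
`setConstrainedFPCallAttr` sets (%a and %b are placeholders):

    ; As loaded from old bitcode:
    %r = call float @llvm.experimental.constrained.fmul.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") #0

    ; After auto-upgrade, with bundles derived from the metadata arguments:
    %r = call float @llvm.experimental.constrained.fmul.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") #0 [ "fpe.round"(i32 7), "fpe.except"(i32 2) ]

    attributes #0 = { strictfp memory(inaccessiblemem: readwrite) }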
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index 27b499e42a4e4ca..95e91930210f072 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -897,6 +897,17 @@ CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
return createCallHelper(Fn, Args, Name, FMFSource);
}
+CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
+ ArrayRef<Type *> Types,
+ ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> OpBundles,
+ Instruction *FMFSource,
+ const Twine &Name) {
+ Module *M = BB->getModule();
+ Function *Fn = Intrinsic::getDeclaration(M, ID, Types);
+ return createCallHelper(Fn, Args, Name, FMFSource, OpBundles);
+}
+
CallInst *IRBuilderBase::CreateIntrinsic(Type *RetTy, Intrinsic::ID ID,
ArrayRef<Value *> Args,
FMFSource FMFSource,
@@ -933,8 +944,11 @@ CallInst *IRBuilderBase::CreateConstrainedFPBinOp(
FastMathFlags UseFMF = FMFSource.get(FMF);
- CallInst *C = CreateIntrinsic(ID, {L->getType()},
- {L, R, RoundingV, ExceptV}, nullptr, Name);
+ SmallVector<OperandBundleDef, 2> OpBundles;
+ createFPRoundingBundle(OpBundles, Rounding);
+ createFPExceptionBundle(OpBundles, Except);
+ CallInst *C = CreateIntrinsic(ID, {L->getType()}, {L, R, RoundingV, ExceptV},
+ OpBundles, nullptr, Name);
setConstrainedFPCallAttr(C);
setFPAttrs(C, FPMathTag, UseFMF);
return C;
@@ -948,8 +962,12 @@ CallInst *IRBuilderBase::CreateConstrainedFPUnroundedBinOp(
FastMathFlags UseFMF = FMFSource.get(FMF);
- CallInst *C =
- CreateIntrinsic(ID, {L->getType()}, {L, R, ExceptV}, nullptr, Name);
+ SmallVector<OperandBundleDef, 2> OpBundles;
+ int EB = getEffectiveExceptionBehavior(Except);
+ OpBundles.emplace_back("fpe.except", getInt32(EB));
+
+ CallInst *C = CreateIntrinsic(ID, {L->getType()}, {L, R, ExceptV}, OpBundles,
+ nullptr, Name);
setConstrainedFPCallAttr(C);
setFPAttrs(C, FPMathTag, UseFMF);
return C;
@@ -975,17 +993,22 @@ CallInst *IRBuilderBase::CreateConstrainedFPCast(
const Twine &Name, MDNode *FPMathTag, std::optional<RoundingMode> Rounding,
std::optional<fp::ExceptionBehavior> Except) {
Value *ExceptV = getConstrainedFPExcept(Except);
+ bool HasRounding = Intrinsic::hasConstrainedFPRoundingModeOperand(ID);
FastMathFlags UseFMF = FMFSource.get(FMF);
+ SmallVector<OperandBundleDef, 2> OpBundles;
+ createFPRoundingBundle(OpBundles, Rounding);
+ createFPExceptionBundle(OpBundles, Except);
+
CallInst *C;
- if (Intrinsic::hasConstrainedFPRoundingModeOperand(ID)) {
+ if (HasRounding) {
Value *RoundingV = getConstrainedFPRounding(Rounding);
C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV},
- nullptr, Name);
+ OpBundles, nullptr, Name);
} else
- C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
- Name);
+ C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, OpBundles,
+ nullptr, Name);
setConstrainedFPCallAttr(C);
@@ -1017,8 +1040,12 @@ CallInst *IRBuilderBase::CreateConstrainedFPCmp(
Value *PredicateV = getConstrainedFPPredicate(P);
Value *ExceptV = getConstrainedFPExcept(Except);
- CallInst *C = CreateIntrinsic(ID, {L->getType()},
- {L, R, PredicateV, ExceptV}, nullptr, Name);
+ SmallVector<OperandBundleDef, 1> OpBundles;
+ int EB = getEffectiveExceptionBehavior(Except);
+ OpBundles.emplace_back("fpe.except", getInt32(EB));
+
+ CallInst *C = CreateIntrinsic(ID, {L->getType()}, {L, R, PredicateV, ExceptV},
+ OpBundles, nullptr, Name);
setConstrainedFPCallAttr(C);
return C;
}
@@ -1028,14 +1055,19 @@ CallInst *IRBuilderBase::CreateConstrainedFPCall(
std::optional<RoundingMode> Rounding,
std::optional<fp::ExceptionBehavior> Except) {
llvm::SmallVector<Value *, 6> UseArgs;
+ SmallVector<OperandBundleDef, 2> OpBundles;
append_range(UseArgs, Args);
- if (Intrinsic::hasConstrainedFPRoundingModeOperand(Callee->getIntrinsicID()))
+ if (Intrinsic::hasConstrainedFPRoundingModeOperand(
+ Callee->getIntrinsicID())) {
UseArgs.push_back(getConstrainedFPRounding(Rounding));
+ createFPRoundingBundle(OpBundles, Rounding);
+ }
UseArgs.push_back(getConstrainedFPExcept(Except));
+ createFPExceptionBundle(OpBundles, Except);
- CallInst *C = CreateCall(Callee, UseArgs, Name);
+ CallInst *C = CreateCall(Callee, UseArgs, OpBundles, Name);
setConstrainedFPCallAttr(C);
return C;
}
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index 2d6fe40f4c1de00..2a851802a12325c 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -604,6 +604,24 @@ bool CallBase::hasClobberingOperandBundles() const {
getIntrinsicID() != Intrinsic::assume;
}
+std::optional<RoundingMode> CallBase::getRoundingMode() const {
+ if (auto RoundingBundle = getOperandBundle(LLVMContext::OB_fpe_round)) {
+ uint64_t RM =
+ cast<ConstantInt>(RoundingBundle->Inputs.front())->getSExtValue();
+ return castToRoundingMode(RM);
+ }
+ return std::nullopt;
+}
+
+std::optional<fp::ExceptionBehavior> CallBase::getExceptionBehavior() const {
+ if (auto ExceptionBundle = getOperandBundle(LLVMContext::OB_fpe_except)) {
+ uint64_t EB =
+ cast<ConstantInt>(ExceptionBundle->Inputs.front())->getZExtValue();
+ return castToExceptionBehavior(EB);
+ }
+ return std::nullopt;
+}
+
MemoryEffects CallBase::getMemoryEffects() const {
MemoryEffects ME = getAttributes().getMemoryEffects();
if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
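These accessors now read only the operand bundles. For a call like the
following sketch they would return `RoundingMode::TowardPositive` and
`fp::ebIgnore`, regardless of the metadata arguments:

    %r = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.upward", metadata !"fpexcept.ignore") #0 [ "fpe.round"(i32 2), "fpe.except"(i32 0) ]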
diff --git a/llvm/lib/IR/IntrinsicInst.cpp b/llvm/lib/IR/IntrinsicInst.cpp
index ad174b1487a6435..2d2011cd44ff8b1 100644
--- a/llvm/lib/IR/IntrinsicInst.cpp
+++ b/llvm/lib/IR/IntrinsicInst.cpp
@@ -273,29 +273,6 @@ void InstrProfCallsite::setCallee(Value *Callee) {
setArgOperand(4, Callee);
}
-std::optional<RoundingMode> ConstrainedFPIntrinsic::getRoundingMode() const {
- unsigned NumOperands = arg_size();
- Metadata *MD = nullptr;
- auto *MAV = dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 2));
- if (MAV)
- MD = MAV->getMetadata();
- if (!MD || !isa<MDString>(MD))
- return std::nullopt;
- return convertStrToRoundingMode(cast<MDString>(MD)->getString());
-}
-
-std::optional<fp::ExceptionBehavior>
-ConstrainedFPIntrinsic::getExceptionBehavior() const {
- unsigned NumOperands = arg_size();
- Metadata *MD = nullptr;
- auto *MAV = dyn_cast<MetadataAsValue>(getArgOperand(NumOperands - 1));
- if (MAV)
- MD = MAV->getMetadata();
- if (!MD || !isa<MDString>(MD))
- return std::nullopt;
- return convertStrToExceptionBehavior(cast<MDString>(MD)->getString());
-}
-
bool ConstrainedFPIntrinsic::isDefaultFPEnvironment() const {
std::optional<fp::ExceptionBehavior> Except = getExceptionBehavior();
if (Except) {
diff --git a/llvm/lib/IR/LLVMContext.cpp b/llvm/lib/IR/LLVMContext.cpp
index 447e5d92e0b99dd..f0f9673f3e8cc9f 100644
--- a/llvm/lib/IR/LLVMContext.cpp
+++ b/llvm/lib/IR/LLVMContext.cpp
@@ -82,6 +82,16 @@ LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) {
assert(Entry->second == BundleTagID && "operand bundle id drifted!");
}
+ auto *RoundingEntry = pImpl->getOrInsertBundleTag("fpe.round");
+ assert(RoundingEntry->second == LLVMContext::OB_fpe_round &&
+ "fpe.round operand bundle id drifted!");
+ (void)RoundingEntry;
+
+ auto *ExceptionEntry = pImpl->getOrInsertBundleTag("fpe.except");
+ assert(ExceptionEntry->second == LLVMContext::OB_fpe_except &&
+ "fpe.except operand bundle id drifted!");
+ (void)ExceptionEntry;
+
SyncScope::ID SingleThreadSSID =
pImpl->getOrInsertSyncScopeID("singlethread");
assert(SingleThreadSSID == SyncScope::SingleThread &&
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 7b6f7b5aa6171a4..712836a227b347c 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -658,6 +658,9 @@ class Verifier : public InstVisitor<Verifier>, VerifierSupport {
/// Verify the llvm.experimental.noalias.scope.decl declarations
void verifyNoAliasScopeDecl();
+
+  /// Verify a call to a constrained intrinsic.
+ void verifyConstrainedInstrinsicCall(const CallBase &CB);
};
} // end anonymous namespace
@@ -3726,7 +3729,9 @@ void Verifier::visitCallBase(CallBase &Call) {
FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
FoundPtrauthBundle = false, FoundKCFIBundle = false,
- FoundAttachedCallBundle = false;
+ FoundAttachedCallBundle = false, FoundFpeRoundBundle = false,
+ FoundFpeExceptBundle = false;
+
for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
OperandBundleUse BU = Call.getOperandBundleAt(i);
uint32_t Tag = BU.getTagID();
@@ -3789,9 +3794,31 @@ void Verifier::visitCallBase(CallBase &Call) {
"Multiple \"clang.arc.attachedcall\" operand bundles", Call);
FoundAttachedCallBundle = true;
verifyAttachedCallBundle(Call, BU);
+ } else if (Tag == LLVMContext::OB_fpe_round) {
+ Check(!FoundFpeRoundBundle, "Multiple fpe.round operand bundles", Call);
+ Check(BU.Inputs.size() == 1,
+ "Expected exactly one fpe.round bundle operand", Call);
+ auto RM = dyn_cast<ConstantInt>(BU.Inputs.front());
+ Check(RM, "Value of fpe.round bundle operand must be an integer", Call);
+ Check(isValidRoundingMode(RM->getSExtValue()),
+ "Invalid value of fpe.round bundle operand", Call);
+ FoundFpeRoundBundle = true;
+ } else if (Tag == LLVMContext::OB_fpe_except) {
+ Check(!FoundFpeExceptBundle, "Multiple fpe.except operand bundles", Call);
+ Check(BU.Inputs.size() == 1,
+ "Expected exactly one fpe.except bundle operand", Call);
+ auto EB = dyn_cast<ConstantInt>(BU.Inputs.front());
+ Check(EB, "Value of fpe.except bundle operand must be an integer", Call);
+ Check(isValidExceptionBehavior(EB->getZExtValue()),
+ "Invalid value of fpe.except bundle operand", Call);
+ FoundFpeExceptBundle = true;
}
}
+  // Verify that FP options specified in constrained intrinsic arguments
+  // agree with the options specified in operand bundles.
+ verifyConstrainedInstrinsicCall(Call);
+
// Verify that callee and callsite agree on whether to use pointer auth.
Check(!(Call.getCalledFunction() && FoundPtrauthBundle),
"Direct call cannot have a ptrauth bundle", Call);
@@ -3818,6 +3845,47 @@ void Verifier::visitCallBase(CallBase &Call) {
visitInstruction(Call);
}
+void Verifier::verifyConstrainedInstrinsicCall(const CallBase &CB) {
+ const auto *CFPI = dyn_cast<ConstrainedFPIntrinsic>(&CB);
+ if (!CFPI)
+ return;
+
+ // FP metadata arguments must not conflict with the corresponding
+ // operand bundles.
+ if (std::optional<RoundingMode> RM = CFPI->getRoundingMode()) {
+ RoundingMode Rounding = *RM;
+ auto RoundingBundle = CB.getOperandBundle(LLVMContext::OB_fpe_round);
+ Check(RoundingBundle,
+ "Constrained intrinsic has a rounding argument but the call does not",
+ CB);
+ if (RoundingBundle) {
+ OperandBundleUse OBU = *RoundingBundle;
+ uint64_t BundleRM = cast<ConstantInt>(OBU.Inputs.front())->getZExtValue();
+ Check(BundleRM == static_cast<uint64_t>(Rounding),
+ "Rounding mode of the constrained intrinsic differs from that in "
+ "operand bundle",
+ CB);
+ }
+ }
+
+ if (std::optional<fp::ExceptionBehavior> EB = CFPI->getExceptionBehavior()) {
+ fp::ExceptionBehavior Excepts = *EB;
+ auto ExceptionBundle = CB.getOperandBundle(LLVMContext::OB_fpe_except);
+ Check(ExceptionBundle,
+ "Constrained intrinsic has an exception handling argument but the "
+ "call does not",
+ CB);
+ if (ExceptionBundle) {
+ OperandBundleUse OBU = *ExceptionBundle;
+ uint64_t BundleEB = cast<ConstantInt>(OBU.Inputs.front())->getZExtValue();
+ Check(BundleEB == static_cast<uint64_t>(Excepts),
+ "Exception behavior of the constrained intrinsic differs from that "
+ "in operand bundle",
+ CB);
+ }
+ }
+}
+
void Verifier::verifyTailCCMustTailAttrs(const AttrBuilder &Attrs,
StringRef Context) {
Check(!Attrs.contains(Attribute::InAlloca),
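A sketch of IR this check is meant to reject: the metadata argument encodes
to-nearest (1) while the bundle claims toward-zero (0):

    ; Fails verification: "Rounding mode of the constrained intrinsic
    ; differs from that in operand bundle"
    %r = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.strict") #0 [ "fpe.round"(i32 0), "fpe.except"(i32 2) ]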
diff --git a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
index 53e486f3dc6cdaf..478691d031150f4 100644
--- a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -251,10 +251,12 @@ static bool markTails(Function &F, OptimizationRemarkEmitter *ORE) {
// Special-case operand bundles "clang.arc.attachedcall", "ptrauth", and
// "kcfi".
- bool IsNoTail = CI->isNoTailCall() ||
- CI->hasOperandBundlesOtherThan(
- {LLVMContext::OB_clang_arc_attachedcall,
- LLVMContext::OB_ptrauth, LLVMContext::OB_kcfi});
+ bool IsNoTail =
+ CI->isNoTailCall() ||
+ CI->hasOperandBundlesOtherThan(
+ {LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_ptrauth,
+ LLVMContext::OB_kcfi, LLVMContext::OB_fpe_round,
+ LLVMContext::OB_fpe_except});
if (!IsNoTail && CI->doesNotAccessMemory()) {
// A call to a readnone function whose arguments are all things computed
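In other words, the new bundles no longer prevent marking a call as a tail
call. An illustrative sketch (`@callee` is a placeholder):

    %r = tail call double @callee(double %x) [ "fpe.round"(i32 1), "fpe.except"(i32 2) ]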
diff --git a/llvm/lib/Transforms/Utils/CloneFunction.cpp b/llvm/lib/Transforms/Utils/CloneFunction.cpp
index 8863dff4482a1a9..64648f9459cae99 100644
--- a/llvm/lib/Transforms/Utils/CloneFunction.cpp
+++ b/llvm/lib/Transforms/Utils/CloneFunction.cpp
@@ -494,13 +494,23 @@ PruningFunctionCloner::cloneInstruction(BasicBlock::const_iterator II) {
// The last arguments of a constrained intrinsic are metadata that
// represent rounding mode (absents in some intrinsics) and exception
// behavior. The inlined function uses default settings.
- if (Intrinsic::hasConstrainedFPRoundingModeOperand(CIID))
+ SmallVector<OperandBundleDef, 2> Bundles;
+ if (Intrinsic::hasConstrainedFPRoundingModeOperand(CIID)) {
Args.push_back(
MetadataAsValue::get(Ctx, MDString::get(Ctx, "round.tonearest")));
+ Bundles.emplace_back(
+ "fpe.round",
+ ConstantInt::get(
+ Type::getInt32Ty(Ctx),
+ static_cast<int>(RoundingMode::NearestTiesToEven)));
+ }
Args.push_back(
MetadataAsValue::get(Ctx, MDString::get(Ctx, "fpexcept.ignore")));
-
- NewInst = CallInst::Create(IFn, Args, OldInst.getName() + ".strict");
+ Bundles.emplace_back("fpe.except",
+ ConstantInt::get(Type::getInt32Ty(Ctx),
+ fp::ExceptionBehavior::ebIgnore));
+ NewInst =
+ CallInst::Create(IFn, Args, Bundles, OldInst.getName() + ".strict");
}
}
if (!NewInst)
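The cloned call therefore carries the default environment both in metadata
arguments and in bundles (NearestTiesToEven = 1, ebIgnore = 0). A sketch of
the resulting instruction:

    %mul.strict = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") [ "fpe.round"(i32 1), "fpe.except"(i32 0) ]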
diff --git a/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll b/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll
index d860104b9cb3d97..01e5b3f6673ae55 100644
--- a/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll
+++ b/llvm/test/Bitcode/operand-bundles-bc-analyzer.ll
@@ -13,6 +13,8 @@
; CHECK-NEXT: <OPERAND_BUNDLE_TAG
; CHECK-NEXT: <OPERAND_BUNDLE_TAG
; CHECK-NEXT: <OPERAND_BUNDLE_TAG
+; CHECK-NEXT: <OPERAND_BUNDLE_TAG
+; CHECK-NEXT: <OPERAND_BUNDLE_TAG
; CHECK-NEXT: </OPERAND_BUNDLE_TAGS_BLOCK
; CHECK: <FUNCTION_BLOCK
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll
index f9c359bc114ed3a..3e4690ec3640f2f 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll
@@ -819,11 +819,11 @@ define float @test_pown_fast_f32_strictfp(float %x, i32 %y) #1 {
; CHECK-LABEL: define float @test_pown_fast_f32_strictfp
; CHECK-SAME: (float [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[__FABS:%.*]] = call fast float @llvm.fabs.f32(float [[X]]) #[[ATTR0]]
-; CHECK-NEXT: [[__LOG2:%.*]] = call fast float @llvm.log2.f32(float [[__FABS]]) #[[ATTR0]]
-; CHECK-NEXT: [[POWNI2F:%.*]] = call fast float @llvm.experimental.constrained.sitofp.f32.i32(i32 [[Y]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[__YLOGX:%.*]] = call fast float @llvm.experimental.constrained.fmul.f32(float [[POWNI2F]], float [[__LOG2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[__EXP2:%.*]] = call fast float @llvm.exp2.f32(float [[__YLOGX]]) #[[ATTR0]]
+; CHECK-NEXT: [[__FABS:%.*]] = call fast float @llvm.fabs.f32(float [[X]]) #[[ATTR5:[0-9]+]]
+; CHECK-NEXT: [[__LOG2:%.*]] = call fast float @llvm.log2.f32(float [[__FABS]]) #[[ATTR5]]
+; CHECK-NEXT: [[POWNI2F:%.*]] = call fast float @llvm.experimental.constrained.sitofp.f32.i32(i32 [[Y]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.round"(i32 7), "fpe.except"(i32 2) ]
+; CHECK-NEXT: [[__YLOGX:%.*]] = call fast float @llvm.experimental.constrained.fmul.f32(float [[POWNI2F]], float [[__LOG2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.round"(i32 7), "fpe.except"(i32 2) ]
+; CHECK-NEXT: [[__EXP2:%.*]] = call fast float @llvm.exp2.f32(float [[__YLOGX]]) #[[ATTR5]]
; CHECK-NEXT: [[__YEVEN:%.*]] = shl i32 [[Y]], 31
; CHECK-NEXT: [[TMP0:%.*]] = bitcast float [[X]] to i32
; CHECK-NEXT: [[__POW_SIGN:%.*]] = and i32 [[__YEVEN]], [[TMP0]]
>From 7dd3f8779c550d02ea4aaffe06ab8cb2edd2e4a5 Mon Sep 17 00:00:00 2001
From: Serge Pavlov <sepavloff at gmail.com>
Date: Wed, 25 Sep 2024 17:25:00 +0700
Subject: [PATCH 2/7] Use metadata for bundle values
---
clang/test/CodeGen/strictfp_builtins.c | 10 +-
.../cl20-device-side-enqueue-attributes.cl | 2 +-
llvm/docs/LangRef.rst | 37 +-
llvm/include/llvm/ADT/FloatingPointMode.h | 9 -
llvm/include/llvm/AsmParser/LLParser.h | 5 +
llvm/include/llvm/IR/FPEnv.h | 21 +-
llvm/include/llvm/IR/IRBuilder.h | 49 +--
llvm/include/llvm/IR/InstrTypes.h | 7 +
llvm/include/llvm/IR/IntrinsicInst.h | 7 +
llvm/include/llvm/IR/LLVMContext.h | 2 +-
llvm/lib/AsmParser/LLParser.cpp | 48 +--
llvm/lib/Bitcode/Reader/BitcodeReader.cpp | 8 +-
llvm/lib/IR/AutoUpgrade.cpp | 56 +--
llvm/lib/IR/FPEnv.cpp | 45 ++-
llvm/lib/IR/IRBuilder.cpp | 61 +++-
llvm/lib/IR/Instructions.cpp | 34 +-
llvm/lib/IR/IntrinsicInst.cpp | 33 ++
llvm/lib/IR/LLVMContext.cpp | 4 +-
llvm/lib/IR/Verifier.cpp | 57 +--
.../Scalar/TailRecursionElimination.cpp | 2 +-
llvm/lib/Transforms/Utils/CloneFunction.cpp | 22 +-
llvm/test/Bitcode/auto-upgrade-constrained.ll | 327 ++++++++++++++++++
.../Bitcode/auto-upgrade-constrained.ll.bc | Bin 0 -> 8120 bytes
.../AMDGPU/amdgpu-simplify-libcall-pown.ll | 4 +-
llvm/test/Transforms/Attributor/nofpclass.ll | 4 +-
.../test/Transforms/Inline/inline-strictfp.ll | 25 +-
llvm/test/Verifier/fp-intrinsics.ll | 8 +-
27 files changed, 657 insertions(+), 230 deletions(-)
create mode 100644 llvm/test/Bitcode/auto-upgrade-constrained.ll
create mode 100644 llvm/test/Bitcode/auto-upgrade-constrained.ll.bc
diff --git a/clang/test/CodeGen/strictfp_builtins.c b/clang/test/CodeGen/strictfp_builtins.c
index 2e7581157797116..053265dcc0667fa 100644
--- a/clang/test/CodeGen/strictfp_builtins.c
+++ b/clang/test/CodeGen/strictfp_builtins.c
@@ -31,21 +31,21 @@ void p(char *str, int x) {
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[ISZERO:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double 0.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5:[0-9]+]] [ "fpe.except"(i32 2) ]
+// CHECK-NEXT: [[ISZERO:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double 0.000000e+00, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5:[0-9]+]] [ "fpe.except"(metadata !"strict") ]
// CHECK-NEXT: br i1 [[ISZERO]], label [[FPCLASSIFY_END:%.*]], label [[FPCLASSIFY_NOT_ZERO:%.*]]
// CHECK: fpclassify_end:
// CHECK-NEXT: [[FPCLASSIFY_RESULT:%.*]] = phi i32 [ 4, [[ENTRY:%.*]] ], [ 0, [[FPCLASSIFY_NOT_ZERO]] ], [ 1, [[FPCLASSIFY_NOT_NAN:%.*]] ], [ [[TMP2:%.*]], [[FPCLASSIFY_NOT_INF:%.*]] ]
// CHECK-NEXT: call void @p(ptr noundef @.str.1, i32 noundef [[FPCLASSIFY_RESULT]]) #[[ATTR4]]
// CHECK-NEXT: ret void
// CHECK: fpclassify_not_zero:
-// CHECK-NEXT: [[CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP0]], metadata !"uno", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(i32 2) ]
+// CHECK-NEXT: [[CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP0]], metadata !"uno", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(metadata !"strict") ]
// CHECK-NEXT: br i1 [[CMP]], label [[FPCLASSIFY_END]], label [[FPCLASSIFY_NOT_NAN]]
// CHECK: fpclassify_not_nan:
// CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR6:[0-9]+]]
-// CHECK-NEXT: [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(i32 2) ]
+// CHECK-NEXT: [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(metadata !"strict") ]
// CHECK-NEXT: br i1 [[ISINF]], label [[FPCLASSIFY_END]], label [[FPCLASSIFY_NOT_INF]]
// CHECK: fpclassify_not_inf:
-// CHECK-NEXT: [[ISNORMAL:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x10000000000000, metadata !"uge", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(i32 2) ]
+// CHECK-NEXT: [[ISNORMAL:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x10000000000000, metadata !"uge", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(metadata !"strict") ]
// CHECK-NEXT: [[TMP2]] = select i1 [[ISNORMAL]], i32 2, i32 3
// CHECK-NEXT: br label [[FPCLASSIFY_END]]
//
@@ -157,7 +157,7 @@ void test_double_isfinite(double d) {
// CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[TMP0]]) #[[ATTR6]]
-// CHECK-NEXT: [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(i32 2) ]
+// CHECK-NEXT: [[ISINF:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP1]], double 0x7FF0000000000000, metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.except"(metadata !"strict") ]
// CHECK-NEXT: [[TMP2:%.*]] = bitcast double [[TMP0]] to i64
// CHECK-NEXT: [[TMP3:%.*]] = icmp slt i64 [[TMP2]], 0
// CHECK-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 -1, i32 1
diff --git a/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl b/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
index 4d931b0e1051334..31f1aa60780b9e6 100644
--- a/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
+++ b/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
@@ -144,7 +144,7 @@ kernel void device_side_enqueue(global float *a, global float *b, int i) {
// STRICTFP-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(4) [[BLOCK_CAPTURE_ADDR1]], align 4
// STRICTFP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr addrspace(1) [[TMP0]], i32 [[TMP1]]
// STRICTFP-NEXT: [[TMP2:%.*]] = load float, ptr addrspace(1) [[ARRAYIDX]], align 4
-// STRICTFP-NEXT: [[TMP3:%.*]] = call float @llvm.experimental.constrained.fmuladd.f32(float 4.000000e+00, float [[TMP2]], float 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.round"(i32 1), "fpe.except"(i32 2) ]
+// STRICTFP-NEXT: [[TMP3:%.*]] = call float @llvm.experimental.constrained.fmuladd.f32(float 4.000000e+00, float [[TMP2]], float 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"strict") ]
// STRICTFP-NEXT: [[BLOCK_CAPTURE_ADDR2:%.*]] = getelementptr inbounds nuw <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr addrspace(4) [[DOTBLOCK_DESCRIPTOR]], i32 0, i32 3
// STRICTFP-NEXT: [[TMP4:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[BLOCK_CAPTURE_ADDR2]], align 4
// STRICTFP-NEXT: [[BLOCK_CAPTURE_ADDR3:%.*]] = getelementptr inbounds nuw <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr addrspace(4) [[DOTBLOCK_DESCRIPTOR]], i32 0, i32 4
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 37eaf0acec6c974..79beea32f4110b9 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -3013,19 +3013,36 @@ Floating-point Environment Operand Bundles
These operand bundles provide details on how the operation interacts with the
:ref:`floating-point environment <floatenv>`. There are two kinds of such
operand bundles, which characterize interaction with floating-point control
-modes and status bits.
+modes and status bits respectively.
-An operand bundle tagged with "fpe.round" may be associated with the operations
-that may depend on rounding mode. It has an integer value, which represents
-the rounding mode with the same encoding as ``llvm::RoundingMode`` uses. If it
-is present and is not equal to ``RoundingMode::Dynamic``, it specifies the
-rounding mode that will be used for evaluating the operation. The value
-``RoundingMode::Dynamic`` indicates that the rounding mode used by the
-operation is specified in a floating-point control register.
+An operand bundle tagged with "fpe.control" keeps information about the
+control modes used by the operation. Currently only the rounding mode is
+supported. It is represented by a metadata string value, which specifies the
+rounding mode that will be used for evaluating the operation. Possible values
+are:
+
+::
+
+ "rtz" - toward zero
+ "rte" - to nearest, ties to even
+ "rtp" - toward positive infinity
+ "rtn" - toward negative infinity
+ "rmm" - to nearest, ties away from zero
+ "dyn" - rounding mode is taken from control register
+
+If "fpe.control" is absent, default rounding rounding to nearest, ties to even
+is assumed.
An operand bundle tagged with "fpe.except" may be associated with the operations
-that may read or write floating-point exception flags. It has the same meaning
-and encoding as the corresponding argument in
+that may read or write floating-point exception flags. It has a single metadata
+string value, which must be one of:
+
+::
+
+ "ignore"
+ "strict"
+ "maytrap"
+
+It has the same meaning as the corresponding argument in
:ref:`constrained intrinsics <_constrainedfp>`.
.. _moduleasm:
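
As a minimal sketch of the resulting IR (value names are illustrative, not
taken from the patch), a call that rounds toward zero and uses strict
exception semantics carries both bundles:

  %r = call float @llvm.experimental.constrained.fadd.f32(float %a, float %b, metadata !"round.towardzero", metadata !"fpexcept.strict") [ "fpe.control"(metadata !"rtz"), "fpe.except"(metadata !"strict") ]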
diff --git a/llvm/include/llvm/ADT/FloatingPointMode.h b/llvm/include/llvm/ADT/FloatingPointMode.h
index 970cc89093924ba..639d931ef88fec5 100644
--- a/llvm/include/llvm/ADT/FloatingPointMode.h
+++ b/llvm/include/llvm/ADT/FloatingPointMode.h
@@ -47,15 +47,6 @@ enum class RoundingMode : int8_t {
Invalid = -1 ///< Denotes invalid value.
};
-inline bool isValidRoundingMode(int X) {
- return X >= 0 && X <= static_cast<int>(RoundingMode::Dynamic);
-}
-
-inline RoundingMode castToRoundingMode(int X) {
- assert(isValidRoundingMode(X));
- return static_cast<RoundingMode>(X);
-}
-
/// Returns text representation of the given rounding mode.
inline StringRef spell(RoundingMode RM) {
switch (RM) {
diff --git a/llvm/include/llvm/AsmParser/LLParser.h b/llvm/include/llvm/AsmParser/LLParser.h
index 8b195b028783f0b..d4859d79d31e2ea 100644
--- a/llvm/include/llvm/AsmParser/LLParser.h
+++ b/llvm/include/llvm/AsmParser/LLParser.h
@@ -566,6 +566,11 @@ namespace llvm {
bool resolveFunctionType(Type *RetType, ArrayRef<ParamInfo> ArgList,
FunctionType *&FuncTy);
+ void updateConstrainedIntrinsic(ValID &CalleeID,
+ SmallVectorImpl<ParamInfo> &Args,
+ SmallVectorImpl<OperandBundleDef> &Bundles,
+ AttrBuilder &FnAttrs);
+
// Constant Parsing.
bool parseValID(ValID &ID, PerFunctionState *PFS,
Type *ExpectedTy = nullptr);
diff --git a/llvm/include/llvm/IR/FPEnv.h b/llvm/include/llvm/IR/FPEnv.h
index e4602bab6038e0a..58a0c1956598c06 100644
--- a/llvm/include/llvm/IR/FPEnv.h
+++ b/llvm/include/llvm/IR/FPEnv.h
@@ -43,31 +43,26 @@ enum ExceptionBehavior : uint8_t {
}
-inline bool isValidExceptionBehavior(unsigned X) {
- return X <= fp::ExceptionBehavior::ebStrict;
-}
-
-inline fp::ExceptionBehavior castToExceptionBehavior(unsigned X) {
- assert(isValidExceptionBehavior(X));
- return static_cast<fp::ExceptionBehavior>(X);
-}
-
/// Returns a valid RoundingMode enumerator when given a string
/// that is valid as input in constrained intrinsic rounding mode
/// metadata.
-std::optional<RoundingMode> convertStrToRoundingMode(StringRef);
+std::optional<RoundingMode> convertStrToRoundingMode(StringRef,
+ bool InBundle = false);
/// For any RoundingMode enumerator, returns a string valid as input in
/// constrained intrinsic rounding mode metadata.
-std::optional<StringRef> convertRoundingModeToStr(RoundingMode);
+std::optional<StringRef> convertRoundingModeToStr(RoundingMode,
+ bool InBundle = false);
/// Returns a valid ExceptionBehavior enumerator when given a string
/// valid as input in constrained intrinsic exception behavior metadata.
-std::optional<fp::ExceptionBehavior> convertStrToExceptionBehavior(StringRef);
+std::optional<fp::ExceptionBehavior>
+convertStrToExceptionBehavior(StringRef, bool InBundle = false);
/// For any ExceptionBehavior enumerator, returns a string valid as
/// input in constrained intrinsic exception behavior metadata.
-std::optional<StringRef> convertExceptionBehaviorToStr(fp::ExceptionBehavior);
+std::optional<StringRef> convertExceptionBehaviorToStr(fp::ExceptionBehavior,
+ bool InBundle = false);
/// Returns true if the exception handling behavior and rounding mode
/// match what is used in the default floating point environment.
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index 82c5a769ca44bcd..37a62770ed126cc 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -1338,15 +1338,6 @@ class IRBuilderBase {
return I;
}
- RoundingMode
- getEffectiveRounding(std::optional<RoundingMode> Rounding = std::nullopt) {
- RoundingMode RM = DefaultConstrainedRounding;
-
- if (Rounding)
- RM = *Rounding;
- return RM;
- }
-
Value *getConstrainedFPRounding(std::optional<RoundingMode> Rounding) {
RoundingMode UseRounding = DefaultConstrainedRounding;
@@ -1361,14 +1352,6 @@ class IRBuilderBase {
return MetadataAsValue::get(Context, RoundingMDS);
}
- fp::ExceptionBehavior getEffectiveExceptionBehavior(
- std::optional<fp::ExceptionBehavior> Except = std::nullopt) {
- fp::ExceptionBehavior EB = DefaultConstrainedExcept;
- if (Except)
- EB = *Except;
- return EB;
- }
-
Value *getConstrainedFPExcept(std::optional<fp::ExceptionBehavior> Except) {
std::optional<StringRef> ExceptStr = convertExceptionBehaviorToStr(
Except.value_or(DefaultConstrainedExcept));
@@ -2473,24 +2456,13 @@ class IRBuilderBase {
CallInst *CreateCall(FunctionType *FTy, Value *Callee,
ArrayRef<Value *> Args = {}, const Twine &Name = "",
MDNode *FPMathTag = nullptr) {
- CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
- if (IsFPConstrained)
- setConstrainedFPCallAttr(CI);
- if (isa<FPMathOperator>(CI))
- setFPAttrs(CI, FPMathTag, FMF);
- return Insert(CI, Name);
+ return CreateCall(FTy, Callee, Args, DefaultOperandBundles, Name,
+ FPMathTag);
}
CallInst *CreateCall(FunctionType *FTy, Value *Callee, ArrayRef<Value *> Args,
ArrayRef<OperandBundleDef> OpBundles,
- const Twine &Name = "", MDNode *FPMathTag = nullptr) {
- CallInst *CI = CallInst::Create(FTy, Callee, Args, OpBundles);
- if (IsFPConstrained)
- setConstrainedFPCallAttr(CI);
- if (isa<FPMathOperator>(CI))
- setFPAttrs(CI, FPMathTag, FMF);
- return Insert(CI, Name);
- }
+ const Twine &Name = "", MDNode *FPMathTag = nullptr);
CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args = {},
const Twine &Name = "", MDNode *FPMathTag = nullptr) {
@@ -2509,10 +2481,6 @@ class IRBuilderBase {
Function *Callee, ArrayRef<Value *> Args, const Twine &Name = "",
std::optional<RoundingMode> Rounding = std::nullopt,
std::optional<fp::ExceptionBehavior> Except = std::nullopt);
- CallInst *CreateConstrainedFPCall(
- Intrinsic::ID ID, ArrayRef<Value *> Args, const Twine &Name = "",
- std::optional<RoundingMode> Rounding = std::nullopt,
- std::optional<fp::ExceptionBehavior> Except = std::nullopt);
Value *CreateSelect(Value *C, Value *True, Value *False,
const Twine &Name = "", Instruction *MDFrom = nullptr);
@@ -2715,17 +2683,10 @@ class IRBuilderBase {
void
createFPRoundingBundle(SmallVectorImpl<OperandBundleDef> &Bundles,
- std::optional<RoundingMode> Rounding = std::nullopt) {
- int RM = static_cast<int32_t>(getEffectiveRounding(Rounding));
- Bundles.emplace_back("fpe.round", getInt32(RM));
- }
-
+ std::optional<RoundingMode> Rounding = std::nullopt);
void createFPExceptionBundle(
SmallVectorImpl<OperandBundleDef> &Bundles,
- std::optional<fp::ExceptionBehavior> Except = std::nullopt) {
- int EB = getEffectiveExceptionBehavior(Except);
- Bundles.emplace_back("fpe.except", getInt32(EB));
- }
+ std::optional<fp::ExceptionBehavior> Except = std::nullopt);
};
/// This provides a uniform API for creating instructions and inserting
diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h
index b2a20a231182ad8..2cc6c0359bf7ad1 100644
--- a/llvm/include/llvm/IR/InstrTypes.h
+++ b/llvm/include/llvm/IR/InstrTypes.h
@@ -1100,6 +1100,13 @@ template <typename InputTy> class OperandBundleDefT {
using OperandBundleDef = OperandBundleDefT<Value *>;
using ConstOperandBundleDef = OperandBundleDefT<const Value *>;
+void addFPRoundingBundle(LLVMContext &Ctx,
+ SmallVectorImpl<OperandBundleDef> &Bundles,
+ RoundingMode Rounding);
+void addFPExceptionBundle(LLVMContext &Ctx,
+ SmallVectorImpl<OperandBundleDef> &Bundles,
+ fp::ExceptionBehavior Except);
+
//===----------------------------------------------------------------------===//
// CallBase Class
//===----------------------------------------------------------------------===//
diff --git a/llvm/include/llvm/IR/IntrinsicInst.h b/llvm/include/llvm/IR/IntrinsicInst.h
index b5134a9ee900c5e..a248a9612a82d0d 100644
--- a/llvm/include/llvm/IR/IntrinsicInst.h
+++ b/llvm/include/llvm/IR/IntrinsicInst.h
@@ -128,6 +128,10 @@ class IntrinsicInst : public CallInst {
/// course of IR transformations
static bool mayLowerToFunctionCall(Intrinsic::ID IID);
+ /// Check if the specified intrinsic can read or write FP environment.
+  /// Constrained intrinsics are not handled by this check.
+ static bool canAccessFPEnvironment(Intrinsic::ID IID);
+
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const CallInst *I) {
if (const Function *CF = I->getCalledFunction())
@@ -139,6 +143,9 @@ class IntrinsicInst : public CallInst {
}
};
+std::optional<RoundingMode> getRoundingModeArg(const CallBase &I);
+std::optional<fp::ExceptionBehavior> getExceptionBehaviorArg(const CallBase &I);
+
/// Check if \p ID corresponds to a lifetime intrinsic.
static inline bool isLifetimeIntrinsic(Intrinsic::ID ID) {
switch (ID) {
diff --git a/llvm/include/llvm/IR/LLVMContext.h b/llvm/include/llvm/IR/LLVMContext.h
index cc7f66f17e2eb75..f8d428ffdddadd6 100644
--- a/llvm/include/llvm/IR/LLVMContext.h
+++ b/llvm/include/llvm/IR/LLVMContext.h
@@ -96,7 +96,7 @@ class LLVMContext {
OB_ptrauth = 7, // "ptrauth"
OB_kcfi = 8, // "kcfi"
OB_convergencectrl = 9, // "convergencectrl"
- OB_fpe_round = 10, // "fpe.round"
+ OB_fpe_control = 10, // "fpe.control"
OB_fpe_except = 11, // "fpe.except"
};
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index 56becdcea896c92..9c0d779a20d43c0 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -6317,16 +6317,19 @@ bool isOldDbgFormatIntrinsic(StringRef Name) {
FnID == Intrinsic::dbg_assign;
}
-bool updateConstrainedIntrinsic(StringRef Name, ArrayRef<Value *> Args,
- SmallVectorImpl<OperandBundleDef> &Bundles,
- LLVMContext &C) {
+void LLParser::updateConstrainedIntrinsic(
+ ValID &CalleeID, SmallVectorImpl<LLParser::ParamInfo> &Args,
+ SmallVectorImpl<OperandBundleDef> &Bundles, AttrBuilder &FnAttrs) {
if (Args.empty())
- return false;
- if (!Name.starts_with("llvm.experimental.constrained."))
- return false;
+ return;
+
+ StringRef Name = CalleeID.StrVal;
+ if (!Name.consume_front("llvm.experimental.constrained."))
+ return;
+
for (auto &B : Bundles) {
if (B.getTag().starts_with("fpe."))
- return false;
+ return;
}
const auto getMetadataArgumentValue = [](Value *Arg) -> StringRef {
@@ -6340,25 +6343,24 @@ bool updateConstrainedIntrinsic(StringRef Name, ArrayRef<Value *> Args,
};
if (Args.size() > 1) {
- Value *V = Args.take_back(2).front();
- if (StringRef VStr = getMetadataArgumentValue(V); !VStr.empty()) {
- if (auto RM = convertStrToRoundingMode(VStr)) {
- int RMVal = static_cast<int>(*RM);
- Bundles.emplace_back("fpe.round",
- ConstantInt::get(Type::getInt32Ty(C), RMVal));
- }
+ Value *V = Args[Args.size() - 2].V;
+ StringRef VStr = getMetadataArgumentValue(V);
+ if (!VStr.empty()) {
+ if (auto RM = convertStrToRoundingMode(VStr))
+ addFPRoundingBundle(Context, Bundles, *RM);
}
}
- Value *V = Args.back();
- if (StringRef VStr = getMetadataArgumentValue(V); !VStr.empty()) {
- if (auto EB = convertStrToExceptionBehavior(VStr)) {
- Bundles.emplace_back("fpe.except",
- ConstantInt::get(Type::getInt32Ty(C), *EB));
- }
+ Value *V = Args.back().V;
+ StringRef VStr = getMetadataArgumentValue(V);
+ if (!VStr.empty()) {
+ if (auto EB = convertStrToExceptionBehavior(VStr))
+ addFPExceptionBundle(Context, Bundles, *EB);
}
- return true;
+ MemoryEffects ME = MemoryEffects::inaccessibleMemOnly();
+ FnAttrs.addAttribute(Attribute::getWithMemoryEffects(Context, ME));
+ FnAttrs.addAttribute(Attribute::StrictFP);
}
/// FunctionHeader
@@ -8087,6 +8089,8 @@ bool LLParser::parseCall(Instruction *&Inst, PerFunctionState &PFS,
parseOptionalOperandBundles(BundleList, PFS))
return true;
+ updateConstrainedIntrinsic(CalleeID, ArgList, BundleList, FnAttrs);
+
// If RetType is a non-function pointer type, then this is the short syntax
// for the call, which means that RetType is just the return type. Infer the
// rest of the function argument types from the arguments that are present.
@@ -8134,8 +8138,6 @@ bool LLParser::parseCall(Instruction *&Inst, PerFunctionState &PFS,
AttributeList::get(Context, AttributeSet::get(Context, FnAttrs),
AttributeSet::get(Context, RetAttrs), Attrs);
- updateConstrainedIntrinsic(CalleeID.StrVal, Args, BundleList, Context);
-
CallInst *CI = CallInst::Create(Ty, Callee, Args, BundleList);
CI->setTailCallKind(TCK);
CI->setCallingConv(CC);
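
A sketch of the intended effect of this parser change, assuming the bundle
encoding described in the LangRef hunk above: a textual call that spells only
the metadata arguments,

  %r = call float @llvm.experimental.constrained.fadd.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.ignore")

is parsed as if the bundles and attributes were written explicitly:

  %r = call float @llvm.experimental.constrained.fadd.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0 [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"ignore") ]

where attribute group #0 (the number is illustrative) contains strictfp and
memory(inaccessiblemem: readwrite).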
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index a01ecf0d56642e5..f2c3e6686f1639f 100644
--- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -7134,9 +7134,11 @@ Error BitcodeReader::materializeModule() {
if (CallInst *CI = dyn_cast<CallInst>(U))
UpgradeIntrinsicCall(CI, I.second);
}
- if (!I.first->use_empty())
- I.first->replaceAllUsesWith(I.second);
- I.first->eraseFromParent();
+ if (I.second) {
+ if (!I.first->use_empty())
+ I.first->replaceAllUsesWith(I.second);
+ I.first->eraseFromParent();
+ }
}
UpgradedIntrinsics.clear();
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index cc2b112e9ec858e..4f446edec5aee88 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -1193,6 +1193,8 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
F->getParent(), ID, F->getFunctionType()->getReturnType());
return true;
}
+ if (Name.starts_with("experimental.constrained."))
+ return true;
break; // No other 'e*'.
case 'f':
if (Name.starts_with("flt.rounds")) {
@@ -4326,34 +4328,24 @@ static void upgradeDbgIntrinsicToDbgRecord(StringRef Name, CallBase *CI) {
static CallBase *upgradeConstrainedIntrinsicCall(CallBase *CB, Function *F,
IRBuilder<> &Builder) {
- if (CB->getOperandBundle(LLVMContext::OB_fpe_round))
+ if (CB->getOperandBundle(LLVMContext::OB_fpe_control) ||
+ CB->getOperandBundle(LLVMContext::OB_fpe_except))
return nullptr;
- auto *CFPI = cast<ConstrainedFPIntrinsic>(F);
SmallVector<OperandBundleDef, 2> NewBundles;
- LLVMContext &C = CB->getContext();
- auto RM = CFPI->getRoundingMode();
+ auto RM = getRoundingModeArg(*CB);
if (RM) {
auto CurrentRM = CB->getRoundingMode();
- if (CurrentRM) {
- assert(*RM == *CurrentRM);
- } else {
- int RMValue = static_cast<int>(*RM);
- NewBundles.emplace_back("fpe.round",
- ConstantInt::get(Type::getInt32Ty(C), RMValue));
- }
+ assert(!CurrentRM && "unexpected rounding bundle");
+ Builder.createFPRoundingBundle(NewBundles, RM);
}
- auto EB = CFPI->getExceptionBehavior();
+ auto EB = getExceptionBehaviorArg(*CB);
if (EB) {
auto CurrentEB = CB->getExceptionBehavior();
- if (CurrentEB) {
- assert(*EB == *CurrentEB);
- } else {
- NewBundles.emplace_back("fpe.except",
- ConstantInt::get(Type::getInt32Ty(C), *EB));
- }
+ assert(!CurrentEB && "unexpected exception bundle");
+ Builder.createFPExceptionBundle(NewBundles, EB);
}
CallInst *NewCB = nullptr;
@@ -4374,9 +4366,11 @@ static CallBase *upgradeConstrainedIntrinsicCall(CallBase *CB, Function *F,
FastMathFlags FMF = CB->getFastMathFlags();
NewCB->setFastMathFlags(FMF);
}
+
MemoryEffects ME = MemoryEffects::inaccessibleMemOnly();
- auto A = Attribute::getWithMemoryEffects(C, ME);
+ auto A = Attribute::getWithMemoryEffects(CB->getContext(), ME);
NewCB->addFnAttr(A);
+ NewCB->addFnAttr(Attribute::StrictFP);
}
return NewCB;
@@ -4410,7 +4404,7 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
bool IsARM = Name.consume_front("arm.");
bool IsAMDGCN = Name.consume_front("amdgcn.");
bool IsDbg = Name.consume_front("dbg.");
- bool IsConstrained = Name.starts_with("experimental.constrained.");
+ bool IsConstrained = Name.consume_front("experimental.constrained.");
Value *Rep = nullptr;
if (!IsX86 && Name == "stackprotectorcheck") {
@@ -4441,6 +4435,8 @@ void llvm::UpgradeIntrinsicCall(CallBase *CI, Function *NewFn) {
}
} else if (IsConstrained) {
Rep = upgradeConstrainedIntrinsicCall(CI, F, Builder);
+ if (!Rep)
+ return;
} else {
llvm_unreachable("Unknown function for CallBase upgrade.");
}
@@ -4964,7 +4960,8 @@ void llvm::UpgradeCallsToIntrinsic(Function *F) {
UpgradeIntrinsicCall(CB, NewFn);
// Remove old function, no longer used, from the module.
- F->eraseFromParent();
+ if (NewFn)
+ F->eraseFromParent();
}
}
@@ -5705,20 +5702,3 @@ void llvm::UpgradeOperandBundles(std::vector<OperandBundleDef> &Bundles) {
OBD.inputs().empty();
});
}
-
-CallBase *llvm::upgradeConstrainedFunctionCall(CallBase *CB) {
- Function *F = dyn_cast<Function>(CB->getCalledOperand());
- if (!F)
- return nullptr;
-
- if (CB->getNumOperands() < 1)
- return nullptr;
-
- StringRef Name = F->getName();
- if (!Name.starts_with("experimental.constrained."))
- return nullptr;
-
- LLVMContext &C = CB->getContext();
- IRBuilder<> Builder(C);
- return upgradeConstrainedIntrinsicCall(CB, F, Builder);
-}
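
The bitcode path has the same observable effect; a sketch based on the
auto-upgrade test added below (value names are illustrative):

  ; call as stored in old bitcode, without bundles
  %r = call float @llvm.experimental.constrained.fmuladd.f32(float %a, float %b, float %c, metadata !"round.tonearestaway", metadata !"fpexcept.ignore")
  ; the same call after UpgradeIntrinsicCall
  %r = call float @llvm.experimental.constrained.fmuladd.f32(float %a, float %b, float %c, metadata !"round.tonearestaway", metadata !"fpexcept.ignore") #0 [ "fpe.control"(metadata !"rmm"), "fpe.except"(metadata !"ignore") ]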
diff --git a/llvm/lib/IR/FPEnv.cpp b/llvm/lib/IR/FPEnv.cpp
index 67f21d3756e936a..91a962eb8190bc8 100644
--- a/llvm/lib/IR/FPEnv.cpp
+++ b/llvm/lib/IR/FPEnv.cpp
@@ -21,7 +21,18 @@
namespace llvm {
-std::optional<RoundingMode> convertStrToRoundingMode(StringRef RoundingArg) {
+std::optional<RoundingMode> convertStrToRoundingMode(StringRef RoundingArg,
+ bool InBundle) {
+ if (InBundle)
+ return StringSwitch<std::optional<RoundingMode>>(RoundingArg)
+ .Case("dyn", RoundingMode::Dynamic)
+ .Case("rte", RoundingMode::NearestTiesToEven)
+ .Case("rmm", RoundingMode::NearestTiesToAway)
+ .Case("rtn", RoundingMode::TowardNegative)
+ .Case("rtp", RoundingMode::TowardPositive)
+ .Case("rtz", RoundingMode::TowardZero)
+ .Default(std::nullopt);
+
// For dynamic rounding mode, we use round to nearest but we will set the
// 'exact' SDNodeFlag so that the value will not be rounded.
return StringSwitch<std::optional<RoundingMode>>(RoundingArg)
@@ -34,26 +45,27 @@ std::optional<RoundingMode> convertStrToRoundingMode(StringRef RoundingArg) {
.Default(std::nullopt);
}
-std::optional<StringRef> convertRoundingModeToStr(RoundingMode UseRounding) {
+std::optional<StringRef> convertRoundingModeToStr(RoundingMode UseRounding,
+ bool InBundle) {
std::optional<StringRef> RoundingStr;
switch (UseRounding) {
case RoundingMode::Dynamic:
- RoundingStr = "round.dynamic";
+ RoundingStr = InBundle ? "dyn" : "round.dynamic";
break;
case RoundingMode::NearestTiesToEven:
- RoundingStr = "round.tonearest";
+ RoundingStr = InBundle ? "rte" : "round.tonearest";
break;
case RoundingMode::NearestTiesToAway:
- RoundingStr = "round.tonearestaway";
+ RoundingStr = InBundle ? "rmm" : "round.tonearestaway";
break;
case RoundingMode::TowardNegative:
- RoundingStr = "round.downward";
+ RoundingStr = InBundle ? "rtn" : "round.downward";
break;
case RoundingMode::TowardPositive:
- RoundingStr = "round.upward";
+ RoundingStr = InBundle ? "rtp" : "round.upward";
break;
case RoundingMode::TowardZero:
- RoundingStr = "round.towardzero";
+ RoundingStr = InBundle ? "rtz" : "round.towardzero";
break;
default:
break;
@@ -62,7 +74,14 @@ std::optional<StringRef> convertRoundingModeToStr(RoundingMode UseRounding) {
}
std::optional<fp::ExceptionBehavior>
-convertStrToExceptionBehavior(StringRef ExceptionArg) {
+convertStrToExceptionBehavior(StringRef ExceptionArg, bool InBundle) {
+ if (InBundle)
+ return StringSwitch<std::optional<fp::ExceptionBehavior>>(ExceptionArg)
+ .Case("ignore", fp::ebIgnore)
+ .Case("maytrap", fp::ebMayTrap)
+ .Case("strict", fp::ebStrict)
+ .Default(std::nullopt);
+
return StringSwitch<std::optional<fp::ExceptionBehavior>>(ExceptionArg)
.Case("fpexcept.ignore", fp::ebIgnore)
.Case("fpexcept.maytrap", fp::ebMayTrap)
@@ -71,17 +90,17 @@ convertStrToExceptionBehavior(StringRef ExceptionArg) {
}
std::optional<StringRef>
-convertExceptionBehaviorToStr(fp::ExceptionBehavior UseExcept) {
+convertExceptionBehaviorToStr(fp::ExceptionBehavior UseExcept, bool InBundle) {
std::optional<StringRef> ExceptStr;
switch (UseExcept) {
case fp::ebStrict:
- ExceptStr = "fpexcept.strict";
+ ExceptStr = InBundle ? "strict" : "fpexcept.strict";
break;
case fp::ebIgnore:
- ExceptStr = "fpexcept.ignore";
+ ExceptStr = InBundle ? "ignore" : "fpexcept.ignore";
break;
case fp::ebMayTrap:
- ExceptStr = "fpexcept.maytrap";
+ ExceptStr = InBundle ? "maytrap" : "fpexcept.maytrap";
break;
}
return ExceptStr;
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index 95e91930210f072..abc20acdefddacd 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -86,6 +86,43 @@ IRBuilderBase::createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
return CI;
}
+CallInst *IRBuilderBase::CreateCall(FunctionType *FTy, Value *Callee,
+ ArrayRef<Value *> Args,
+ ArrayRef<OperandBundleDef> OpBundles,
+ const Twine &Name, MDNode *FPMathTag) {
+ ArrayRef<OperandBundleDef> ActualBundlesRef = OpBundles;
+ SmallVector<OperandBundleDef, 2> ActualBundles;
+
+ if (IsFPConstrained) {
+ if (const auto *Func = dyn_cast<Function>(Callee)) {
+ if (Intrinsic::ID ID = Func->getIntrinsicID()) {
+ if (IntrinsicInst::canAccessFPEnvironment(ID)) {
+ bool NeedRound = true, NeedExcept = true;
+ for (const auto &Item : OpBundles) {
+            if (NeedRound && Item.getTag() == "fpe.control")
+ NeedRound = false;
+ else if (NeedExcept && Item.getTag() == "fpe.except")
+ NeedExcept = false;
+ ActualBundles.push_back(Item);
+ }
+ if (NeedRound && Intrinsic::hasConstrainedFPRoundingModeOperand(ID))
+ createFPRoundingBundle(ActualBundles);
+ if (NeedExcept)
+ createFPExceptionBundle(ActualBundles);
+ ActualBundlesRef = ActualBundles;
+ }
+ }
+ }
+ }
+
+ CallInst *CI = CallInst::Create(FTy, Callee, Args, ActualBundlesRef);
+ if (IsFPConstrained)
+ setConstrainedFPCallAttr(CI);
+ if (isa<FPMathOperator>(CI))
+ setFPAttrs(CI, FPMathTag, FMF);
+ return Insert(CI, Name);
+}
+
Value *IRBuilderBase::CreateVScale(Constant *Scaling, const Twine &Name) {
assert(isa<ConstantInt>(Scaling) && "Expected constant integer");
if (cast<ConstantInt>(Scaling)->isZero())
@@ -904,7 +941,7 @@ CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
Instruction *FMFSource,
const Twine &Name) {
Module *M = BB->getModule();
- Function *Fn = Intrinsic::getDeclaration(M, ID, Types);
+ Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, Types);
return createCallHelper(Fn, Args, Name, FMFSource, OpBundles);
}
@@ -962,9 +999,8 @@ CallInst *IRBuilderBase::CreateConstrainedFPUnroundedBinOp(
FastMathFlags UseFMF = FMFSource.get(FMF);
- SmallVector<OperandBundleDef, 2> OpBundles;
- int EB = getEffectiveExceptionBehavior(Except);
- OpBundles.emplace_back("fpe.except", getInt32(EB));
+ SmallVector<OperandBundleDef, 1> OpBundles;
+ createFPExceptionBundle(OpBundles, Except);
CallInst *C = CreateIntrinsic(ID, {L->getType()}, {L, R, ExceptV}, OpBundles,
nullptr, Name);
@@ -1041,8 +1077,7 @@ CallInst *IRBuilderBase::CreateConstrainedFPCmp(
Value *ExceptV = getConstrainedFPExcept(Except);
SmallVector<OperandBundleDef, 1> OpBundles;
- int EB = getEffectiveExceptionBehavior(Except);
- OpBundles.emplace_back("fpe.except", getInt32(EB));
+ createFPExceptionBundle(OpBundles, Except);
CallInst *C = CreateIntrinsic(ID, {L->getType()}, {L, R, PredicateV, ExceptV},
OpBundles, nullptr, Name);
@@ -1306,6 +1341,20 @@ CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
}
+void IRBuilderBase::createFPRoundingBundle(
+ SmallVectorImpl<OperandBundleDef> &Bundles,
+ std::optional<RoundingMode> Rounding) {
+ addFPRoundingBundle(Context, Bundles,
+ Rounding.value_or(DefaultConstrainedRounding));
+}
+
+void IRBuilderBase::createFPExceptionBundle(
+ SmallVectorImpl<OperandBundleDef> &Bundles,
+ std::optional<fp::ExceptionBehavior> Except) {
+ addFPExceptionBundle(Context, Bundles,
+ Except.value_or(DefaultConstrainedExcept));
+}
+
IRBuilderDefaultInserter::~IRBuilderDefaultInserter() = default;
IRBuilderCallbackInserter::~IRBuilderCallbackInserter() = default;
IRBuilderFolder::~IRBuilderFolder() = default;
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index 2a851802a12325c..64c08eb8b37889a 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -605,19 +605,19 @@ bool CallBase::hasClobberingOperandBundles() const {
}
std::optional<RoundingMode> CallBase::getRoundingMode() const {
- if (auto RoundingBundle = getOperandBundle(LLVMContext::OB_fpe_round)) {
- uint64_t RM =
- cast<ConstantInt>(RoundingBundle->Inputs.front())->getSExtValue();
- return castToRoundingMode(RM);
+ if (auto RoundingBundle = getOperandBundle(LLVMContext::OB_fpe_control)) {
+ Value *V = RoundingBundle->Inputs.front();
+ Metadata *MD = cast<MetadataAsValue>(V)->getMetadata();
+ return convertStrToRoundingMode(cast<MDString>(MD)->getString(), true);
}
return std::nullopt;
}
std::optional<fp::ExceptionBehavior> CallBase::getExceptionBehavior() const {
if (auto ExceptionBundle = getOperandBundle(LLVMContext::OB_fpe_except)) {
- uint64_t EB =
- cast<ConstantInt>(ExceptionBundle->Inputs.front())->getZExtValue();
- return castToExceptionBehavior(EB);
+ Value *V = ExceptionBundle->Inputs.front();
+ Metadata *MD = cast<MetadataAsValue>(V)->getMetadata();
+ return convertStrToExceptionBehavior(cast<MDString>(MD)->getString(), true);
}
return std::nullopt;
}
@@ -693,6 +693,26 @@ void CallBase::setOnlyAccessesInaccessibleMemOrArgMem() {
MemoryEffects::inaccessibleOrArgMemOnly());
}
+void llvm::addFPRoundingBundle(LLVMContext &Ctx,
+ SmallVectorImpl<OperandBundleDef> &Bundles,
+ RoundingMode Rounding) {
+ std::optional<StringRef> RndStr = convertRoundingModeToStr(Rounding, true);
+ assert(RndStr && "Garbage rounding mode!");
+ auto *RoundingMDS = MDString::get(Ctx, *RndStr);
+ auto *RM = MetadataAsValue::get(Ctx, RoundingMDS);
+ Bundles.emplace_back("fpe.control", RM);
+}
+
+void llvm::addFPExceptionBundle(LLVMContext &Ctx,
+ SmallVectorImpl<OperandBundleDef> &Bundles,
+ fp::ExceptionBehavior Except) {
+ std::optional<StringRef> ExcStr = convertExceptionBehaviorToStr(Except, true);
+ assert(ExcStr && "Garbage exception behavior!");
+ auto *ExceptMDS = MDString::get(Ctx, *ExcStr);
+ auto *EB = MetadataAsValue::get(Ctx, ExceptMDS);
+ Bundles.emplace_back("fpe.except", EB);
+}
+
//===----------------------------------------------------------------------===//
// CallInst Implementation
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/IR/IntrinsicInst.cpp b/llvm/lib/IR/IntrinsicInst.cpp
index 2d2011cd44ff8b1..0b832e371136ea0 100644
--- a/llvm/lib/IR/IntrinsicInst.cpp
+++ b/llvm/lib/IR/IntrinsicInst.cpp
@@ -66,6 +66,39 @@ bool IntrinsicInst::mayLowerToFunctionCall(Intrinsic::ID IID) {
}
}
+bool IntrinsicInst::canAccessFPEnvironment(Intrinsic::ID IID) {
+ switch (IID) {
+#define FUNCTION(NAME, A, R, I) case Intrinsic::NAME:
+#include "llvm/IR/ConstrainedOps.def"
+ return true;
+ default:
+ return false;
+ }
+}
+
+std::optional<RoundingMode> llvm::getRoundingModeArg(const CallBase &I) {
+ unsigned NumOperands = I.arg_size();
+ Metadata *MD = nullptr;
+ auto *MAV = dyn_cast<MetadataAsValue>(I.getArgOperand(NumOperands - 2));
+ if (MAV)
+ MD = MAV->getMetadata();
+ if (!MD || !isa<MDString>(MD))
+ return std::nullopt;
+ return convertStrToRoundingMode(cast<MDString>(MD)->getString());
+}
+
+std::optional<fp::ExceptionBehavior>
+llvm::getExceptionBehaviorArg(const CallBase &I) {
+ unsigned NumOperands = I.arg_size();
+ Metadata *MD = nullptr;
+ auto *MAV = dyn_cast<MetadataAsValue>(I.getArgOperand(NumOperands - 1));
+ if (MAV)
+ MD = MAV->getMetadata();
+ if (!MD || !isa<MDString>(MD))
+ return std::nullopt;
+ return convertStrToExceptionBehavior(cast<MDString>(MD)->getString());
+}
+
//===----------------------------------------------------------------------===//
/// DbgVariableIntrinsic - This is the common base class for debug info
/// intrinsics for variables.
diff --git a/llvm/lib/IR/LLVMContext.cpp b/llvm/lib/IR/LLVMContext.cpp
index f0f9673f3e8cc9f..d21a7e6a42bec36 100644
--- a/llvm/lib/IR/LLVMContext.cpp
+++ b/llvm/lib/IR/LLVMContext.cpp
@@ -82,8 +82,8 @@ LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) {
assert(Entry->second == BundleTagID && "operand bundle id drifted!");
}
- auto *RoundingEntry = pImpl->getOrInsertBundleTag("fpe.round");
- assert(RoundingEntry->second == LLVMContext::OB_fpe_round &&
+ auto *RoundingEntry = pImpl->getOrInsertBundleTag("fpe.control");
+ assert(RoundingEntry->second == LLVMContext::OB_fpe_control &&
"fpe.round operand bundle id drifted!");
(void)RoundingEntry;
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 712836a227b347c..57ba1fbc0734465 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -3794,23 +3794,32 @@ void Verifier::visitCallBase(CallBase &Call) {
"Multiple \"clang.arc.attachedcall\" operand bundles", Call);
FoundAttachedCallBundle = true;
verifyAttachedCallBundle(Call, BU);
- } else if (Tag == LLVMContext::OB_fpe_round) {
+ } else if (Tag == LLVMContext::OB_fpe_control) {
      Check(!FoundFpeRoundBundle, "Multiple fpe.control operand bundles", Call);
      Check(BU.Inputs.size() == 1,
            "Expected exactly one fpe.control bundle operand", Call);
- auto RM = dyn_cast<ConstantInt>(BU.Inputs.front());
- Check(RM, "Value of fpe.round bundle operand must be an integer", Call);
- Check(isValidRoundingMode(RM->getSExtValue()),
- "Invalid value of fpe.round bundle operand", Call);
+ auto *V = dyn_cast<MetadataAsValue>(BU.Inputs.front());
+ Check(V, "Value of fpe.round bundle operand must be a metadata", Call);
+ auto *MDS = dyn_cast<MDString>(V->getMetadata());
+ Check(MDS, "Value of fpe.round bundle operand must be a string", Call);
+ auto RM = convertStrToRoundingMode(MDS->getString(), true);
+ Check(RM.has_value(),
+ "Value of fpe.round bundle operand is not a correct rounding mode",
+ Call);
FoundFpeRoundBundle = true;
} else if (Tag == LLVMContext::OB_fpe_except) {
Check(!FoundFpeExceptBundle, "Multiple fpe.except operand bundles", Call);
Check(BU.Inputs.size() == 1,
"Expected exactly one fpe.except bundle operand", Call);
- auto EB = dyn_cast<ConstantInt>(BU.Inputs.front());
- Check(EB, "Value of fpe.except bundle operand must be an integer", Call);
- Check(isValidExceptionBehavior(EB->getZExtValue()),
- "Invalid value of fpe.except bundle operand", Call);
+ auto *V = dyn_cast<MetadataAsValue>(BU.Inputs.front());
+ Check(V, "Value of fpe.except bundle operand must be a metadata", Call);
+ auto *MDS = dyn_cast<MDString>(V->getMetadata());
+ Check(MDS, "Value of fpe.except bundle operand must be a string", Call);
+ auto EB = convertStrToExceptionBehavior(MDS->getString(), true);
+ Check(EB.has_value(),
+ "Value of fpe.except bundle operand is not a correct exception "
+ "behavior",
+ Call);
FoundFpeExceptBundle = true;
}
}
@@ -3852,23 +3861,25 @@ void Verifier::verifyConstrainedInstrinsicCall(const CallBase &CB) {
// FP metadata arguments must not conflict with the corresponding
// operand bundles.
- if (std::optional<RoundingMode> RM = CFPI->getRoundingMode()) {
+ if (std::optional<RoundingMode> RM = getRoundingModeArg(CB)) {
RoundingMode Rounding = *RM;
- auto RoundingBundle = CB.getOperandBundle(LLVMContext::OB_fpe_round);
+ auto RoundingBundle = CB.getOperandBundle(LLVMContext::OB_fpe_control);
Check(RoundingBundle,
"Constrained intrinsic has a rounding argument but the call does not",
CB);
if (RoundingBundle) {
- OperandBundleUse OBU = *RoundingBundle;
- uint64_t BundleRM = cast<ConstantInt>(OBU.Inputs.front())->getZExtValue();
- Check(BundleRM == static_cast<uint64_t>(Rounding),
- "Rounding mode of the constrained intrinsic differs from that in "
- "operand bundle",
- CB);
+ std::optional<RoundingMode> RMByBundle = CB.getRoundingMode();
+ Check(RMByBundle, "Invalid value of rounding mode bundle", CB);
+ if (RMByBundle) {
+ Check(*RMByBundle == Rounding,
+ "Rounding mode of the constrained intrinsic differs from that in "
+ "operand bundle",
+ CB);
+ }
}
}
- if (std::optional<fp::ExceptionBehavior> EB = CFPI->getExceptionBehavior()) {
+ if (std::optional<fp::ExceptionBehavior> EB = getExceptionBehaviorArg(CB)) {
fp::ExceptionBehavior Excepts = *EB;
auto ExceptionBundle = CB.getOperandBundle(LLVMContext::OB_fpe_except);
Check(ExceptionBundle,
@@ -3876,12 +3887,16 @@ void Verifier::verifyConstrainedInstrinsicCall(const CallBase &CB) {
"call does not",
CB);
if (ExceptionBundle) {
- OperandBundleUse OBU = *ExceptionBundle;
- uint64_t BundleEB = cast<ConstantInt>(OBU.Inputs.front())->getZExtValue();
- Check(BundleEB == static_cast<uint64_t>(Excepts),
+ std::optional<fp::ExceptionBehavior> EBByBundle =
+ CB.getExceptionBehavior();
+ Check(EBByBundle, "Invalid value of exception behavior bundle", CB);
+ if (EBByBundle) {
+ Check(
+ *EBByBundle == Excepts,
"Exception behavior of the constrained intrinsic differs from that "
"in operand bundle",
CB);
+ }
}
}
}
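
For example, this hand-written, ill-formed call (hypothetical, not taken from
the tests) is now rejected because the rounding metadata argument disagrees
with the bundle:

  ; error: Rounding mode of the constrained intrinsic differs from that in operand bundle
  %r = call float @llvm.experimental.constrained.fadd.f32(float %a, float %b, metadata !"round.tonearest", metadata !"fpexcept.strict") [ "fpe.control"(metadata !"rtz"), "fpe.except"(metadata !"strict") ]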
diff --git a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
index 478691d031150f4..7538c84b03bfa41 100644
--- a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -255,7 +255,7 @@ static bool markTails(Function &F, OptimizationRemarkEmitter *ORE) {
CI->isNoTailCall() ||
CI->hasOperandBundlesOtherThan(
{LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_ptrauth,
- LLVMContext::OB_kcfi, LLVMContext::OB_fpe_round,
+ LLVMContext::OB_kcfi, LLVMContext::OB_fpe_control,
LLVMContext::OB_fpe_except});
if (!IsNoTail && CI->doesNotAccessMemory()) {
diff --git a/llvm/lib/Transforms/Utils/CloneFunction.cpp b/llvm/lib/Transforms/Utils/CloneFunction.cpp
index 64648f9459cae99..6fa3f1dea825136 100644
--- a/llvm/lib/Transforms/Utils/CloneFunction.cpp
+++ b/llvm/lib/Transforms/Utils/CloneFunction.cpp
@@ -440,7 +440,6 @@ struct PruningFunctionCloner {
Instruction *
PruningFunctionCloner::cloneInstruction(BasicBlock::const_iterator II) {
const Instruction &OldInst = *II;
- Instruction *NewInst = nullptr;
if (HostFuncIsStrictFP) {
Intrinsic::ID CIID = getConstrainedIntrinsicID(OldInst);
if (CIID != Intrinsic::not_intrinsic) {
@@ -498,24 +497,21 @@ PruningFunctionCloner::cloneInstruction(BasicBlock::const_iterator II) {
if (Intrinsic::hasConstrainedFPRoundingModeOperand(CIID)) {
Args.push_back(
MetadataAsValue::get(Ctx, MDString::get(Ctx, "round.tonearest")));
- Bundles.emplace_back(
- "fpe.round",
- ConstantInt::get(
- Type::getInt32Ty(Ctx),
- static_cast<int>(RoundingMode::NearestTiesToEven)));
+ addFPRoundingBundle(Ctx, Bundles, RoundingMode::NearestTiesToEven);
}
Args.push_back(
MetadataAsValue::get(Ctx, MDString::get(Ctx, "fpexcept.ignore")));
- Bundles.emplace_back("fpe.except",
- ConstantInt::get(Type::getInt32Ty(Ctx),
- fp::ExceptionBehavior::ebIgnore));
- NewInst =
+ addFPExceptionBundle(Ctx, Bundles, fp::ExceptionBehavior::ebIgnore);
+ auto *NewConstrainedInst =
CallInst::Create(IFn, Args, Bundles, OldInst.getName() + ".strict");
+
+ MemoryEffects ME = MemoryEffects::inaccessibleMemOnly();
+ auto A = Attribute::getWithMemoryEffects(Ctx, ME);
+ NewConstrainedInst->addFnAttr(A);
+ return NewConstrainedInst;
}
}
- if (!NewInst)
- NewInst = II->clone();
- return NewInst;
+ return OldInst.clone();
}
/// The specified block is found to be reachable, clone it and
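
A sketch of the intended cloning behavior, assuming a plain fadd from a
non-strictfp callee is cloned into a strictfp host (value names are
illustrative); the new call also receives the inaccessiblemem-only memory
attribute:

  ; in the original callee
  %s = fadd float %a, %b
  ; constrained replacement created in the strictfp host
  %s.strict = call float @llvm.experimental.constrained.fadd.f32(float %a, float %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"ignore") ]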
diff --git a/llvm/test/Bitcode/auto-upgrade-constrained.ll b/llvm/test/Bitcode/auto-upgrade-constrained.ll
new file mode 100644
index 000000000000000..8e3f2c4ad77896e
--- /dev/null
+++ b/llvm/test/Bitcode/auto-upgrade-constrained.ll
@@ -0,0 +1,327 @@
+; RUN: llvm-as < %s | llvm-dis | FileCheck %s
+; RUN: llvm-dis %s.bc -o - | FileCheck %s
+
+define float @test_fadd(float %a, float %b) strictfp {
+ %res = call float @llvm.experimental.constrained.fadd.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_fadd(
+; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0:[0-9]+]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"ignore") ]
+
+
+define float @test_fsub(float %a, float %b) strictfp {
+ %res = call float @llvm.experimental.constrained.fsub.f32(float %a, float %b, metadata !"round.tonearest", metadata !"fpexcept.ignore")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_fsub(
+; CHECK: call float @llvm.experimental.constrained.fsub.f32(float {{.*}}, float {{.*}}, metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"ignore") ]
+
+define float @test_fmul(float %a, float %b) strictfp {
+ %res = call float @llvm.experimental.constrained.fmul.f32(float %a, float %b, metadata !"round.downward", metadata !"fpexcept.ignore")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_fmul(
+; CHECK: call float @llvm.experimental.constrained.fmul.f32(float {{.*}}, float {{.*}}, metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"rtn"), "fpe.except"(metadata !"ignore") ]
+
+define float @test_fdiv(float %a, float %b) strictfp {
+ %res = call float @llvm.experimental.constrained.fdiv.f32(float %a, float %b, metadata !"round.upward", metadata !"fpexcept.ignore")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_fdiv(
+; CHECK: call float @llvm.experimental.constrained.fdiv.f32(float {{.*}}, float {{.*}}, metadata !"round.upward", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"rtp"), "fpe.except"(metadata !"ignore") ]
+
+define float @test_frem(float %a, float %b) strictfp {
+ %res = call float @llvm.experimental.constrained.frem.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_frem(
+; CHECK: call float @llvm.experimental.constrained.frem.f32(float {{.*}}, float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"ignore") ]
+
+define float @test_fma(float %a, float %b, float %c) strictfp {
+ %res = call float @llvm.experimental.constrained.fma.f32(float %a, float %b, float %c, metadata !"round.towardzero", metadata !"fpexcept.ignore")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_fma(
+; CHECK: call float @llvm.experimental.constrained.fma.f32(float {{.*}}, float {{.*}}, float {{.*}}, metadata !"round.towardzero", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"rtz"), "fpe.except"(metadata !"ignore") ]
+
+define float @test_fmuladd(float %a, float %b, float %c) strictfp {
+ %res = call float @llvm.experimental.constrained.fmuladd.f32(float %a, float %b, float %c, metadata !"round.tonearestaway", metadata !"fpexcept.ignore")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_fmuladd(
+; CHECK: call float @llvm.experimental.constrained.fmuladd.f32(float {{.*}}, float {{.*}}, float {{.*}}, metadata !"round.tonearestaway", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"rmm"), "fpe.except"(metadata !"ignore") ]
+
+define i32 @test_fptosi(float %a) strictfp {
+ %res = call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %a, metadata !"fpexcept.ignore")
+ ret i32 %res
+}
+; CHECK-LABEL: define i32 @test_fptosi(
+; CHECK: call i32 @llvm.experimental.constrained.fptosi.i32.f32(float {{.*}}, metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.except"(metadata !"ignore") ]
+
+define i32 @test_fptoui(float %a) strictfp {
+ %res = call i32 @llvm.experimental.constrained.fptoui.f32.i32(float %a, metadata !"fpexcept.strict")
+ ret i32 %res
+}
+; CHECK-LABEL: define i32 @test_fptoui(
+; CHECK: call i32 @llvm.experimental.constrained.fptoui.i32.f32(float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+
+define float @test_sitofp(i32 %a) strictfp {
+ %res = call float @llvm.experimental.constrained.sitofp.i32.f32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_sitofp(
+; CHECK: call float @llvm.experimental.constrained.sitofp.f32.i32(i32 {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"ignore") ]
+
+define float @test_uitofp(i32 %a) strictfp {
+ %res = call float @llvm.experimental.constrained.uitofp.i32.f32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_uitofp(
+; CHECK: call float @llvm.experimental.constrained.uitofp.f32.i32(i32 {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"ignore") ]
+
+define float @test_fptrunc(double %a) strictfp {
+ %res = call float @llvm.experimental.constrained.fptrunc.f32.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_fptrunc(
+; CHECK: call float @llvm.experimental.constrained.fptrunc.f32.f64(double {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"ignore") ]
+
+define double @test_fpext(float %a) strictfp {
+ %res = call double @llvm.experimental.constrained.fpext.f64.f32(float %a, metadata !"fpexcept.ignore")
+ ret double %res
+}
+; CHECK-LABEL: define double @test_fpext(
+; CHECK: call double @llvm.experimental.constrained.fpext.f64.f32(float {{.*}}, metadata !"fpexcept.ignore") #[[ATTR0]] [ "fpe.except"(metadata !"ignore") ]
+
+define float @test_sqrt(float %a) strictfp {
+ %res = call float @llvm.experimental.constrained.sqrt.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_sqrt(
+; CHECK: call float @llvm.experimental.constrained.sqrt.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_powi(float %a, i32 %b) strictfp {
+ %res = call float @llvm.experimental.constrained.powi.f32.i32(float %a, i32 %b, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_powi(
+; CHECK: call float @llvm.experimental.constrained.powi.f32(float {{.*}}, i32 %b, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_ldexp(float %a, i32 %b) strictfp {
+ %res = call float @llvm.experimental.constrained.ldexp.f32.i32(float %a, i32 %b, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_ldexp(
+; CHECK: call float @llvm.experimental.constrained.ldexp.f32.i32(float {{.*}}, i32 {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_asin(float %a) strictfp {
+ %res = call float @llvm.experimental.constrained.asin.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_asin(
+; CHECK: call float @llvm.experimental.constrained.asin.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_acos(float %a) strictfp {
+ %res = call float @llvm.experimental.constrained.acos.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_acos(
+; CHECK: call float @llvm.experimental.constrained.acos.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_atan(float %a) strictfp {
+ %res = call float @llvm.experimental.constrained.atan.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_atan(
+; CHECK: call float @llvm.experimental.constrained.atan.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_sin(float %a) strictfp {
+ %res = call float @llvm.experimental.constrained.sin.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_sin(
+; CHECK: call float @llvm.experimental.constrained.sin.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_cos(float %a) strictfp {
+ %res = call float @llvm.experimental.constrained.cos.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_cos(
+; CHECK: call float @llvm.experimental.constrained.cos.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_tan(float %a) strictfp {
+ %res = call float @llvm.experimental.constrained.tan.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_tan(
+; CHECK: call float @llvm.experimental.constrained.tan.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_sinh(float %a) strictfp {
+ %res = call float @llvm.experimental.constrained.sinh.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_sinh(
+; CHECK: call float @llvm.experimental.constrained.sinh.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_cosh(float %a) strictfp {
+ %res = call float @llvm.experimental.constrained.cosh.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_cosh(
+; CHECK: call float @llvm.experimental.constrained.cosh.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_tanh(float %a) strictfp {
+ %res = call float @llvm.experimental.constrained.tanh.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_tanh(
+; CHECK: call float @llvm.experimental.constrained.tanh.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_pow(float %a, float %b) strictfp {
+ %res = call float @llvm.experimental.constrained.pow.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_pow(
+; CHECK: call float @llvm.experimental.constrained.pow.f32(float {{.*}}, float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_log(float %a) strictfp {
+ %res = call float @llvm.experimental.constrained.log.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_log(
+; CHECK: call float @llvm.experimental.constrained.log.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_log10(float %a) strictfp {
+ %res = call float @llvm.experimental.constrained.log10.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_log10(
+; CHECK: call float @llvm.experimental.constrained.log10.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_log2(float %a) strictfp {
+ %res = call float @llvm.experimental.constrained.log2.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_log2(
+; CHECK: call float @llvm.experimental.constrained.log2.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_exp(float %a) strictfp {
+ %res = call float @llvm.experimental.constrained.exp.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_exp(
+; CHECK: call float @llvm.experimental.constrained.exp.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_exp2(float %a) strictfp {
+ %res = call float @llvm.experimental.constrained.exp2.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_exp2(
+; CHECK: call float @llvm.experimental.constrained.exp2.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_rint(float %a) strictfp {
+ %res = call float @llvm.experimental.constrained.rint.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_rint(
+; CHECK: call float @llvm.experimental.constrained.rint.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_nearbyint(float %a) strictfp {
+ %res = call float @llvm.experimental.constrained.nearbyint.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_nearbyint(
+; CHECK: call float @llvm.experimental.constrained.nearbyint.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define i32 @test_lrint(float %a) strictfp {
+ %res = call i32 @llvm.experimental.constrained.lrint.i32.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret i32 %res
+}
+; CHECK-LABEL: define i32 @test_lrint(
+; CHECK: call i32 @llvm.experimental.constrained.lrint.i32.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define i32 @test_llrint(float %a) strictfp {
+ %res = call i32 @llvm.experimental.constrained.llrint.i32.f32(float %a, metadata !"round.dynamic", metadata !"fpexcept.strict")
+ ret i32 %res
+}
+; CHECK-LABEL: define i32 @test_llrint(
+; CHECK: call i32 @llvm.experimental.constrained.llrint.i32.f32(float {{.*}}, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+
+define float @test_maxnum(float %a, float %b) strictfp {
+ %res = call float @llvm.experimental.constrained.maxnum.f32(float %a, float %b, metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_maxnum(
+; CHECK: call float @llvm.experimental.constrained.maxnum.f32(float {{.*}}, float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+
+define float @test_minnum(float %a, float %b) strictfp {
+ %res = call float @llvm.experimental.constrained.minnum.f32(float %a, float %b, metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_minnum(
+; CHECK: call float @llvm.experimental.constrained.minnum.f32(float {{.*}}, float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+
+define float @test_maximum(float %a, float %b) strictfp {
+ %res = call float @llvm.experimental.constrained.maximum.f32(float %a, float %b, metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_maximum(
+; CHECK: call float @llvm.experimental.constrained.maximum.f32(float {{.*}}, float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+
+define float @test_minimum(float %a, float %b) strictfp {
+ %res = call float @llvm.experimental.constrained.minimum.f32(float %a, float %b, metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_minimum(
+; CHECK: call float @llvm.experimental.constrained.minimum.f32(float {{.*}}, float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+
+define float @test_ceil(float %a) strictfp {
+ %res = call float @llvm.experimental.constrained.ceil.f32(float %a, metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_ceil(
+; CHECK: call float @llvm.experimental.constrained.ceil.f32(float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+
+define float @test_floor(float %a) strictfp {
+ %res = call float @llvm.experimental.constrained.floor.f32(float %a, metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_floor(
+; CHECK: call float @llvm.experimental.constrained.floor.f32(float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+
+define i32 @test_lround(float %a) strictfp {
+ %res = call i32 @llvm.experimental.constrained.lround.i32.f32(float %a, metadata !"fpexcept.strict")
+ ret i32 %res
+}
+; CHECK-LABEL: define i32 @test_lround(
+; CHECK: call i32 @llvm.experimental.constrained.lround.i32.f32(float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+
+define i32 @test_llround(float %a) strictfp {
+ %res = call i32 @llvm.experimental.constrained.llround.i32.f32(float %a, metadata !"fpexcept.strict")
+ ret i32 %res
+}
+; CHECK-LABEL: define i32 @test_llround(
+; CHECK: call i32 @llvm.experimental.constrained.llround.i32.f32(float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+
+define float @test_round(float %a) strictfp {
+ %res = call float @llvm.experimental.constrained.round.f32(float %a, metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_round(
+; CHECK: call float @llvm.experimental.constrained.round.f32(float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+
+define float @test_roundeven(float %a) strictfp {
+ %res = call float @llvm.experimental.constrained.roundeven.f32(float %a, metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_roundeven(
+; CHECK: call float @llvm.experimental.constrained.roundeven.f32(float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+
+define float @test_trunc(float %a) strictfp {
+ %res = call float @llvm.experimental.constrained.trunc.f32(float %a, metadata !"fpexcept.strict")
+ ret float %res
+}
+; CHECK-LABEL: define float @test_trunc(
+; CHECK: call float @llvm.experimental.constrained.trunc.f32(float {{.*}}, metadata !"fpexcept.strict") #[[ATTR0]] [ "fpe.except"(metadata !"strict") ]
+
+; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) }
diff --git a/llvm/test/Bitcode/auto-upgrade-constrained.ll.bc b/llvm/test/Bitcode/auto-upgrade-constrained.ll.bc
new file mode 100644
index 0000000000000000000000000000000000000000..75a84901b5cbc37231b35fbed978c176d4c1afa0
GIT binary patch
literal 8120
zcmb7I4^R_l7T^3K8z62{#7Z<tH%L3wDvePgm at oqbYqa(nsrIO&m4zgd8j=tK1Z};I
z!3Hf_usD<U&;x64s_ooh&zmw++W|s7im8Xa)3I}o4Hd0*s4}hgytDV`?JkM3oUn;Q
z_T}v_ at B6*qd+*yX78`pnFHc91Bm_Y!B$}+}Ui$3!&)qxu{QSYJ5}q~*QA-fyO}LrL
zS0jAD$l(w5FK8<o6s{J at wD|{kO_Ilx#B!DJjS4b-CBOJUYVlE-HeDyqcs4~_+H7(U
zs27x^1zTiA2l$pj at riWY_)IhY1y5VrRED*-WtOyd?K;AjhtV^0<fSbwkIR%Ryn_g`
z2S*SEtd(gK%04RIwfd1#cx$WaH=9zDpICW1skrnBvGyHawo%+AQ#WZ=A)=Ldm)I}i
zrK>wKJIqb8AsJsT&MpzR98?Qm+$-Z(elAuEPY=2IPYIGgl#OV>C$8L$`%e@%$;#Ax
z!Xbp!5M*5jf+WLreslD{@IrgB+>(CElJ-7{>_mRy;k<g}c2RjM=&~BHlqR!xl^5f1
zou^)lee6E;ALsdzt&O2g&VAq9%-^i~)hA~xbAMZKl%K{!NI)?!ocH?r)tLw)i#d?2
z-qtm1fCLqhev!hA(&{}3QaJG$L0S>xLam97`4dmeigO$bvDfPAh9Wa5QsI&{M=N%}
zhah-7wuw}qU%W_!AiS8XkS{qlKrBHJam<6Hm7C6E68>aL$6qz4v1sorIl`i}_wfTs
ztn`<6En-pn*PasIWJ+^3W~HzweVe(CMd`h{%`8d_&lN(tlaxFrVpCd^o5rT}Nn19H
zQs$d%N*i|I5O|VGHw0`-=O5e4qI7)rYBr at lF`H7#swtF+18hpAvsbZII-Ct7I*C$=
z^+}dW=NG=g?xjsHv3qG>>H=2bZfwqA_tL#7lrrTjS(FZ*S;VH~d7VvZL+%unKCWS_
zbY~x1r5oNzCy!qk?@DOh4fjGt0wEc9!K56QYXc(jeO0K5YqW?k+5NttI(+9b$vpi2
zWKcub)-GRG_(0dgu#|qFts);7Rv3gk8*Re~505Vi^LzZWNR&nF{QkH5*u>0w0m>qF
z{Y8<CMQr0)iHb$+%gyr&S;W5mba^w2Sluq(d=m at yPMDmP#ftXtieb at g)}m!vO at PhK
zBM-4tSa?Ztf#mZG<H=LIN3+Ob=JD{j4+OdKTp+iVMep+uil)e$@^y4d6}qa+MYCA+
zHszHCQdrXcbAGp9C?w;{Ql-~oU-h!+9efI{DreCfIviNm&7ybbue^C>EcI6WP(F*@
zBwsX`MN$?$r$HlN(|b)dwKHp^XM0(Fa&QRU7Dae?JPoRyZU&}w-Fx@^Q`F-Z`Df*@
zihH7Ks(O}n*;6LsAK6ps9ZsJ at Z;n01;hv-(IUw>Malnk9$T$O}mx at Wnz=*<@&MRm(
z>YBnW#U$G3Lu*4dl52?c2N`yo#Fk2mRsDU+vt0_CCW(dx=0^ktgT{guK|#dV;&r1t
z#Au at _=ys#q-GX{w(2WP>kw>txR%}Egb!wzn#<6iTwp2EtysTDSR4e-ll})ope=49K
zHs+rc7)}KYO#*#nJiWdg-BE?+RRulpP6pkrL3cS?pN-bvO at gouWN?c(Sd$U-go2GB
z|Dv1Y*v%+*rxzR32r34<6r){=2>pJEPz<RR1AgTYA-d3N%pWx7hv at g7u&&*xYZmC+
z!%xGazwFZgO`z`%Oa5#Ry32wdcfebP`pSdejG(tOC_npx^d=(x(Tt6 at Vpj^KBRDn&
zS>V~qOMR;TBGp%F#U;P$QdBWIqP)RV*)rDYx{WJ30{VzKYa)^IV1p0!h1RXT-72LN
zP`#oWA{5uv%FBJqojesqSgjl<MC*44bdj*W+gJdC{L*h2HR_w;W4Pt>6v#B#0DQ#g
z_H5J>@^88#!^V2Ct7hyc&4RwKid3T`b1#l4F9QLST;CliI2oAV6xOvE^CMwHPhb9E
z7;-d3Oebr^D6pyl0n38&zn{j&%B4R<MdP^isz!Phm;P+VoHFc+27`}3MMd97u~9R2
zSF_;TF+$PrS6&>MJEm4%Q!5A5N~qv<^<y^tkp7ghphuA39>|ZBor3B^vfF{DDcIl+
z%Fld<-O at -$qS*I^((gNAl`v0G`#uV}YzaZDPzrm8p_KlnqWowWq$_|*MG6Nxl|x<1
zXp!o2pTd?^sli65`br_lRgF;{sLMcWO~Eal!B<K6?umAup(Si+59@}FE1;bspqRv@
zo>NSyfa=*kWvM^iwsxxqyM;?{MMbyF(%V!h>}phW4VR7#L$dD{2I{NOMsZMHI<F_J
zhdw-N)I~r;L4Iq%5D^$a%yt1a-;FqeJPKEj%7h!6CGupC)`Tzcx3smLE9sPFTMSFI
z#rs>;@9JbO<jbD`=c;pBax9HbqS{_*b6b6tR<}3DzSZgSSmPgSy&ij|mkN&@gLCO(
zxTdCfvZ2?}?Pcx|=u1hHKOEk5O>w$w*7YK46d^aJhaO#ar5yXt46P|0r5aTFof*5`
zP5RNsY<hBhL8v;_H)ZaQu<j&Je>70g9_DX at Ayix%QCz|mqXhMS-LL8=6vJJLL8jP5
zA5VH41OmNCEVY>tJ?SqxkoY1DQW#3EhYo}J9~9^*?&sh at 42MtzjTnvw^c^r at f`ZO~
z0fwzL%mY7xBM6)bk=eq&W*(o!GYnO@@U!7mMU#L>iF;v=@Y?c{Cd(!Y3 at -vl5I9i)
zzQ`bfs~G+=R}HqV6AcKZU^Ci!T}~_EvDPyBEr4UguJUnjad+ycy+u0N!{W~Qzj2#{
zSUj%9)!=L(JQi at kYa(u|Up7|!v|EP3;R|_K><2lcfMYldg;Wj<=Q?*R@<iMYE^#Mo
zcUX3Jg2}^pP<=X8J7E&_yCtR_#bdkG;{q4tm<-w>Z};KCQ&GAq2z|JMy8KMl<z;_)
ztTc3O<~|Ug`rS2Ao21!b$Kp at ahG-xf!5d$rjq4gzT&iFEJm?owxon(|jue8A4}TXP
zXchuze*4|WZt0##VOne-6?fCB;nY)9w at e?G!4`AJu6>ykmd>q&;FKH9Vc9dosh>QY
z at 1#C4 at 0Z=>GadV|KADW~m=PbV3B$!HyWO8dPsDUJVX&?Qu+<4*S5K?QWWL)Hh`Ta@
zxTXZKuOz^sWZJlsYG-&e!P=Eg8+)>DzfJ)AVgguu0$NlhfL)sahv&It*KbGwyK!1<
ztakKll)V+7|C#2L!M7!dZ%+_^U`BkbKm61ls&mN3xbDq5D|f7EJ*Rz~=3sh>J688>
z0 at yVPV2uyO*1$7rAw0LVm at Kmi`B0QWZX@|;MLM&S6y6h+xqYPQcc_otL5jm at DEcay
z at -^C1xRaFJLN9jyhQxM@@5=U&vP0qw|6Vffk7C`(8)W(y;<BQB<fGq+eO>#>j632G
zFGS93N$#n6n^YW0zIg5(Qq_}ucS#3{pH0p<aEQztP1gN*n9RDDT;_S7eB!MX-;c-0
z?Bgk+(htamy(v8(eMm04ka98Y55SXpZH7b33$U at L3EE$yxO?HCwZ%qQER0cGSHT$7
zbq>a`*y|a?W38UJMZ_WT at 43CMT07$gvyL&r<FdJFFL+`mcszB^$_cR5=cTRM8V_T*
zT at B2)9TuyP`7%*!chVrD(pAeCUV`z&?!<09;IE>QaDy at 64r9Oq^c{z5YwWs26SK6m
z5nC9pjKc#2=sVQ$r=l_T$r1NE?wi$w&soPvRc&|1O~PldzVD9Pl~((N&<>Z&!_c75
zT8!zqZ?i0Gy_J!GR at 33Aug(FwR*$_J{DdQ?(&dDoA%xv&wd7E`<=B>LXF!3N98e(S
z%qS`?4k#|Ps0z*;3UcQJ($0*crhx+r27?2NswxLqHNgc`$3asYOfI(s;K!>s$CkT{
z!^-q{=h&BWQIDQ~&RA+Ipa at Vzrf*LhY(N~S!VWewir!o3Wg(~Omt`iKTRwoL|9gOw
zP_X}SAnMw>M8gTjp)~X!#W9lv5`hhm+RkQX$?S|AaCRrhfb{-4bHMnnJu?)Z4xB*t
znNifkfdh)}m>D-`x*pV?KXXJ*+c<zY?5fNYoLZ7<o9PFIdV<ePj?>eFdRB7?727+e
z=fxa7VmW9vIq+|*-HYc|ROD$b*1Y9}Zi!W!SE0?z)mbX?a`Toh*Hl#6mQ`BwELuWa
aNz{2=i|gE5J%q)&_`#>D!$F^@5afSC6V8AD
literal 0
HcmV?d00001
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll
index 3e4690ec3640f2f..418a98873eaa116 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll
@@ -821,8 +821,8 @@ define float @test_pown_fast_f32_strictfp(float %x, i32 %y) #1 {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[__FABS:%.*]] = call fast float @llvm.fabs.f32(float [[X]]) #[[ATTR5:[0-9]+]]
; CHECK-NEXT: [[__LOG2:%.*]] = call fast float @llvm.log2.f32(float [[__FABS]]) #[[ATTR5]]
-; CHECK-NEXT: [[POWNI2F:%.*]] = call fast float @llvm.experimental.constrained.sitofp.f32.i32(i32 [[Y]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.round"(i32 7), "fpe.except"(i32 2) ]
-; CHECK-NEXT: [[__YLOGX:%.*]] = call fast float @llvm.experimental.constrained.fmul.f32(float [[POWNI2F]], float [[__LOG2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.round"(i32 7), "fpe.except"(i32 2) ]
+; CHECK-NEXT: [[POWNI2F:%.*]] = call fast float @llvm.experimental.constrained.sitofp.f32.i32(i32 [[Y]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
+; CHECK-NEXT: [[__YLOGX:%.*]] = call fast float @llvm.experimental.constrained.fmul.f32(float [[POWNI2F]], float [[__LOG2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
; CHECK-NEXT: [[__EXP2:%.*]] = call fast float @llvm.exp2.f32(float [[__YLOGX]]) #[[ATTR5]]
; CHECK-NEXT: [[__YEVEN:%.*]] = shl i32 [[Y]], 31
; CHECK-NEXT: [[TMP0:%.*]] = bitcast float [[X]] to i32
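
The pown checks above also show the revised bundle encoding: the
payloads are now metadata strings instead of integer codes, so the
rounding and exception state stays readable in the IR. The change, as
a one-line before/after sketch taken directly from the checks:

  [ "fpe.round"(i32 7), "fpe.except"(i32 2) ]                            ; old encoding
  [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]   ; new encoding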
diff --git a/llvm/test/Transforms/Attributor/nofpclass.ll b/llvm/test/Transforms/Attributor/nofpclass.ll
index b97454a29d51358..4cedbaf2a36a33e 100644
--- a/llvm/test/Transforms/Attributor/nofpclass.ll
+++ b/llvm/test/Transforms/Attributor/nofpclass.ll
@@ -1937,7 +1937,7 @@ define float @constrained_sitofp(i32 %arg) strictfp {
; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind strictfp willreturn memory(inaccessiblemem: readwrite)
; CHECK-LABEL: define nofpclass(nan nzero sub) float @constrained_sitofp
; CHECK-SAME: (i32 [[ARG:%.*]]) #[[ATTR8:[0-9]+]] {
-; CHECK-NEXT: [[VAL:%.*]] = call nofpclass(nan nzero sub) float @llvm.experimental.constrained.sitofp.f32.i32(i32 [[ARG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR20]]
+; CHECK-NEXT: [[VAL:%.*]] = call nofpclass(nan nzero sub) float @llvm.experimental.constrained.sitofp.f32.i32(i32 [[ARG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR23:[0-9]+]]
; CHECK-NEXT: ret float [[VAL]]
;
%val = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %arg, metadata !"round.dynamic", metadata !"fpexcept.strict")
@@ -1948,7 +1948,7 @@ define float @constrained_uitofp(i32 %arg) strictfp {
; CHECK: Function Attrs: mustprogress nofree norecurse nosync nounwind strictfp willreturn memory(inaccessiblemem: readwrite)
; CHECK-LABEL: define nofpclass(nan ninf nzero sub nnorm) float @constrained_uitofp
; CHECK-SAME: (i32 [[ARG:%.*]]) #[[ATTR8]] {
-; CHECK-NEXT: [[VAL:%.*]] = call nofpclass(nan ninf nzero sub nnorm) float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[ARG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR20]]
+; CHECK-NEXT: [[VAL:%.*]] = call nofpclass(nan ninf nzero sub nnorm) float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[ARG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR23]]
; CHECK-NEXT: ret float [[VAL]]
;
%val = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %arg, metadata !"round.dynamic", metadata !"fpexcept.strict")
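
The nofpclass updates are pure renumbering: the constrained calls move
into a different attribute group, so the first check captures the new
number with a FileCheck variable and later checks reuse it. A minimal
sketch of that FileCheck idiom:

  ; CHECK: ... #[[ATTR23:[0-9]+]]   ; first occurrence defines ATTR23 from the regex
  ; CHECK: ... #[[ATTR23]]          ; later occurrences must match the captured number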
diff --git a/llvm/test/Transforms/Inline/inline-strictfp.ll b/llvm/test/Transforms/Inline/inline-strictfp.ll
index bc42fafd63943d3..5883002061c3043 100644
--- a/llvm/test/Transforms/Inline/inline-strictfp.ll
+++ b/llvm/test/Transforms/Inline/inline-strictfp.ll
@@ -15,8 +15,8 @@ entry:
%add = call float @llvm.experimental.constrained.fadd.f32(float %0, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret float %add
; CHECK-LABEL: @host_02
-; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float {{.*}}, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
-; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float {{.*}}, metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0:[0-9]+]]
+; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
}
@@ -34,8 +34,8 @@ entry:
%add = call float @llvm.experimental.constrained.fadd.f32(float %0, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret float %add
; CHECK-LABEL: @host_04
-; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float {{.*}}, metadata !"round.downward", metadata !"fpexcept.maytrap") #0
-; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float {{.*}}, metadata !"round.downward", metadata !"fpexcept.maytrap") #[[ATTR0]]
+; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
}
@@ -77,8 +77,8 @@ entry:
ret float %add
; CHECK-LABEL: @host_08
; CHECK: call float @func_ext(float {{.*}}) #0
-; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float {{.*}}, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
-; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float {{.*}}, metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
+; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
}
@@ -97,8 +97,8 @@ entry:
%add = call double @llvm.experimental.constrained.fadd.f64(double %0, double 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret double %add
; CHECK-LABEL: @host_10
-; CHECK: call double @llvm.experimental.constrained.fpext.f64.f32(float {{.*}}, metadata !"fpexcept.ignore") #0
-; CHECK: call double @llvm.experimental.constrained.fadd.f64(double {{.*}}, double 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+; CHECK: call double @llvm.experimental.constrained.fpext.f64.f32(float {{.*}}, metadata !"fpexcept.ignore") #[[ATTR0]]
+; CHECK: call double @llvm.experimental.constrained.fadd.f64(double {{.*}}, double 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
}
; fcmp does not depend on the rounding mode and has a metadata argument.
@@ -114,8 +114,8 @@ entry:
%cmp = call i1 @inlined_11(float %a, float %b) #0
ret i1 %cmp
; CHECK-LABEL: @host_12
-; CHECK: call float @llvm.experimental.constrained.fadd.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
-; CHECK: call i1 @llvm.experimental.constrained.fcmp.f32(float {{.*}}, metadata !"oeq", metadata !"fpexcept.ignore") #0
+; CHECK: call float @llvm.experimental.constrained.fadd.f32(float %a, float %b, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
+; CHECK: call i1 @llvm.experimental.constrained.fcmp.f32(float {{.*}}, metadata !"oeq", metadata !"fpexcept.ignore") #[[ATTR0]]
}
; Intrinsic 'ceil' has a constrained variant.
@@ -131,11 +131,12 @@ entry:
%add = call float @llvm.experimental.constrained.fadd.f32(float %0, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
ret float %add
; CHECK-LABEL: @host_14
-; CHECK: call float @llvm.experimental.constrained.ceil.f32(float %a, metadata !"fpexcept.ignore") #0
-; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
+; CHECK: call float @llvm.experimental.constrained.ceil.f32(float %a, metadata !"fpexcept.ignore") #[[ATTR0]]
+; CHECK: call float @llvm.experimental.constrained.fadd.f32(float {{.*}}, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR0]]
}
attributes #0 = { strictfp }
+; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) }
declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)
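
Two things happen in inline-strictfp.ll: the inlined constrained calls
now match through the captured group #[[ATTR0]] instead of a hard-coded
#0, and the trailing check pins down what that group contains. The
memory(inaccessiblemem: readwrite) part models the FP environment as
inaccessible memory the call may read and write, which is what keeps
these calls ordered. A minimal sketch of a function the new checks
would accept (names are illustrative):

  define float @f(float %a) strictfp {
    %r = call float @llvm.experimental.constrained.fadd.f32(float %a, float 2.000000e+00, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
    ret float %r
  }
  attributes #0 = { strictfp memory(inaccessiblemem: readwrite) }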
diff --git a/llvm/test/Verifier/fp-intrinsics.ll b/llvm/test/Verifier/fp-intrinsics.ll
index 4934843d5a2ed69..fd7b07abab93ff5 100644
--- a/llvm/test/Verifier/fp-intrinsics.ll
+++ b/llvm/test/Verifier/fp-intrinsics.ll
@@ -5,7 +5,7 @@ declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadat
; Test an illegal value for the rounding mode argument.
; CHECK: invalid rounding mode argument
-; CHECK-NEXT: %fadd = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.dynomic", metadata !"fpexcept.strict") #1
+; CHECK-NEXT: %fadd = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.dynomic", metadata !"fpexcept.strict") #{{[0-9]+}}
define double @f2(double %a, double %b) #0 {
entry:
%fadd = call double @llvm.experimental.constrained.fadd.f64(
@@ -17,7 +17,7 @@ entry:
; Test an illegal value for the exception behavior argument.
; CHECK-NEXT: invalid exception behavior argument
-; CHECK-NEXT: %fadd = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.restrict") #1
+; CHECK-NEXT: %fadd = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.restrict") #{{[0-9]+}}
define double @f3(double %a, double %b) #0 {
entry:
%fadd = call double @llvm.experimental.constrained.fadd.f64(
@@ -29,7 +29,7 @@ entry:
; Test an illegal value for the rounding mode argument.
; CHECK-NEXT: invalid rounding mode argument
-; CHECK-NEXT: %fadd = call double @llvm.experimental.constrained.sqrt.f64(double %a, metadata !"round.dynomic", metadata !"fpexcept.strict") #1
+; CHECK-NEXT: %fadd = call double @llvm.experimental.constrained.sqrt.f64(double %a, metadata !"round.dynomic", metadata !"fpexcept.strict") #{{[0-9]+}}
define double @f4(double %a) #0 {
entry:
%fadd = call double @llvm.experimental.constrained.sqrt.f64(
@@ -41,7 +41,7 @@ entry:
; Test an illegal value for the exception behavior argument.
; CHECK-NEXT: invalid exception behavior argument
-; CHECK-NEXT: %fadd = call double @llvm.experimental.constrained.sqrt.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.restrict") #1
+; CHECK-NEXT: %fadd = call double @llvm.experimental.constrained.sqrt.f64(double %a, metadata !"round.dynamic", metadata !"fpexcept.restrict") #{{[0-9]+}}
define double @f5(double %a) #0 {
entry:
%fadd = call double @llvm.experimental.constrained.sqrt.f64(
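
For the Verifier tests the exact attribute group number is irrelevant
to the diagnostics being checked, so the expectations are relaxed from
the literal #1 to a regex that tolerates renumbering:

  ; CHECK-NEXT: %fadd = call double @llvm.experimental.constrained.fadd.f64({{.*}}) #{{[0-9]+}}   ; condensed sketch; any group number matches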
>From 2adad3e3eb53bc350b90792c3ae5044bc149ca51 Mon Sep 17 00:00:00 2001
From: Serge Pavlov <sepavloff at gmail.com>
Date: Sun, 27 Oct 2024 17:05:47 +0700
Subject: [PATCH 3/7] Update tests in Transforms/EarlyCSE
---
.../Transforms/EarlyCSE/defaultfp-strictfp.ll | 67 ++++++++++---------
.../Transforms/EarlyCSE/ebstrict-strictfp.ll | 23 ++++---
.../Transforms/EarlyCSE/mixed-strictfp.ll | 59 ++++++++--------
.../Transforms/EarlyCSE/nonmixed-strictfp.ll | 59 ++++++++--------
.../Transforms/EarlyCSE/round-dyn-strictfp.ll | 43 ++++++------
.../test/Transforms/EarlyCSE/tfpropagation.ll | 17 ++---
6 files changed, 137 insertions(+), 131 deletions(-)
diff --git a/llvm/test/Transforms/EarlyCSE/defaultfp-strictfp.ll b/llvm/test/Transforms/EarlyCSE/defaultfp-strictfp.ll
index 3871822c9dc17a5..90904ead23e04aa 100644
--- a/llvm/test/Transforms/EarlyCSE/defaultfp-strictfp.ll
+++ b/llvm/test/Transforms/EarlyCSE/defaultfp-strictfp.ll
@@ -8,7 +8,7 @@
define double @multiple_fadd(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_fadd(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0:[0-9]+]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -20,8 +20,8 @@ define double @multiple_fadd(double %a, double %b) #0 {
define double @multiple_fadd_split(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_fadd_split(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -34,7 +34,7 @@ define double @multiple_fadd_split(double %a, double %b) #0 {
define double @multiple_fsub(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_fsub(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -46,8 +46,8 @@ define double @multiple_fsub(double %a, double %b) #0 {
define double @multiple_fsub_split(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_fsub_split(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -60,7 +60,7 @@ define double @multiple_fsub_split(double %a, double %b) #0 {
define double @multiple_fmul(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_fmul(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -72,8 +72,8 @@ define double @multiple_fmul(double %a, double %b) #0 {
define double @multiple_fmul_split(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_fmul_split(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -86,7 +86,7 @@ define double @multiple_fmul_split(double %a, double %b) #0 {
define double @multiple_fdiv(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_fdiv(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -98,8 +98,8 @@ define double @multiple_fdiv(double %a, double %b) #0 {
define double @multiple_fdiv_split(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_fdiv_split(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -112,7 +112,7 @@ define double @multiple_fdiv_split(double %a, double %b) #0 {
define double @multiple_frem(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_frem(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -124,8 +124,8 @@ define double @multiple_frem(double %a, double %b) #0 {
define double @multiple_frem_split(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_frem_split(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -138,7 +138,7 @@ define double @multiple_frem_split(double %a, double %b) #0 {
define i32 @multiple_fptoui(double %a) #0 {
; CHECK-LABEL: @multiple_fptoui(
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
; CHECK-NEXT: ret i32 [[TMP1]]
;
%1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -150,8 +150,8 @@ define i32 @multiple_fptoui(double %a) #0 {
define i32 @multiple_fptoui_split(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_fptoui_split(
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
; CHECK-NEXT: ret i32 [[TMP1]]
;
%1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -164,7 +164,7 @@ define i32 @multiple_fptoui_split(double %a, double %b) #0 {
define double @multiple_uitofp(i32 %a) #0 {
; CHECK-LABEL: @multiple_uitofp(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -176,8 +176,8 @@ define double @multiple_uitofp(i32 %a) #0 {
define double @multiple_uitofp_split(i32 %a) #0 {
; CHECK-LABEL: @multiple_uitofp_split(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -190,7 +190,7 @@ define double @multiple_uitofp_split(i32 %a) #0 {
define i32 @multiple_fptosi(double %a) #0 {
; CHECK-LABEL: @multiple_fptosi(
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
; CHECK-NEXT: ret i32 [[TMP1]]
;
%1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -202,8 +202,8 @@ define i32 @multiple_fptosi(double %a) #0 {
define i32 @multiple_fptosi_split(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_fptosi_split(
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
; CHECK-NEXT: ret i32 [[TMP1]]
;
%1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -216,7 +216,7 @@ define i32 @multiple_fptosi_split(double %a, double %b) #0 {
define double @multiple_sitofp(i32 %a) #0 {
; CHECK-LABEL: @multiple_sitofp(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -228,8 +228,8 @@ define double @multiple_sitofp(i32 %a) #0 {
define double @multiple_sitofp_split(i32 %a) #0 {
; CHECK-LABEL: @multiple_sitofp_split(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -243,7 +243,7 @@ define i1 @multiple_fcmp(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_fcmp(
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[A:%.*]], double [[B:%.*]], metadata !"oeq", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #0
; CHECK-NEXT: ret i1 [[TMP1]]
;
%1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -257,9 +257,9 @@ define i1 @multiple_fcmp(double %a, double %b) #0 {
define i1 @multiple_fcmp_split(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_fcmp_split(
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[A:%.*]], double [[B:%.*]], metadata !"oeq", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
; CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #0
; CHECK-NEXT: ret i1 [[TMP1]]
;
%1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -275,7 +275,7 @@ define i1 @multiple_fcmps(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_fcmps(
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A:%.*]], double [[B:%.*]], metadata !"oeq", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #0
; CHECK-NEXT: ret i1 [[TMP1]]
;
%1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -289,9 +289,9 @@ define i1 @multiple_fcmps(double %a, double %b) #0 {
define i1 @multiple_fcmps_split(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_fcmps_split(
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A:%.*]], double [[B:%.*]], metadata !"oeq", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
; CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #0
; CHECK-NEXT: ret i1 [[TMP1]]
;
%1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -304,6 +304,7 @@ define i1 @multiple_fcmps_split(double %a, double %b) #0 {
}
attributes #0 = { strictfp }
+; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) }
declare void @arbitraryfunc() #0
declare double @foo.f64(double, double) #0
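
In the default environment (round.tonearest, fpexcept.ignore) the
constrained calls behave like ordinary FP operations, so EarlyCSE may
fold identical ones; that is why every check above keeps a single
[[TMP1]] and returns it. A minimal sketch of the folding these tests
pin down, using the same metadata as the file:

  %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
  %2 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
  ; after EarlyCSE: %2 is gone and both uses read %1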
diff --git a/llvm/test/Transforms/EarlyCSE/ebstrict-strictfp.ll b/llvm/test/Transforms/EarlyCSE/ebstrict-strictfp.ll
index f2675ce7816a4e4..6183b7c1558a516 100644
--- a/llvm/test/Transforms/EarlyCSE/ebstrict-strictfp.ll
+++ b/llvm/test/Transforms/EarlyCSE/ebstrict-strictfp.ll
@@ -10,7 +10,7 @@ define double @fadd_strict(double %a, double %b) #0 {
; CHECK-LABEL: @fadd_strict(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0:[0-9]+]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
@@ -23,7 +23,7 @@ define double @fsub_strict(double %a, double %b) #0 {
; CHECK-LABEL: @fsub_strict(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
@@ -36,7 +36,7 @@ define double @fmul_strict(double %a, double %b) #0 {
; CHECK-LABEL: @fmul_strict(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
@@ -49,7 +49,7 @@ define double @fdiv_strict(double %a, double %b) #0 {
; CHECK-LABEL: @fdiv_strict(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
@@ -62,7 +62,7 @@ define double @frem_strict(double %a, double %b) #0 {
; CHECK-LABEL: @frem_strict(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
@@ -75,7 +75,7 @@ define i32 @fptoui_strict(double %a) #0 {
; CHECK-LABEL: @fptoui_strict(
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A:%.*]], metadata !"fpexcept.strict") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A]], metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
; CHECK-NEXT: ret i32 [[TMP2]]
;
%1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.strict") #0
@@ -88,7 +88,7 @@ define double @uitofp_strict(i32 %a) #0 {
; CHECK-LABEL: @uitofp_strict(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
@@ -101,7 +101,7 @@ define i32 @fptosi_strict(double %a) #0 {
; CHECK-LABEL: @fptosi_strict(
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A:%.*]], metadata !"fpexcept.strict") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A]], metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
; CHECK-NEXT: ret i32 [[TMP2]]
;
%1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.strict") #0
@@ -114,7 +114,7 @@ define double @sitofp_strict(i32 %a) #0 {
; CHECK-LABEL: @sitofp_strict(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
@@ -129,7 +129,7 @@ define i1 @fcmp_strict(double %a, double %b) #0 {
; CHECK-NEXT: [[TMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[A]], double [[B]], metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP1]] to i32
; CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP2]] to i32
-; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #0
; CHECK-NEXT: ret i1 [[TMP2]]
;
%1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0
@@ -146,7 +146,7 @@ define i1 @fcmps_strict(double %a, double %b) #0 {
; CHECK-NEXT: [[TMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A]], double [[B]], metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP1]] to i32
; CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP2]] to i32
-; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #0
; CHECK-NEXT: ret i1 [[TMP2]]
;
%1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict") #0
@@ -158,6 +158,7 @@ define i1 @fcmps_strict(double %a, double %b) #0 {
}
attributes #0 = { strictfp }
+; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) }
declare void @arbitraryfunc() #0
declare double @foo.f64(double, double) #0
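
With fpexcept.strict the calls can read and update the FP status
flags, so EarlyCSE must not merge them even when operands and metadata
are identical; the checks above keep both [[TMP1]] and [[TMP2]]
accordingly. A minimal sketch of what these tests require:

  %1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  %2 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ; after EarlyCSE: both calls remain; neither is folded into the other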
diff --git a/llvm/test/Transforms/EarlyCSE/mixed-strictfp.ll b/llvm/test/Transforms/EarlyCSE/mixed-strictfp.ll
index b79f7018b8d0d55..61e976ce4281739 100644
--- a/llvm/test/Transforms/EarlyCSE/mixed-strictfp.ll
+++ b/llvm/test/Transforms/EarlyCSE/mixed-strictfp.ll
@@ -10,7 +10,7 @@ define double @mixed_fadd_neginf(double %a, double %b) #0 {
; CHECK-LABEL: @mixed_fadd_neginf(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0:[0-9]+]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A]], double [[B]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -23,7 +23,7 @@ define double @mixed_fadd_maytrap(double %a, double %b) #0 {
; CHECK-LABEL: @mixed_fadd_maytrap(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -36,7 +36,7 @@ define double @mixed_fadd_strict(double %a, double %b) #0 {
; CHECK-LABEL: @mixed_fadd_strict(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -49,7 +49,7 @@ define double @mixed_fsub_neginf(double %a, double %b) #0 {
; CHECK-LABEL: @mixed_fsub_neginf(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A]], double [[B]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -62,7 +62,7 @@ define double @mixed_fsub_maytrap(double %a, double %b) #0 {
; CHECK-LABEL: @mixed_fsub_maytrap(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -75,7 +75,7 @@ define double @mixed_fsub_strict(double %a, double %b) #0 {
; CHECK-LABEL: @mixed_fsub_strict(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -88,7 +88,7 @@ define double @mixed_fmul_neginf(double %a, double %b) #0 {
; CHECK-LABEL: @mixed_fmul_neginf(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A]], double [[B]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -100,7 +100,7 @@ define double @mixed_fmul_maytrap(double %a, double %b) #0 {
; CHECK-LABEL: @mixed_fmul_maytrap(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -113,7 +113,7 @@ define double @mixed_fmul_strict(double %a, double %b) #0 {
; CHECK-LABEL: @mixed_fmul_strict(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -126,7 +126,7 @@ define double @mixed_fdiv_neginf(double %a, double %b) #0 {
; CHECK-LABEL: @mixed_fdiv_neginf(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A]], double [[B]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -139,7 +139,7 @@ define double @mixed_fdiv_maytrap(double %a, double %b) #0 {
; CHECK-LABEL: @mixed_fdiv_maytrap(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -152,7 +152,7 @@ define double @mixed_fdiv_strict(double %a, double %b) #0 {
; CHECK-LABEL: @mixed_fdiv_strict(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -165,7 +165,7 @@ define double @mixed_frem_neginf(double %a, double %b) #0 {
; CHECK-LABEL: @mixed_frem_neginf(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A]], double [[B]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -178,7 +178,7 @@ define double @mixed_frem_maytrap(double %a, double %b) #0 {
; CHECK-LABEL: @mixed_frem_maytrap(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -191,7 +191,7 @@ define double @mixed_frem_strict(double %a, double %b) #0 {
; CHECK-LABEL: @mixed_frem_strict(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A]], double [[B]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -204,7 +204,7 @@ define i32 @mixed_fptoui_maytrap(double %a) #0 {
; CHECK-LABEL: @mixed_fptoui_maytrap(
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A]], metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
; CHECK-NEXT: ret i32 [[TMP2]]
;
%1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -217,7 +217,7 @@ define i32 @mixed_fptoui_strict(double %a) #0 {
; CHECK-LABEL: @mixed_fptoui_strict(
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A]], metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
; CHECK-NEXT: ret i32 [[TMP2]]
;
%1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -230,7 +230,7 @@ define double @mixed_uitofp_neginf(i32 %a) #0 {
; CHECK-LABEL: @mixed_uitofp_neginf(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -243,7 +243,7 @@ define double @mixed_uitofp_maytrap(i32 %a) #0 {
; CHECK-LABEL: @mixed_uitofp_maytrap(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -256,7 +256,7 @@ define double @mixed_uitofp_strict(i32 %a) #0 {
; CHECK-LABEL: @mixed_uitofp_strict(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -269,7 +269,7 @@ define i32 @mixed_fptosi_maytrap(double %a) #0 {
; CHECK-LABEL: @mixed_fptosi_maytrap(
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A]], metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
; CHECK-NEXT: ret i32 [[TMP2]]
;
%1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -282,7 +282,7 @@ define i32 @mixed_fptosi_strict(double %a) #0 {
; CHECK-LABEL: @mixed_fptosi_strict(
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A]], metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
; CHECK-NEXT: ret i32 [[TMP2]]
;
%1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -295,7 +295,7 @@ define double @mixed_sitofp_neginf(i32 %a) #0 {
; CHECK-LABEL: @mixed_sitofp_neginf(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -308,7 +308,7 @@ define double @mixed_sitofp_maytrap(i32 %a) #0 {
; CHECK-LABEL: @mixed_sitofp_maytrap(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -321,7 +321,7 @@ define double @mixed_sitofp_strict(i32 %a) #0 {
; CHECK-LABEL: @mixed_sitofp_strict(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A]], metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -336,7 +336,7 @@ define i1 @mixed_fcmp_maytrap(double %a, double %b) #0 {
; CHECK-NEXT: [[TMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[A]], double [[B]], metadata !"oeq", metadata !"fpexcept.maytrap") #[[ATTR0]]
; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP1]] to i32
; CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP2]] to i32
-; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #0
; CHECK-NEXT: ret i1 [[TMP2]]
;
%1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -353,7 +353,7 @@ define i1 @mixed_fcmp_strict(double %a, double %b) #0 {
; CHECK-NEXT: [[TMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[A]], double [[B]], metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP1]] to i32
; CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP2]] to i32
-; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #0
; CHECK-NEXT: ret i1 [[TMP2]]
;
%1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -370,7 +370,7 @@ define i1 @mixed_fcmps_maytrap(double %a, double %b) #0 {
; CHECK-NEXT: [[TMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A]], double [[B]], metadata !"oeq", metadata !"fpexcept.maytrap") #[[ATTR0]]
; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP1]] to i32
; CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP2]] to i32
-; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #0
; CHECK-NEXT: ret i1 [[TMP2]]
;
%1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -387,7 +387,7 @@ define i1 @mixed_fcmps_strict(double %a, double %b) #0 {
; CHECK-NEXT: [[TMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A]], double [[B]], metadata !"oeq", metadata !"fpexcept.strict") #[[ATTR0]]
; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TMP1]] to i32
; CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP2]] to i32
-; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @bar.i32(i32 [[TMP3]], i32 [[TMP4]]) #0
; CHECK-NEXT: ret i1 [[TMP2]]
;
%1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -399,6 +399,7 @@ define i1 @mixed_fcmps_strict(double %a, double %b) #0 {
}
attributes #0 = { strictfp }
+; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) }
declare void @arbitraryfunc() #0
declare double @foo.f64(double, double) #0
diff --git a/llvm/test/Transforms/EarlyCSE/nonmixed-strictfp.ll b/llvm/test/Transforms/EarlyCSE/nonmixed-strictfp.ll
index 3acf5597dfc3fe6..1ce2fdd3f75de0b 100644
--- a/llvm/test/Transforms/EarlyCSE/nonmixed-strictfp.ll
+++ b/llvm/test/Transforms/EarlyCSE/nonmixed-strictfp.ll
@@ -10,7 +10,7 @@
define double @fadd_defaultenv(double %a, double %b) #0 {
; CHECK-LABEL: @fadd_defaultenv(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0:[0-9]+]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -22,7 +22,7 @@ define double @fadd_defaultenv(double %a, double %b) #0 {
define double @fadd_neginf(double %a, double %b) #0 {
; CHECK-LABEL: @fadd_neginf(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.downward", metadata !"fpexcept.ignore") #0
@@ -34,7 +34,7 @@ define double @fadd_neginf(double %a, double %b) #0 {
define double @fadd_maytrap(double %a, double %b) #0 {
; CHECK-LABEL: @fadd_maytrap(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0
@@ -46,7 +46,7 @@ define double @fadd_maytrap(double %a, double %b) #0 {
define double @fsub_defaultenv(double %a, double %b) #0 {
; CHECK-LABEL: @fsub_defaultenv(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -58,7 +58,7 @@ define double @fsub_defaultenv(double %a, double %b) #0 {
define double @fsub_neginf(double %a, double %b) #0 {
; CHECK-LABEL: @fsub_neginf(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.downward", metadata !"fpexcept.ignore") #0
@@ -70,7 +70,7 @@ define double @fsub_neginf(double %a, double %b) #0 {
define double @fsub_maytrap(double %a, double %b) #0 {
; CHECK-LABEL: @fsub_maytrap(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0
@@ -82,7 +82,7 @@ define double @fsub_maytrap(double %a, double %b) #0 {
define double @fmul_defaultenv(double %a, double %b) #0 {
; CHECK-LABEL: @fmul_defaultenv(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -94,7 +94,7 @@ define double @fmul_defaultenv(double %a, double %b) #0 {
define double @fmul_neginf(double %a, double %b) #0 {
; CHECK-LABEL: @fmul_neginf(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.downward", metadata !"fpexcept.ignore") #0
@@ -105,7 +105,7 @@ define double @fmul_neginf(double %a, double %b) #0 {
define double @fmul_maytrap(double %a, double %b) #0 {
; CHECK-LABEL: @fmul_maytrap(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0
@@ -117,7 +117,7 @@ define double @fmul_maytrap(double %a, double %b) #0 {
define double @fdiv_defaultenv(double %a, double %b) #0 {
; CHECK-LABEL: @fdiv_defaultenv(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -129,7 +129,7 @@ define double @fdiv_defaultenv(double %a, double %b) #0 {
define double @fdiv_neginf(double %a, double %b) #0 {
; CHECK-LABEL: @fdiv_neginf(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.downward", metadata !"fpexcept.ignore") #0
@@ -141,7 +141,7 @@ define double @fdiv_neginf(double %a, double %b) #0 {
define double @fdiv_maytrap(double %a, double %b) #0 {
; CHECK-LABEL: @fdiv_maytrap(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0
@@ -153,7 +153,7 @@ define double @fdiv_maytrap(double %a, double %b) #0 {
define double @frem_defaultenv(double %a, double %b) #0 {
; CHECK-LABEL: @frem_defaultenv(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -165,7 +165,7 @@ define double @frem_defaultenv(double %a, double %b) #0 {
define double @frem_neginf(double %a, double %b) #0 {
; CHECK-LABEL: @frem_neginf(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.downward", metadata !"fpexcept.ignore") #0
@@ -177,7 +177,7 @@ define double @frem_neginf(double %a, double %b) #0 {
define double @frem_maytrap(double %a, double %b) #0 {
; CHECK-LABEL: @frem_maytrap(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0
@@ -189,7 +189,7 @@ define double @frem_maytrap(double %a, double %b) #0 {
define i32 @fptoui_defaultenv(double %a) #0 {
; CHECK-LABEL: @fptoui_defaultenv(
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
; CHECK-NEXT: ret i32 [[TMP1]]
;
%1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -201,7 +201,7 @@ define i32 @fptoui_defaultenv(double %a) #0 {
define i32 @fptoui_maytrap(double %a) #0 {
; CHECK-LABEL: @fptoui_maytrap(
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double [[A:%.*]], metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
; CHECK-NEXT: ret i32 [[TMP1]]
;
%1 = call i32 @llvm.experimental.constrained.fptoui.i32.f64(double %a, metadata !"fpexcept.maytrap") #0
@@ -213,7 +213,7 @@ define i32 @fptoui_maytrap(double %a) #0 {
define double @uitofp_defaultenv(i32 %a) #0 {
; CHECK-LABEL: @uitofp_defaultenv(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -225,7 +225,7 @@ define double @uitofp_defaultenv(i32 %a) #0 {
define double @uitofp_neginf(i32 %a) #0 {
; CHECK-LABEL: @uitofp_neginf(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.downward", metadata !"fpexcept.ignore") #0
@@ -237,7 +237,7 @@ define double @uitofp_neginf(i32 %a) #0 {
define double @uitofp_maytrap(i32 %a) #0 {
; CHECK-LABEL: @uitofp_maytrap(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0
@@ -249,7 +249,7 @@ define double @uitofp_maytrap(i32 %a) #0 {
define i32 @fptosi_defaultenv(double %a) #0 {
; CHECK-LABEL: @fptosi_defaultenv(
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A:%.*]], metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
; CHECK-NEXT: ret i32 [[TMP1]]
;
%1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.ignore") #0
@@ -261,7 +261,7 @@ define i32 @fptosi_defaultenv(double %a) #0 {
define i32 @fptosi_maytrap(double %a) #0 {
; CHECK-LABEL: @fptosi_maytrap(
; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double [[A:%.*]], metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call i32 @bar.i32(i32 [[TMP1]], i32 [[TMP1]]) #0
; CHECK-NEXT: ret i32 [[TMP1]]
;
%1 = call i32 @llvm.experimental.constrained.fptosi.i32.f64(double %a, metadata !"fpexcept.maytrap") #0
@@ -273,7 +273,7 @@ define i32 @fptosi_maytrap(double %a) #0 {
define double @sitofp_defaultenv(i32 %a) #0 {
; CHECK-LABEL: @sitofp_defaultenv(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.ignore") #0
@@ -285,7 +285,7 @@ define double @sitofp_defaultenv(i32 %a) #0 {
define double @sitofp_neginf(i32 %a) #0 {
; CHECK-LABEL: @sitofp_neginf(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.downward", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.downward", metadata !"fpexcept.ignore") #0
@@ -297,7 +297,7 @@ define double @sitofp_neginf(i32 %a) #0 {
define double @sitofp_maytrap(i32 %a) #0 {
; CHECK-LABEL: @sitofp_maytrap(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.tonearest", metadata !"fpexcept.maytrap") #[[ATTR0]]
-; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP2:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP1]]
;
%1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.maytrap") #0
@@ -310,7 +310,7 @@ define i1 @fcmp_defaultenv(double %a, double %b) #0 {
; CHECK-LABEL: @fcmp_defaultenv(
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[A:%.*]], double [[B:%.*]], metadata !"oeq", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #0
; CHECK-NEXT: ret i1 [[TMP1]]
;
%1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -325,7 +325,7 @@ define i1 @fcmp_maytrap(double %a, double %b) #0 {
; CHECK-LABEL: @fcmp_maytrap(
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[A:%.*]], double [[B:%.*]], metadata !"oeq", metadata !"fpexcept.maytrap") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #0
; CHECK-NEXT: ret i1 [[TMP1]]
;
%1 = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.maytrap") #0
@@ -340,7 +340,7 @@ define i1 @fcmps_defaultenv(double %a, double %b) #0 {
; CHECK-LABEL: @fcmps_defaultenv(
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A:%.*]], double [[B:%.*]], metadata !"oeq", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #0
; CHECK-NEXT: ret i1 [[TMP1]]
;
%1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.ignore") #0
@@ -355,7 +355,7 @@ define i1 @fcmps_maytrap(double %a, double %b) #0 {
; CHECK-LABEL: @fcmps_maytrap(
; CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double [[A:%.*]], double [[B:%.*]], metadata !"oeq", metadata !"fpexcept.maytrap") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
-; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call i32 @bar.i32(i32 [[TMP2]], i32 [[TMP2]]) #0
; CHECK-NEXT: ret i1 [[TMP1]]
;
%1 = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.maytrap") #0
@@ -367,6 +367,7 @@ define i1 @fcmps_maytrap(double %a, double %b) #0 {
}
attributes #0 = { strictfp }
+; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) }
declare void @arbitraryfunc() #0
declare double @foo.f64(double, double) #0
diff --git a/llvm/test/Transforms/EarlyCSE/round-dyn-strictfp.ll b/llvm/test/Transforms/EarlyCSE/round-dyn-strictfp.ll
index c33e022f53be290..b2cebfeb586e3d8 100644
--- a/llvm/test/Transforms/EarlyCSE/round-dyn-strictfp.ll
+++ b/llvm/test/Transforms/EarlyCSE/round-dyn-strictfp.ll
@@ -11,7 +11,7 @@ define double @multiple_fadd(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_fadd(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0:[0-9]+]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -23,9 +23,9 @@ define double @multiple_fadd(double %a, double %b) #0 {
define double @multiple_fadd_split(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_fadd_split(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -39,7 +39,7 @@ define double @multiple_fsub(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_fsub(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -51,9 +51,9 @@ define double @multiple_fsub(double %a, double %b) #0 {
define double @multiple_fsub_split(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_fsub_split(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.fsub.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -67,7 +67,7 @@ define double @multiple_fmul(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_fmul(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -79,9 +79,9 @@ define double @multiple_fmul(double %a, double %b) #0 {
define double @multiple_fmul_split(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_fmul_split(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.fmul.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -95,7 +95,7 @@ define double @multiple_fdiv(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_fdiv(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -107,9 +107,9 @@ define double @multiple_fdiv(double %a, double %b) #0 {
define double @multiple_fdiv_split(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_fdiv_split(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.fdiv.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -123,7 +123,7 @@ define double @multiple_frem(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_frem(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -135,9 +135,9 @@ define double @multiple_frem(double %a, double %b) #0 {
define double @multiple_frem_split(double %a, double %b) #0 {
; CHECK-LABEL: @multiple_frem_split(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A:%.*]], double [[B:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.frem.f64(double [[A]], double [[B]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP2]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.frem.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -151,7 +151,7 @@ define double @multiple_uitofp(i32 %a) #0 {
; CHECK-LABEL: @multiple_uitofp(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -163,9 +163,9 @@ define double @multiple_uitofp(i32 %a) #0 {
define double @multiple_uitofp_split(i32 %a) #0 {
; CHECK-LABEL: @multiple_uitofp_split(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[A]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -179,7 +179,7 @@ define double @multiple_sitofp(i32 %a) #0 {
; CHECK-LABEL: @multiple_sitofp(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -191,9 +191,9 @@ define double @multiple_sitofp(i32 %a) #0 {
define double @multiple_sitofp_split(i32 %a) #0 {
; CHECK-LABEL: @multiple_sitofp_split(
; CHECK-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A:%.*]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: call void @arbitraryfunc() #[[ATTR0]]
+; CHECK-NEXT: call void @arbitraryfunc() #0
; CHECK-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 [[A]], metadata !"round.dynamic", metadata !"fpexcept.ignore") #[[ATTR0]]
-; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #[[ATTR0]]
+; CHECK-NEXT: [[TMP3:%.*]] = call double @foo.f64(double [[TMP1]], double [[TMP1]]) #0
; CHECK-NEXT: ret double [[TMP2]]
;
%1 = call double @llvm.experimental.constrained.sitofp.f64.i32(i32 %a, metadata !"round.dynamic", metadata !"fpexcept.ignore") #0
@@ -204,6 +204,7 @@ define double @multiple_sitofp_split(i32 %a) #0 {
}
attributes #0 = { strictfp }
+; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) }
declare void @arbitraryfunc() #0
declare double @foo.f64(double, double) #0
diff --git a/llvm/test/Transforms/EarlyCSE/tfpropagation.ll b/llvm/test/Transforms/EarlyCSE/tfpropagation.ll
index d07c9627f9b52f3..53127bf5f3aec76 100644
--- a/llvm/test/Transforms/EarlyCSE/tfpropagation.ll
+++ b/llvm/test/Transforms/EarlyCSE/tfpropagation.ll
@@ -68,10 +68,10 @@ define double @branching_exceptignore(i64 %a) #0 {
; CHECK-NEXT: [[CMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 1.000000e+00, double [[CONV1]], metadata !"ogt", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: br i1 [[CMP2]], label [[IF_THEN3:%.*]], label [[IF_END3:%.*]]
; CHECK: if.then3:
-; CHECK-NEXT: [[C:%.*]] = call double @truefunc.f64.i1(i1 true) #[[ATTR0]]
+; CHECK-NEXT: [[C:%.*]] = call double @truefunc.f64.i1(i1 true) #0
; CHECK-NEXT: br label [[OUT:%.*]]
; CHECK: if.end3:
-; CHECK-NEXT: [[D:%.*]] = call double @falsefunc.f64.i1(i1 false) #[[ATTR0]]
+; CHECK-NEXT: [[D:%.*]] = call double @falsefunc.f64.i1(i1 false) #0
; CHECK-NEXT: br label [[OUT]]
; CHECK: out:
; CHECK-NEXT: ret double [[CONV1]]
@@ -98,10 +98,10 @@ define double @branching_exceptignore_dynround(i64 %a) #0 {
; CHECK-NEXT: [[CMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 1.000000e+00, double [[CONV1]], metadata !"ogt", metadata !"fpexcept.ignore") #[[ATTR0]]
; CHECK-NEXT: br i1 [[CMP2]], label [[IF_THEN3:%.*]], label [[IF_END3:%.*]]
; CHECK: if.then3:
-; CHECK-NEXT: [[C:%.*]] = call double @truefunc.f64.i1(i1 true) #[[ATTR0]]
+; CHECK-NEXT: [[C:%.*]] = call double @truefunc.f64.i1(i1 true) #0
; CHECK-NEXT: br label [[OUT:%.*]]
; CHECK: if.end3:
-; CHECK-NEXT: [[D:%.*]] = call double @falsefunc.f64.i1(i1 false) #[[ATTR0]]
+; CHECK-NEXT: [[D:%.*]] = call double @falsefunc.f64.i1(i1 false) #0
; CHECK-NEXT: br label [[OUT]]
; CHECK: out:
; CHECK-NEXT: ret double [[CONV1]]
@@ -128,10 +128,10 @@ define double @branching_maytrap(i64 %a) #0 {
; CHECK-NEXT: [[CMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 1.000000e+00, double [[CONV1]], metadata !"ogt", metadata !"fpexcept.maytrap") #[[ATTR0]]
; CHECK-NEXT: br i1 [[CMP2]], label [[IF_THEN3:%.*]], label [[IF_END3:%.*]]
; CHECK: if.then3:
-; CHECK-NEXT: [[C:%.*]] = call double @truefunc.f64.i1(i1 true) #[[ATTR0]]
+; CHECK-NEXT: [[C:%.*]] = call double @truefunc.f64.i1(i1 true) #0
; CHECK-NEXT: br label [[OUT:%.*]]
; CHECK: if.end3:
-; CHECK-NEXT: [[D:%.*]] = call double @falsefunc.f64.i1(i1 false) #[[ATTR0]]
+; CHECK-NEXT: [[D:%.*]] = call double @falsefunc.f64.i1(i1 false) #0
; CHECK-NEXT: br label [[OUT]]
; CHECK: out:
; CHECK-NEXT: ret double [[CONV1]]
@@ -160,10 +160,10 @@ define double @branching_ebstrict(i64 %a) #0 {
; CHECK-NEXT: [[CMP2:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double 1.000000e+00, double [[CONV1]], metadata !"ogt", metadata !"fpexcept.strict") #[[ATTR0]]
; CHECK-NEXT: br i1 [[CMP2]], label [[IF_THEN3:%.*]], label [[IF_END3:%.*]]
; CHECK: if.then3:
-; CHECK-NEXT: [[C:%.*]] = call double @truefunc.f64.i1(i1 [[CMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[C:%.*]] = call double @truefunc.f64.i1(i1 [[CMP2]]) #0
; CHECK-NEXT: br label [[OUT:%.*]]
; CHECK: if.end3:
-; CHECK-NEXT: [[D:%.*]] = call double @falsefunc.f64.i1(i1 [[CMP2]]) #[[ATTR0]]
+; CHECK-NEXT: [[D:%.*]] = call double @falsefunc.f64.i1(i1 [[CMP2]]) #0
; CHECK-NEXT: br label [[OUT]]
; CHECK: out:
; CHECK-NEXT: ret double [[CONV1]]
@@ -190,5 +190,6 @@ declare double @llvm.experimental.constrained.uitofp.f64.i64(i64, metadata, meta
declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata) #0
attributes #0 = { strictfp }
+; CHECK: attributes #[[ATTR0]] = { strictfp memory(inaccessiblemem: readwrite) }
declare <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float>, <4 x float>, metadata, metadata) strictfp
>From c9de35a54da705dfeccb0fc4a11f82ef62ba6dc5 Mon Sep 17 00:00:00 2001
From: Serge Pavlov <sepavloff at gmail.com>
Date: Mon, 28 Oct 2024 22:12:59 +0700
Subject: [PATCH 4/7] Small changes: docs, clang-format, reviewers' notes
- Fix Doxygen error,
- Fix clang-format error,
- Remove unused function declaration,
- Remove explicit setting of MD_fpmath; copyMetadata already copies it.
---
llvm/docs/LangRef.rst | 4 ++--
llvm/include/llvm/IR/AutoUpgrade.h | 2 --
llvm/lib/IR/AutoUpgrade.cpp | 4 +---
llvm/lib/Transforms/Utils/CloneFunction.cpp | 2 +-
4 files changed, 4 insertions(+), 8 deletions(-)
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 79beea32f4110b9..e73276374288f4a 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -3011,7 +3011,7 @@ Floating-point Environment Operand Bundles
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
These operand bundles provide details on how the operation interacts with the
-:ref:`floating-point environment <_floatenv>`. There are two kinds of such
+:ref:`floating-point environment <floatenv>`. There are two kinds of such
operand bundles, which characterize interaction with floating-point control
modes and status bits respectively.
@@ -3043,7 +3043,7 @@ string value, which may have one of the values:
"maytrap"
It has the same meaning as the corresponding argument in
-:ref:`constrained intrinsics <_constrainedfp>`.
+:ref:`constrained intrinsics <constrainedfp>`.
.. _moduleasm:
diff --git a/llvm/include/llvm/IR/AutoUpgrade.h b/llvm/include/llvm/IR/AutoUpgrade.h
index 8bd005d73fba369..97c3e4d7589d7bb 100644
--- a/llvm/include/llvm/IR/AutoUpgrade.h
+++ b/llvm/include/llvm/IR/AutoUpgrade.h
@@ -107,8 +107,6 @@ namespace llvm {
/// Upgrade operand bundles (without knowing about their user instruction).
void UpgradeOperandBundles(std::vector<OperandBundleDef> &OperandBundles);
- CallBase *upgradeConstrainedFunctionCall(CallBase *CB);
-
} // End llvm namespace
#endif
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 4f446edec5aee88..7d3c9f63756c360 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -4356,9 +4356,7 @@ static CallBase *upgradeConstrainedIntrinsicCall(CallBase *CB, Function *F,
Bundles.append(NewBundles);
Builder.SetInsertPoint(CB->getParent(), CB->getIterator());
- MDNode *FPMath = CB->getMetadata(LLVMContext::MD_fpmath);
- NewCB = Builder.CreateCall(F, Args, Bundles, CB->getName(), FPMath);
-
+ NewCB = Builder.CreateCall(F, Args, Bundles, CB->getName());
NewCB->copyMetadata(*CB);
AttributeList Attrs = CB->getAttributes();
NewCB->setAttributes(Attrs);
diff --git a/llvm/lib/Transforms/Utils/CloneFunction.cpp b/llvm/lib/Transforms/Utils/CloneFunction.cpp
index 6fa3f1dea825136..166e79b131fbe36 100644
--- a/llvm/lib/Transforms/Utils/CloneFunction.cpp
+++ b/llvm/lib/Transforms/Utils/CloneFunction.cpp
@@ -501,7 +501,7 @@ PruningFunctionCloner::cloneInstruction(BasicBlock::const_iterator II) {
}
Args.push_back(
MetadataAsValue::get(Ctx, MDString::get(Ctx, "fpexcept.ignore")));
- addFPExceptionBundle(Ctx, Bundles, fp::ExceptionBehavior::ebIgnore);
+ addFPExceptionBundle(Ctx, Bundles, fp::ExceptionBehavior::ebIgnore);
auto *NewConstrainedInst =
CallInst::Create(IFn, Args, Bundles, OldInst.getName() + ".strict");
>From f407984781a9ad38985cc7a2cf87bd1b3103c720 Mon Sep 17 00:00:00 2001
From: Serge Pavlov <sepavloff at gmail.com>
Date: Wed, 11 Dec 2024 22:23:24 +0700
Subject: [PATCH 5/7] Small fixes
- Add method `hasFloatingPointBundles` to simplify checks,
- Update comments in `TailRecursionElimination.cpp`,
- Update messages in the Verifier,
- Add Verifier tests,
- Rename remaining cases of 'fpe.round' to 'fpe.control' (see the IR
  sketch below).
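
For illustration only (not part of the patch itself): after the rename, a
call carrying both kinds of floating-point bundles would be written as

    %t = call double @llvm.trunc.f64(double %a) #0 [
            "fpe.control"(metadata !"rtz"), "fpe.except"(metadata !"strict") ]

The tag strings and metadata values above are the ones exercised by the
Verifier tests added in this patch; other value spellings are hypothetical.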
---
llvm/include/llvm/IR/InstrTypes.h | 3 +
llvm/lib/IR/AutoUpgrade.cpp | 3 +-
llvm/lib/IR/IRBuilder.cpp | 2 +-
llvm/lib/IR/Instructions.cpp | 5 ++
llvm/lib/IR/LLVMContext.cpp | 2 +-
llvm/lib/IR/Verifier.cpp | 20 +++--
.../Scalar/TailRecursionElimination.cpp | 8 +-
llvm/test/Verifier/fp-intrinsics.ll | 90 +++++++++++++++++++
8 files changed, 116 insertions(+), 17 deletions(-)
diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h
index 2cc6c0359bf7ad1..347a4f2ae760076 100644
--- a/llvm/include/llvm/IR/InstrTypes.h
+++ b/llvm/include/llvm/IR/InstrTypes.h
@@ -2145,6 +2145,9 @@ class CallBase : public Instruction {
/// Return exception behavior specified by operand bundles.
std::optional<fp::ExceptionBehavior> getExceptionBehavior() const;
+  /// Return true if this call has floating-point operand bundles.
+ bool hasFloatingPointBundles() const;
+
/// Used to keep track of an operand bundle. See the main comment on
/// OperandBundleUser above.
struct BundleOpInfo {
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 7d3c9f63756c360..b1f4523c774af95 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -4328,8 +4328,7 @@ static void upgradeDbgIntrinsicToDbgRecord(StringRef Name, CallBase *CI) {
static CallBase *upgradeConstrainedIntrinsicCall(CallBase *CB, Function *F,
IRBuilder<> &Builder) {
- if (CB->getOperandBundle(LLVMContext::OB_fpe_control) ||
- CB->getOperandBundle(LLVMContext::OB_fpe_except))
+ if (CB->hasFloatingPointBundles())
return nullptr;
SmallVector<OperandBundleDef, 2> NewBundles;
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index abc20acdefddacd..db961ffdc856475 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -99,7 +99,7 @@ CallInst *IRBuilderBase::CreateCall(FunctionType *FTy, Value *Callee,
if (IntrinsicInst::canAccessFPEnvironment(ID)) {
bool NeedRound = true, NeedExcept = true;
for (const auto &Item : OpBundles) {
- if (NeedRound && Item.getTag() == "fpe.round")
+ if (NeedRound && Item.getTag() == "fpe.control")
NeedRound = false;
else if (NeedExcept && Item.getTag() == "fpe.except")
NeedExcept = false;
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index 64c08eb8b37889a..cc95287b680b27f 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -622,6 +622,11 @@ std::optional<fp::ExceptionBehavior> CallBase::getExceptionBehavior() const {
return std::nullopt;
}
+bool CallBase::hasFloatingPointBundles() const {
+ return getOperandBundle(LLVMContext::OB_fpe_control) ||
+ getOperandBundle(LLVMContext::OB_fpe_except);
+}
+
MemoryEffects CallBase::getMemoryEffects() const {
MemoryEffects ME = getAttributes().getMemoryEffects();
if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
diff --git a/llvm/lib/IR/LLVMContext.cpp b/llvm/lib/IR/LLVMContext.cpp
index d21a7e6a42bec36..622d0a6ebd94d6b 100644
--- a/llvm/lib/IR/LLVMContext.cpp
+++ b/llvm/lib/IR/LLVMContext.cpp
@@ -84,7 +84,7 @@ LLVMContext::LLVMContext() : pImpl(new LLVMContextImpl(*this)) {
auto *RoundingEntry = pImpl->getOrInsertBundleTag("fpe.control");
assert(RoundingEntry->second == LLVMContext::OB_fpe_control &&
- "fpe.round operand bundle id drifted!");
+ "fpe.control operand bundle id drifted!");
(void)RoundingEntry;
auto *ExceptionEntry = pImpl->getOrInsertBundleTag("fpe.except");
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 57ba1fbc0734465..10261be1de1e50c 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -3729,7 +3729,7 @@ void Verifier::visitCallBase(CallBase &Call) {
FoundGCTransitionBundle = false, FoundCFGuardTargetBundle = false,
FoundPreallocatedBundle = false, FoundGCLiveBundle = false,
FoundPtrauthBundle = false, FoundKCFIBundle = false,
- FoundAttachedCallBundle = false, FoundFpeRoundBundle = false,
+ FoundAttachedCallBundle = false, FoundFpeControlBundle = false,
FoundFpeExceptBundle = false;
for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
@@ -3795,18 +3795,20 @@ void Verifier::visitCallBase(CallBase &Call) {
FoundAttachedCallBundle = true;
verifyAttachedCallBundle(Call, BU);
} else if (Tag == LLVMContext::OB_fpe_control) {
- Check(!FoundFpeRoundBundle, "Multiple fpe.round operand bundles", Call);
+ Check(!FoundFpeControlBundle, "Multiple fpe.control operand bundles",
+ Call);
Check(BU.Inputs.size() == 1,
- "Expected exactly one fpe.round bundle operand", Call);
+ "Expected exactly one fpe.control bundle operand", Call);
auto *V = dyn_cast<MetadataAsValue>(BU.Inputs.front());
- Check(V, "Value of fpe.round bundle operand must be a metadata", Call);
+ Check(V, "Value of fpe.control bundle operand must be a metadata", Call);
auto *MDS = dyn_cast<MDString>(V->getMetadata());
- Check(MDS, "Value of fpe.round bundle operand must be a string", Call);
+ Check(MDS, "Value of fpe.control bundle operand must be a string", Call);
auto RM = convertStrToRoundingMode(MDS->getString(), true);
- Check(RM.has_value(),
- "Value of fpe.round bundle operand is not a correct rounding mode",
- Call);
- FoundFpeRoundBundle = true;
+ Check(
+ RM.has_value(),
+ "Value of fpe.control bundle operand is not a correct rounding mode",
+ Call);
+ FoundFpeControlBundle = true;
} else if (Tag == LLVMContext::OB_fpe_except) {
Check(!FoundFpeExceptBundle, "Multiple fpe.except operand bundles", Call);
Check(BU.Inputs.size() == 1,
diff --git a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
index 7538c84b03bfa41..b20cbc9a19729d8 100644
--- a/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/TailRecursionElimination.cpp
@@ -249,14 +249,14 @@ static bool markTails(Function &F, OptimizationRemarkEmitter *ORE) {
if (II->getIntrinsicID() == Intrinsic::stackrestore)
continue;
- // Special-case operand bundles "clang.arc.attachedcall", "ptrauth", and
- // "kcfi".
bool IsNoTail =
CI->isNoTailCall() ||
CI->hasOperandBundlesOtherThan(
{LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_ptrauth,
- LLVMContext::OB_kcfi, LLVMContext::OB_fpe_control,
- LLVMContext::OB_fpe_except});
+ LLVMContext::OB_kcfi,
+ // A call with FP operand bundles should be treated in the same
+ // way as a call without them.
+ LLVMContext::OB_fpe_control, LLVMContext::OB_fpe_except});
if (!IsNoTail && CI->doesNotAccessMemory()) {
// A call to a readnone function whose arguments are all things computed
diff --git a/llvm/test/Verifier/fp-intrinsics.ll b/llvm/test/Verifier/fp-intrinsics.ll
index fd7b07abab93ff5..a3855e4b5a5eb73 100644
--- a/llvm/test/Verifier/fp-intrinsics.ll
+++ b/llvm/test/Verifier/fp-intrinsics.ll
@@ -51,4 +51,94 @@ entry:
ret double %fadd
}
+; Test multiple fpe.control bundles.
+; CHECK-NEXT: Multiple fpe.control operand bundles
+; CHECK-NEXT: %ftrunc = call double @llvm.trunc.f64(double %a) #{{[0-9]+}} [ "fpe.control"(metadata !"rtz"), "fpe.control"(metadata !"rtz") ]
+define double @f6(double %a) #0 {
+entry:
+ %ftrunc = call double @llvm.trunc.f64(double %a) #0 [ "fpe.control"(metadata !"rtz"), "fpe.control"(metadata !"rtz") ]
+ ret double %ftrunc
+}
+
+; Test fpe.control bundle that has more than one operand.
+; CHECK-NEXT: Expected exactly one fpe.control bundle operand
+; CHECK-NEXT: %ftrunc = call double @llvm.trunc.f64(double %a) #{{[0-9]+}} [ "fpe.control"(metadata !"rtz", metadata !"rte") ]
+define double @f7(double %a) #0 {
+entry:
+ %ftrunc = call double @llvm.trunc.f64(double %a) #0 [ "fpe.control"(metadata !"rtz", metadata !"rte") ]
+ ret double %ftrunc
+}
+
+; Test fpe.control bundle that has non-metadata operand.
+; CHECK-NEXT: Value of fpe.control bundle operand must be a metadata
+; CHECK-NEXT: %ftrunc = call double @llvm.trunc.f64(double %a) #{{[0-9]+}} [ "fpe.control"(i32 0) ]
+define double @f8(double %a) #0 {
+entry:
+ %ftrunc = call double @llvm.trunc.f64(double %a) #0 [ "fpe.control"(i32 0) ]
+ ret double %ftrunc
+}
+
+; Test fpe.control bundle that has non-string operand.
+; CHECK-NEXT: Value of fpe.control bundle operand must be a string
+; CHECK-NEXT: %ftrunc = call double @llvm.trunc.f64(double %a) #{{[0-9]+}} [ "fpe.control"(metadata i64 3) ]
+define double @f9(double %a) #0 {
+entry:
+ %ftrunc = call double @llvm.trunc.f64(double %a) #0 [ "fpe.control"(metadata !{i64 3}) ]
+ ret double %ftrunc
+}
+
+; Test fpe.control bundle that specifies incorrect value.
+; CHECK-NEXT: Value of fpe.control bundle operand is not a correct rounding mode
+; CHECK-NEXT: %ftrunc = call double @llvm.trunc.f64(double %a) #{{[0-9]+}} [ "fpe.control"(metadata !"qqq") ]
+define double @f10(double %a) #0 {
+entry:
+ %ftrunc = call double @llvm.trunc.f64(double %a) #0 [ "fpe.control"(metadata !"qqq") ]
+ ret double %ftrunc
+}
+
+; Test multiple fpe.except bundles.
+; CHECK-NEXT: Multiple fpe.except operand bundles
+; CHECK-NEXT: %ftrunc = call double @llvm.trunc.f64(double %a) #{{[0-9]+}} [ "fpe.except"(metadata !"strict"), "fpe.except"(metadata !"strict") ]
+define double @f11(double %a) #0 {
+entry:
+ %ftrunc = call double @llvm.trunc.f64(double %a) #0 [ "fpe.except"(metadata !"strict"), "fpe.except"(metadata !"strict") ]
+ ret double %ftrunc
+}
+
+; Test fpe.except bundle that has more than one operand.
+; CHECK-NEXT: Expected exactly one fpe.except bundle operand
+; CHECK-NEXT: %ftrunc = call double @llvm.trunc.f64(double %a) #{{[0-9]+}} [ "fpe.except"(metadata !"strict", metadata !"strict") ]
+define double @f12(double %a) #0 {
+entry:
+ %ftrunc = call double @llvm.trunc.f64(double %a) #0 [ "fpe.except"(metadata !"strict", metadata !"strict") ]
+ ret double %ftrunc
+}
+
+; Test fpe.except bundle that has a non-metadata operand.
+; CHECK-NEXT: Value of fpe.except bundle operand must be a metadata
+; CHECK-NEXT: %ftrunc = call double @llvm.trunc.f64(double %a) #{{[0-9]+}} [ "fpe.except"(i32 0) ]
+define double @f13(double %a) #0 {
+entry:
+ %ftrunc = call double @llvm.trunc.f64(double %a) #0 [ "fpe.except"(i32 0) ]
+ ret double %ftrunc
+}
+
+; Test fpe.except bundle that has a non-string operand.
+; CHECK-NEXT: Value of fpe.except bundle operand must be a string
+; CHECK-NEXT: %ftrunc = call double @llvm.trunc.f64(double %a) #{{[0-9]+}} [ "fpe.except"(metadata i64 3) ]
+define double @f14(double %a) #0 {
+entry:
+ %ftrunc = call double @llvm.trunc.f64(double %a) #0 [ "fpe.except"(metadata !{i64 3}) ]
+ ret double %ftrunc
+}
+
+; Test fpe.except bundle that specifies an incorrect value.
+; CHECK-NEXT: Value of fpe.except bundle operand is not a correct exception behavior
+; CHECK-NEXT: %ftrunc = call double @llvm.trunc.f64(double %a) #{{[0-9]+}} [ "fpe.except"(metadata !"qqq") ]
+define double @f15(double %a) #0 {
+entry:
+ %ftrunc = call double @llvm.trunc.f64(double %a) #0 [ "fpe.except"(metadata !"qqq") ]
+ ret double %ftrunc
+}
+
attributes #0 = { strictfp }
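
(Illustrative note, not part of the patch.) The negative tests above pin down
the verifier rules for the two bundle tags: at most one bundle of each tag per
call, exactly one operand per bundle, the operand must be a metadata string,
and the string must name a known rounding mode or exception behavior. A
compact sketch of those checks; the helper name and the string tables below
are assumptions, the authoritative code lives in Verifier.cpp:

#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Metadata.h"
#include <cassert>

// Validate one fpe.control/fpe.except bundle (uniqueness per call is
// checked by the caller, which sees all bundles of the instruction).
static void checkFPBundle(const llvm::OperandBundleUse &BU, bool IsControl) {
  using namespace llvm;
  assert(BU.Inputs.size() == 1 && "Expected exactly one bundle operand");
  auto *MAV = dyn_cast<MetadataAsValue>(BU.Inputs.front().get());
  assert(MAV && "Value of bundle operand must be a metadata");
  auto *Str = dyn_cast<MDString>(MAV->getMetadata());
  assert(Str && "Value of bundle operand must be a string");
  // Only the spellings exercised by the tests above are listed here; the
  // full sets come from the FPEnv helpers added by this patch.
  static const StringRef Rounding[] = {"rte", "rtz", "dyn"};
  static const StringRef Except[] = {"strict"};
  assert(is_contained(IsControl ? ArrayRef<StringRef>(Rounding)
                                : ArrayRef<StringRef>(Except),
                      Str->getString()) &&
         "bundle operand does not name a correct value");
}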
>From a51492d656e862a6bb14cf2065ab40364a41dfb3 Mon Sep 17 00:00:00 2001
From: Serge Pavlov <sepavloff at gmail.com>
Date: Wed, 18 Dec 2024 19:21:12 +0700
Subject: [PATCH 6/7] Calculate MemoryEffects
---
llvm/include/llvm/IR/IRBuilder.h | 2 ++
llvm/include/llvm/IR/InstrTypes.h | 2 ++
llvm/lib/IR/Instructions.cpp | 20 +++++++++++++-------
3 files changed, 17 insertions(+), 7 deletions(-)
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index 37a62770ed126cc..a66bc4b2d4e812e 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -375,6 +375,8 @@ class IRBuilderBase {
void setConstrainedFPCallAttr(CallBase *I) {
I->addFnAttr(Attribute::StrictFP);
MemoryEffects ME = MemoryEffects::inaccessibleMemOnly();
+ if (I->getAttributes().hasFnAttr(Attribute::Memory))
+ ME |= I->getAttributes().getMemoryEffects();
auto A = Attribute::getWithMemoryEffects(getContext(), ME);
I->addFnAttr(A);
}
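
(Illustrative note, not part of the patch.) The union above is what keeps an
already-present, narrower memory attribute intact instead of clobbering it; a
worked example of the merge semantics:

#include "llvm/Support/ModRef.h"

// A call site that already declares memory(argmem: readwrite) keeps that
// effect and additionally gains the FP-environment (inaccessiblemem) one.
llvm::MemoryEffects mergedExample() {
  using namespace llvm;
  MemoryEffects Existing = MemoryEffects::argMemOnly(ModRefInfo::ModRef);
  return MemoryEffects::inaccessibleMemOnly() | Existing;
  // == memory(argmem: readwrite, inaccessiblemem: readwrite)
}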
diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h
index 347a4f2ae760076..21ae0004bbf7a59 100644
--- a/llvm/include/llvm/IR/InstrTypes.h
+++ b/llvm/include/llvm/IR/InstrTypes.h
@@ -1166,6 +1166,8 @@ class CallBase : public Instruction {
/// number of extra operands.
unsigned getNumSubclassExtraOperandsDynamic() const;
+  /// Return the memory effects implied by this call's operand bundles.
+  MemoryEffects getMemoryEffectsForBundles() const;
+
public:
using Instruction::getContext;
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index cc95287b680b27f..5c35dc8d23dc552 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -627,17 +627,23 @@ bool CallBase::hasFloatingPointBundles() const {
getOperandBundle(LLVMContext::OB_fpe_except);
}
+MemoryEffects CallBase::getMemoryEffectsForBundles() const {
+ MemoryEffects ME = MemoryEffects::none();
+ if (hasFloatingPointBundles())
+ ME |= MemoryEffects::inaccessibleMemOnly();
+ if (hasReadingOperandBundles())
+ ME |= MemoryEffects::readOnly();
+ if (hasClobberingOperandBundles())
+ ME |= MemoryEffects::writeOnly();
+ return ME;
+}
+
MemoryEffects CallBase::getMemoryEffects() const {
MemoryEffects ME = getAttributes().getMemoryEffects();
if (auto *Fn = dyn_cast<Function>(getCalledOperand())) {
MemoryEffects FnME = Fn->getMemoryEffects();
- if (hasOperandBundles()) {
- // TODO: Add a method to get memory effects for operand bundles instead.
- if (hasReadingOperandBundles())
- FnME |= MemoryEffects::readOnly();
- if (hasClobberingOperandBundles())
- FnME |= MemoryEffects::writeOnly();
- }
+ if (hasOperandBundles())
+ FnME |= getMemoryEffectsForBundles();
ME &= FnME;
}
return ME;
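
(Illustrative note, not part of the patch.) After this refactoring the overall
formula reads: call-site attributes intersected with the union of callee
effects and bundle-implied effects. A one-function sketch of that composition:

#include "llvm/Support/ModRef.h"

// Bundle-implied effects (the FP environment, reading/clobbering bundles)
// can only widen what the callee declares; call-site attributes can only
// narrow the final result.
llvm::MemoryEffects composeCallEffects(llvm::MemoryEffects SiteAttrME,
                                       llvm::MemoryEffects CalleeME,
                                       llvm::MemoryEffects BundleME) {
  return SiteAttrME & (CalleeME | BundleME);
}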
>From 535dd7b462c68a2c1435ab8f6c148a12471993a2 Mon Sep 17 00:00:00 2001
From: Serge Pavlov <sepavloff at gmail.com>
Date: Wed, 8 Jan 2025 19:41:31 +0700
Subject: [PATCH 7/7] Don't set strictfp on irrelevant calls
---
.../test/CodeGen/SystemZ/strictfp_builtins.c | 18 +-
clang/test/CodeGen/X86/strictfp_builtins.c | 6 +-
clang/test/CodeGen/cx-complex-range.c | 20 +-
clang/test/CodeGen/isfpclass.c | 18 +-
.../CodeGen/strictfp-elementwise-bulitins.cpp | 6 +-
clang/test/CodeGen/strictfp_builtins.c | 20 +-
.../cl20-device-side-enqueue-attributes.cl | 8 +-
llvm/lib/IR/IRBuilder.cpp | 11 +-
.../AMDGPU/amdgpu-simplify-libcall-pown.ll | 4 +-
.../AMDGPU/global_atomic_optimizer_fp_rtn.ll | 588 +++++++++---------
.../global_atomics_optimizer_fp_no_rtn.ll | 560 ++++++++---------
.../AMDGPU/expand-atomic-rmw-fadd.ll | 4 +-
.../HardwareLoops/scalar-while-strictfp.ll | 56 +-
13 files changed, 644 insertions(+), 675 deletions(-)
diff --git a/clang/test/CodeGen/SystemZ/strictfp_builtins.c b/clang/test/CodeGen/SystemZ/strictfp_builtins.c
index 8c8f1f4cabd7429..b60fd932d31c3f4 100644
--- a/clang/test/CodeGen/SystemZ/strictfp_builtins.c
+++ b/clang/test/CodeGen/SystemZ/strictfp_builtins.c
@@ -9,7 +9,7 @@
// CHECK-NEXT: [[F_ADDR:%.*]] = alloca float, align 4
// CHECK-NEXT: store float [[F:%.*]], ptr [[F_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F_ADDR]], align 4
-// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.s390.tdc.f32(float [[TMP0]], i64 15) #[[ATTR2:[0-9]+]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.s390.tdc.f32(float [[TMP0]], i64 15)
// CHECK-NEXT: ret i32 [[TMP1]]
//
int test_isnan_float(float f) {
@@ -21,7 +21,7 @@ int test_isnan_float(float f) {
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.s390.tdc.f64(double [[TMP0]], i64 15) #[[ATTR2]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.s390.tdc.f64(double [[TMP0]], i64 15)
// CHECK-NEXT: ret i32 [[TMP1]]
//
int test_isnan_double(double d) {
@@ -34,7 +34,7 @@ int test_isnan_double(double d) {
// CHECK-NEXT: [[LD:%.*]] = load fp128, ptr [[TMP0:%.*]], align 8
// CHECK-NEXT: store fp128 [[LD]], ptr [[LD_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load fp128, ptr [[LD_ADDR]], align 8
-// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.s390.tdc.f128(fp128 [[TMP1]], i64 15) #[[ATTR2]]
+// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.s390.tdc.f128(fp128 [[TMP1]], i64 15)
// CHECK-NEXT: ret i32 [[TMP2]]
//
int test_isnan_long_double(long double ld) {
@@ -46,7 +46,7 @@ int test_isnan_long_double(long double ld) {
// CHECK-NEXT: [[F_ADDR:%.*]] = alloca float, align 4
// CHECK-NEXT: store float [[F:%.*]], ptr [[F_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F_ADDR]], align 4
-// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.s390.tdc.f32(float [[TMP0]], i64 48) #[[ATTR2]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.s390.tdc.f32(float [[TMP0]], i64 48)
// CHECK-NEXT: ret i32 [[TMP1]]
//
int test_isinf_float(float f) {
@@ -58,7 +58,7 @@ int test_isinf_float(float f) {
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.s390.tdc.f64(double [[TMP0]], i64 48) #[[ATTR2]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.s390.tdc.f64(double [[TMP0]], i64 48)
// CHECK-NEXT: ret i32 [[TMP1]]
//
int test_isinf_double(double d) {
@@ -71,7 +71,7 @@ int test_isinf_double(double d) {
// CHECK-NEXT: [[LD:%.*]] = load fp128, ptr [[TMP0:%.*]], align 8
// CHECK-NEXT: store fp128 [[LD]], ptr [[LD_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load fp128, ptr [[LD_ADDR]], align 8
-// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.s390.tdc.f128(fp128 [[TMP1]], i64 48) #[[ATTR2]]
+// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.s390.tdc.f128(fp128 [[TMP1]], i64 48)
// CHECK-NEXT: ret i32 [[TMP2]]
//
int test_isinf_long_double(long double ld) {
@@ -83,7 +83,7 @@ int test_isinf_long_double(long double ld) {
// CHECK-NEXT: [[F_ADDR:%.*]] = alloca float, align 4
// CHECK-NEXT: store float [[F:%.*]], ptr [[F_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F_ADDR]], align 4
-// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.s390.tdc.f32(float [[TMP0]], i64 4032) #[[ATTR2]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.s390.tdc.f32(float [[TMP0]], i64 4032)
// CHECK-NEXT: ret i32 [[TMP1]]
//
int test_isfinite_float(float f) {
@@ -95,7 +95,7 @@ int test_isfinite_float(float f) {
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.s390.tdc.f64(double [[TMP0]], i64 4032) #[[ATTR2]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.s390.tdc.f64(double [[TMP0]], i64 4032)
// CHECK-NEXT: ret i32 [[TMP1]]
//
int test_isfinite_double(double d) {
@@ -108,7 +108,7 @@ int test_isfinite_double(double d) {
// CHECK-NEXT: [[LD:%.*]] = load fp128, ptr [[TMP0:%.*]], align 8
// CHECK-NEXT: store fp128 [[LD]], ptr [[LD_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load fp128, ptr [[LD_ADDR]], align 8
-// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.s390.tdc.f128(fp128 [[TMP1]], i64 4032) #[[ATTR2]]
+// CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.s390.tdc.f128(fp128 [[TMP1]], i64 4032)
// CHECK-NEXT: ret i32 [[TMP2]]
//
int test_isfinite_long_double(long double ld) {
diff --git a/clang/test/CodeGen/X86/strictfp_builtins.c b/clang/test/CodeGen/X86/strictfp_builtins.c
index 75ed3a2555b3d7d..c4a22dc5ee90fdf 100644
--- a/clang/test/CodeGen/X86/strictfp_builtins.c
+++ b/clang/test/CodeGen/X86/strictfp_builtins.c
@@ -27,7 +27,7 @@ void p(char *str, int x) {
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca x86_fp80, align 16
// CHECK-NEXT: store x86_fp80 [[LD:%.*]], ptr [[LD_ADDR]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load x86_fp80, ptr [[LD_ADDR]], align 16
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 516) #[[ATTR4:[0-9]+]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 516)
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.1, i32 noundef [[TMP2]]) #[[ATTR3]]
// CHECK-NEXT: ret void
@@ -43,7 +43,7 @@ void test_long_double_isinf(long double ld) {
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca x86_fp80, align 16
// CHECK-NEXT: store x86_fp80 [[LD:%.*]], ptr [[LD_ADDR]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load x86_fp80, ptr [[LD_ADDR]], align 16
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 504) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 504)
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.2, i32 noundef [[TMP2]]) #[[ATTR3]]
// CHECK-NEXT: ret void
@@ -59,7 +59,7 @@ void test_long_double_isfinite(long double ld) {
// CHECK-NEXT: [[LD_ADDR:%.*]] = alloca x86_fp80, align 16
// CHECK-NEXT: store x86_fp80 [[LD:%.*]], ptr [[LD_ADDR]], align 16
// CHECK-NEXT: [[TMP0:%.*]] = load x86_fp80, ptr [[LD_ADDR]], align 16
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 3) #[[ATTR4]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f80(x86_fp80 [[TMP0]], i32 3)
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.3, i32 noundef [[TMP2]]) #[[ATTR3]]
// CHECK-NEXT: ret void
diff --git a/clang/test/CodeGen/cx-complex-range.c b/clang/test/CodeGen/cx-complex-range.c
index 88300041061aaec..98735c4671ce523 100644
--- a/clang/test/CodeGen/cx-complex-range.c
+++ b/clang/test/CodeGen/cx-complex-range.c
@@ -1575,8 +1575,8 @@ _Complex float mulf(_Complex float a, _Complex float b) {
// X86WINPRMTD_STRICT-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// X86WINPRMTD_STRICT-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
-// X86WINPRMTD_STRICT-NEXT: [[TMP0:%.*]] = call double @llvm.fabs.f64(double [[B_REAL]]) #[[ATTR3]]
-// X86WINPRMTD_STRICT-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[B_IMAG]]) #[[ATTR3]]
+// X86WINPRMTD_STRICT-NEXT: [[TMP0:%.*]] = call double @llvm.fabs.f64(double [[B_REAL]])
+// X86WINPRMTD_STRICT-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[B_IMAG]])
// X86WINPRMTD_STRICT-NEXT: [[ABS_CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP1]], metadata !"ugt", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// X86WINPRMTD_STRICT: abs_rhsr_greater_or_equal_abs_rhsi:
@@ -2658,8 +2658,8 @@ _Complex double muld(_Complex double a, _Complex double b) {
// X86WINPRMTD_STRICT-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// X86WINPRMTD_STRICT-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
-// X86WINPRMTD_STRICT-NEXT: [[TMP0:%.*]] = call double @llvm.fabs.f64(double [[B_REAL]]) #[[ATTR3]]
-// X86WINPRMTD_STRICT-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[B_IMAG]]) #[[ATTR3]]
+// X86WINPRMTD_STRICT-NEXT: [[TMP0:%.*]] = call double @llvm.fabs.f64(double [[B_REAL]])
+// X86WINPRMTD_STRICT-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[B_IMAG]])
// X86WINPRMTD_STRICT-NEXT: [[ABS_CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP1]], metadata !"ugt", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// X86WINPRMTD_STRICT: abs_rhsr_greater_or_equal_abs_rhsi:
@@ -2713,8 +2713,8 @@ _Complex double muld(_Complex double a, _Complex double b) {
// PRMTD_STRICT-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// PRMTD_STRICT-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// PRMTD_STRICT-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
-// PRMTD_STRICT-NEXT: [[TMP0:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[B_REAL]]) #[[ATTR4]]
-// PRMTD_STRICT-NEXT: [[TMP1:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[B_IMAG]]) #[[ATTR4]]
+// PRMTD_STRICT-NEXT: [[TMP0:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[B_REAL]])
+// PRMTD_STRICT-NEXT: [[TMP1:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[B_IMAG]])
// PRMTD_STRICT-NEXT: [[ABS_CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]], metadata !"ugt", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// PRMTD_STRICT: abs_rhsr_greater_or_equal_abs_rhsi:
@@ -3961,8 +3961,8 @@ _Complex long double mulld(_Complex long double a, _Complex long double b) {
// X86WINPRMTD_STRICT-NEXT: [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
// X86WINPRMTD_STRICT-NEXT: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[C_REAL]], metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[CONV1:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[C_IMAG]], metadata !"fpexcept.strict") #[[ATTR3]]
-// X86WINPRMTD_STRICT-NEXT: [[TMP0:%.*]] = call double @llvm.fabs.f64(double [[CONV]]) #[[ATTR3]]
-// X86WINPRMTD_STRICT-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[CONV1]]) #[[ATTR3]]
+// X86WINPRMTD_STRICT-NEXT: [[TMP0:%.*]] = call double @llvm.fabs.f64(double [[CONV]])
+// X86WINPRMTD_STRICT-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[CONV1]])
// X86WINPRMTD_STRICT-NEXT: [[ABS_CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP1]], metadata !"ugt", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// X86WINPRMTD_STRICT: abs_rhsr_greater_or_equal_abs_rhsi:
@@ -4038,8 +4038,8 @@ _Complex long double mulld(_Complex long double a, _Complex long double b) {
// PRMTD_STRICT-NEXT: [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
// PRMTD_STRICT-NEXT: [[CONV:%.*]] = call x86_fp80 @llvm.experimental.constrained.fpext.f80.f32(float [[C_REAL]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[CONV1:%.*]] = call x86_fp80 @llvm.experimental.constrained.fpext.f80.f32(float [[C_IMAG]], metadata !"fpexcept.strict") #[[ATTR4]]
-// PRMTD_STRICT-NEXT: [[TMP0:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV]]) #[[ATTR4]]
-// PRMTD_STRICT-NEXT: [[TMP1:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV1]]) #[[ATTR4]]
+// PRMTD_STRICT-NEXT: [[TMP0:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV]])
+// PRMTD_STRICT-NEXT: [[TMP1:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV1]])
// PRMTD_STRICT-NEXT: [[ABS_CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]], metadata !"ugt", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// PRMTD_STRICT: abs_rhsr_greater_or_equal_abs_rhsi:
diff --git a/clang/test/CodeGen/isfpclass.c b/clang/test/CodeGen/isfpclass.c
index 1bf60b8fbca176f..58849eec340c67f 100644
--- a/clang/test/CodeGen/isfpclass.c
+++ b/clang/test/CodeGen/isfpclass.c
@@ -15,7 +15,7 @@ _Bool check_isfpclass_finite(float x) {
// CHECK-LABEL: define dso_local noundef i1 @check_isfpclass_finite_strict
// CHECK-SAME: (float noundef [[X:%.*]]) local_unnamed_addr #[[ATTR2:[0-9]+]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call i1 @llvm.is.fpclass.f32(float [[X]], i32 504) #[[ATTR5:[0-9]+]]
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i1 @llvm.is.fpclass.f32(float [[X]], i32 504)
// CHECK-NEXT: ret i1 [[TMP0]]
//
_Bool check_isfpclass_finite_strict(float x) {
@@ -36,7 +36,7 @@ _Bool check_isfpclass_nan_f32(float x) {
// CHECK-LABEL: define dso_local noundef i1 @check_isfpclass_nan_f32_strict
// CHECK-SAME: (float noundef [[X:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call i1 @llvm.is.fpclass.f32(float [[X]], i32 3) #[[ATTR5]]
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i1 @llvm.is.fpclass.f32(float [[X]], i32 3)
// CHECK-NEXT: ret i1 [[TMP0]]
//
_Bool check_isfpclass_nan_f32_strict(float x) {
@@ -57,7 +57,7 @@ _Bool check_isfpclass_snan_f64(double x) {
// CHECK-LABEL: define dso_local noundef i1 @check_isfpclass_snan_f64_strict
// CHECK-SAME: (double noundef [[X:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call i1 @llvm.is.fpclass.f64(double [[X]], i32 1) #[[ATTR5]]
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i1 @llvm.is.fpclass.f64(double [[X]], i32 1)
// CHECK-NEXT: ret i1 [[TMP0]]
//
_Bool check_isfpclass_snan_f64_strict(double x) {
@@ -78,7 +78,7 @@ _Bool check_isfpclass_zero_f16(_Float16 x) {
// CHECK-LABEL: define dso_local noundef i1 @check_isfpclass_zero_f16_strict
// CHECK-SAME: (half noundef [[X:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call i1 @llvm.is.fpclass.f16(half [[X]], i32 96) #[[ATTR5]]
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i1 @llvm.is.fpclass.f16(half [[X]], i32 96)
// CHECK-NEXT: ret i1 [[TMP0]]
//
_Bool check_isfpclass_zero_f16_strict(_Float16 x) {
@@ -89,7 +89,7 @@ _Bool check_isfpclass_zero_f16_strict(_Float16 x) {
// CHECK-LABEL: define dso_local noundef i1 @check_isnan
// CHECK-SAME: (float noundef [[X:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call i1 @llvm.is.fpclass.f32(float [[X]], i32 3) #[[ATTR5]]
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i1 @llvm.is.fpclass.f32(float [[X]], i32 3)
// CHECK-NEXT: ret i1 [[TMP0]]
//
_Bool check_isnan(float x) {
@@ -100,7 +100,7 @@ _Bool check_isnan(float x) {
// CHECK-LABEL: define dso_local noundef i1 @check_isinf
// CHECK-SAME: (float noundef [[X:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call i1 @llvm.is.fpclass.f32(float [[X]], i32 516) #[[ATTR5]]
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i1 @llvm.is.fpclass.f32(float [[X]], i32 516)
// CHECK-NEXT: ret i1 [[TMP0]]
//
_Bool check_isinf(float x) {
@@ -111,7 +111,7 @@ _Bool check_isinf(float x) {
// CHECK-LABEL: define dso_local noundef i1 @check_isfinite
// CHECK-SAME: (float noundef [[X:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call i1 @llvm.is.fpclass.f32(float [[X]], i32 504) #[[ATTR5]]
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i1 @llvm.is.fpclass.f32(float [[X]], i32 504)
// CHECK-NEXT: ret i1 [[TMP0]]
//
_Bool check_isfinite(float x) {
@@ -122,7 +122,7 @@ _Bool check_isfinite(float x) {
// CHECK-LABEL: define dso_local noundef i1 @check_isnormal
// CHECK-SAME: (float noundef [[X:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call i1 @llvm.is.fpclass.f32(float [[X]], i32 264) #[[ATTR5]]
+// CHECK-NEXT: [[TMP0:%.*]] = tail call i1 @llvm.is.fpclass.f32(float [[X]], i32 264)
// CHECK-NEXT: ret i1 [[TMP0]]
//
_Bool check_isnormal(float x) {
@@ -150,7 +150,7 @@ int4 check_isfpclass_nan_v4f32(float4 x) {
// CHECK-LABEL: define dso_local range(i32 0, 2) <4 x i32> @check_isfpclass_nan_strict_v4f32
// CHECK-SAME: (<4 x float> noundef [[X:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i1> @llvm.is.fpclass.v4f32(<4 x float> [[X]], i32 3) #[[ATTR5]]
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x i1> @llvm.is.fpclass.v4f32(<4 x float> [[X]], i32 3)
// CHECK-NEXT: [[TMP1:%.*]] = zext <4 x i1> [[TMP0]] to <4 x i32>
// CHECK-NEXT: ret <4 x i32> [[TMP1]]
//
diff --git a/clang/test/CodeGen/strictfp-elementwise-bulitins.cpp b/clang/test/CodeGen/strictfp-elementwise-bulitins.cpp
index 175ad22601839da..479accbe05bdcd7 100644
--- a/clang/test/CodeGen/strictfp-elementwise-bulitins.cpp
+++ b/clang/test/CodeGen/strictfp-elementwise-bulitins.cpp
@@ -20,7 +20,7 @@ float4 strict_fadd(float4 a, float4 b) {
// CHECK-LABEL: define dso_local noundef <4 x float> @_Z22strict_elementwise_absDv4_f
// CHECK-SAME: (<4 x float> noundef [[A:%.*]]) local_unnamed_addr #[[ATTR2:[0-9]+]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[ELT_ABS:%.*]] = tail call <4 x float> @llvm.fabs.v4f32(<4 x float> [[A]]) #[[ATTR4]]
+// CHECK-NEXT: [[ELT_ABS:%.*]] = tail call <4 x float> @llvm.fabs.v4f32(<4 x float> [[A]])
// CHECK-NEXT: ret <4 x float> [[ELT_ABS]]
//
float4 strict_elementwise_abs(float4 a) {
@@ -300,7 +300,7 @@ float4 strict_elementwise_trunc(float4 a) {
// CHECK-LABEL: define dso_local noundef <4 x float> @_Z31strict_elementwise_canonicalizeDv4_f
// CHECK-SAME: (<4 x float> noundef [[A:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[ELT_CANONICALIZE:%.*]] = tail call <4 x float> @llvm.canonicalize.v4f32(<4 x float> [[A]]) #[[ATTR4]]
+// CHECK-NEXT: [[ELT_CANONICALIZE:%.*]] = tail call <4 x float> @llvm.canonicalize.v4f32(<4 x float> [[A]])
// CHECK-NEXT: ret <4 x float> [[ELT_CANONICALIZE]]
//
float4 strict_elementwise_canonicalize(float4 a) {
@@ -310,7 +310,7 @@ float4 strict_elementwise_canonicalize(float4 a) {
// CHECK-LABEL: define dso_local noundef <4 x float> @_Z27strict_elementwise_copysignDv4_fS_
// CHECK-SAME: (<4 x float> noundef [[A:%.*]], <4 x float> noundef [[B:%.*]]) local_unnamed_addr #[[ATTR2]] {
// CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.copysign.v4f32(<4 x float> [[A]], <4 x float> [[B]]) #[[ATTR4]]
+// CHECK-NEXT: [[TMP0:%.*]] = tail call <4 x float> @llvm.copysign.v4f32(<4 x float> [[A]], <4 x float> [[B]])
// CHECK-NEXT: ret <4 x float> [[TMP0]]
//
float4 strict_elementwise_copysign(float4 a, float4 b) {
diff --git a/clang/test/CodeGen/strictfp_builtins.c b/clang/test/CodeGen/strictfp_builtins.c
index 053265dcc0667fa..30178dc3b2c9258 100644
--- a/clang/test/CodeGen/strictfp_builtins.c
+++ b/clang/test/CodeGen/strictfp_builtins.c
@@ -60,7 +60,7 @@ void test_fpclassify(double d) {
// CHECK-NEXT: [[H_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store half [[H:%.*]], ptr [[H_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[H_ADDR]], align 2
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 516) #[[ATTR5]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 516)
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.2, i32 noundef [[TMP2]]) #[[ATTR4]]
// CHECK-NEXT: ret void
@@ -76,7 +76,7 @@ void test_fp16_isinf(_Float16 h) {
// CHECK-NEXT: [[F_ADDR:%.*]] = alloca float, align 4
// CHECK-NEXT: store float [[F:%.*]], ptr [[F_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F_ADDR]], align 4
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 516) #[[ATTR5]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 516)
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.3, i32 noundef [[TMP2]]) #[[ATTR4]]
// CHECK-NEXT: ret void
@@ -92,7 +92,7 @@ void test_float_isinf(float f) {
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 516) #[[ATTR5]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 516)
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.4, i32 noundef [[TMP2]]) #[[ATTR4]]
// CHECK-NEXT: ret void
@@ -108,7 +108,7 @@ void test_double_isinf(double d) {
// CHECK-NEXT: [[H_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store half [[H:%.*]], ptr [[H_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[H_ADDR]], align 2
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 504) #[[ATTR5]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 504)
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.5, i32 noundef [[TMP2]]) #[[ATTR4]]
// CHECK-NEXT: ret void
@@ -124,7 +124,7 @@ void test_fp16_isfinite(_Float16 h) {
// CHECK-NEXT: [[F_ADDR:%.*]] = alloca float, align 4
// CHECK-NEXT: store float [[F:%.*]], ptr [[F_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F_ADDR]], align 4
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 504) #[[ATTR5]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 504)
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.6, i32 noundef [[TMP2]]) #[[ATTR4]]
// CHECK-NEXT: ret void
@@ -140,7 +140,7 @@ void test_float_isfinite(float f) {
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 504) #[[ATTR5]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 504)
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.7, i32 noundef [[TMP2]]) #[[ATTR4]]
// CHECK-NEXT: ret void
@@ -176,7 +176,7 @@ void test_isinf_sign(double d) {
// CHECK-NEXT: [[H_ADDR:%.*]] = alloca half, align 2
// CHECK-NEXT: store half [[H:%.*]], ptr [[H_ADDR]], align 2
// CHECK-NEXT: [[TMP0:%.*]] = load half, ptr [[H_ADDR]], align 2
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 3) #[[ATTR5]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f16(half [[TMP0]], i32 3)
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.9, i32 noundef [[TMP2]]) #[[ATTR4]]
// CHECK-NEXT: ret void
@@ -192,7 +192,7 @@ void test_fp16_isnan(_Float16 h) {
// CHECK-NEXT: [[F_ADDR:%.*]] = alloca float, align 4
// CHECK-NEXT: store float [[F:%.*]], ptr [[F_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load float, ptr [[F_ADDR]], align 4
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 3) #[[ATTR5]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f32(float [[TMP0]], i32 3)
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.10, i32 noundef [[TMP2]]) #[[ATTR4]]
// CHECK-NEXT: ret void
@@ -208,7 +208,7 @@ void test_float_isnan(float f) {
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 3) #[[ATTR5]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 3)
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.11, i32 noundef [[TMP2]]) #[[ATTR4]]
// CHECK-NEXT: ret void
@@ -224,7 +224,7 @@ void test_double_isnan(double d) {
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca double, align 8
// CHECK-NEXT: store double [[D:%.*]], ptr [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load double, ptr [[D_ADDR]], align 8
-// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 264) #[[ATTR5]]
+// CHECK-NEXT: [[TMP1:%.*]] = call i1 @llvm.is.fpclass.f64(double [[TMP0]], i32 264)
// CHECK-NEXT: [[TMP2:%.*]] = zext i1 [[TMP1]] to i32
// CHECK-NEXT: call void @p(ptr noundef @.str.12, i32 noundef [[TMP2]]) #[[ATTR4]]
// CHECK-NEXT: ret void
diff --git a/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl b/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
index 31f1aa60780b9e6..d263e563d0be5f4 100644
--- a/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
+++ b/clang/test/CodeGenOpenCL/cl20-device-side-enqueue-attributes.cl
@@ -109,7 +109,7 @@ kernel void device_side_enqueue(global float *a, global float *b, int i) {
// STRICTFP-NEXT: store i32 0, ptr [[FLAGS]], align 4
// STRICTFP-NEXT: [[TMP0:%.*]] = load target("spirv.Queue"), ptr [[DEFAULT_QUEUE]], align 4
// STRICTFP-NEXT: [[TMP1:%.*]] = load i32, ptr [[FLAGS]], align 4
-// STRICTFP-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[TMP]], ptr align 4 [[NDRANGE]], i32 4, i1 false) #[[ATTR5:[0-9]+]]
+// STRICTFP-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[TMP]], ptr align 4 [[NDRANGE]], i32 4, i1 false)
// STRICTFP-NEXT: [[BLOCK_SIZE:%.*]] = getelementptr inbounds nuw <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr [[BLOCK]], i32 0, i32 0
// STRICTFP-NEXT: store i32 24, ptr [[BLOCK_SIZE]], align 4
// STRICTFP-NEXT: [[BLOCK_ALIGN:%.*]] = getelementptr inbounds nuw <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr [[BLOCK]], i32 0, i32 1
@@ -126,7 +126,7 @@ kernel void device_side_enqueue(global float *a, global float *b, int i) {
// STRICTFP-NEXT: [[TMP4:%.*]] = load ptr addrspace(1), ptr [[B_ADDR]], align 4
// STRICTFP-NEXT: store ptr addrspace(1) [[TMP4]], ptr [[BLOCK_CAPTURED2]], align 4
// STRICTFP-NEXT: [[TMP5:%.*]] = addrspacecast ptr [[BLOCK]] to ptr addrspace(4)
-// STRICTFP-NEXT: [[TMP6:%.*]] = call spir_func i32 @__enqueue_kernel_basic(target("spirv.Queue") [[TMP0]], i32 [[TMP1]], ptr [[TMP]], ptr addrspace(4) addrspacecast (ptr @__device_side_enqueue_block_invoke_kernel to ptr addrspace(4)), ptr addrspace(4) [[TMP5]]) #[[ATTR5]]
+// STRICTFP-NEXT: [[TMP6:%.*]] = call spir_func i32 @__enqueue_kernel_basic(target("spirv.Queue") [[TMP0]], i32 [[TMP1]], ptr [[TMP]], ptr addrspace(4) addrspacecast (ptr @__device_side_enqueue_block_invoke_kernel to ptr addrspace(4)), ptr addrspace(4) [[TMP5]])
// STRICTFP-NEXT: ret void
//
//
@@ -144,7 +144,7 @@ kernel void device_side_enqueue(global float *a, global float *b, int i) {
// STRICTFP-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(4) [[BLOCK_CAPTURE_ADDR1]], align 4
// STRICTFP-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr addrspace(1) [[TMP0]], i32 [[TMP1]]
// STRICTFP-NEXT: [[TMP2:%.*]] = load float, ptr addrspace(1) [[ARRAYIDX]], align 4
-// STRICTFP-NEXT: [[TMP3:%.*]] = call float @llvm.experimental.constrained.fmuladd.f32(float 4.000000e+00, float [[TMP2]], float 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.control"(metadata !"rte"), "fpe.except"(metadata !"strict") ]
+// STRICTFP-NEXT: [[TMP3:%.*]] = call float @llvm.experimental.constrained.fmuladd.f32(float 4.000000e+00, float [[TMP2]], float 1.000000e+00, metadata !"round.tonearest", metadata !"fpexcept.strict") #[[ATTR5:[0-9]+]]
// STRICTFP-NEXT: [[BLOCK_CAPTURE_ADDR2:%.*]] = getelementptr inbounds nuw <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr addrspace(4) [[DOTBLOCK_DESCRIPTOR]], i32 0, i32 3
// STRICTFP-NEXT: [[TMP4:%.*]] = load ptr addrspace(1), ptr addrspace(4) [[BLOCK_CAPTURE_ADDR2]], align 4
// STRICTFP-NEXT: [[BLOCK_CAPTURE_ADDR3:%.*]] = getelementptr inbounds nuw <{ i32, i32, ptr addrspace(4), ptr addrspace(1), i32, ptr addrspace(1) }>, ptr addrspace(4) [[DOTBLOCK_DESCRIPTOR]], i32 0, i32 4
@@ -158,7 +158,7 @@ kernel void device_side_enqueue(global float *a, global float *b, int i) {
// STRICTFP-LABEL: define {{[^@]+}}@__device_side_enqueue_block_invoke_kernel
// STRICTFP-SAME: (ptr addrspace(4) [[TMP0:%.*]]) #[[ATTR4:[0-9]+]] {
// STRICTFP-NEXT: entry:
-// STRICTFP-NEXT: call spir_func void @__device_side_enqueue_block_invoke(ptr addrspace(4) [[TMP0]]) #[[ATTR5]]
+// STRICTFP-NEXT: call spir_func void @__device_side_enqueue_block_invoke(ptr addrspace(4) [[TMP0]])
// STRICTFP-NEXT: ret void
//
//.
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index db961ffdc856475..442384824ece42f 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -93,11 +93,15 @@ CallInst *IRBuilderBase::CreateCall(FunctionType *FTy, Value *Callee,
ArrayRef<OperandBundleDef> ActualBundlesRef = OpBundles;
SmallVector<OperandBundleDef, 2> ActualBundles;
+ bool doesCallAccessFPEnv = false;
if (IsFPConstrained) {
if (const auto *Func = dyn_cast<Function>(Callee)) {
+      // Some intrinsic functions in non-default FP mode must have FP operand
+      // bundles to indicate the side effect of accessing the FP environment.
if (Intrinsic::ID ID = Func->getIntrinsicID()) {
if (IntrinsicInst::canAccessFPEnvironment(ID)) {
bool NeedRound = true, NeedExcept = true;
+ doesCallAccessFPEnv = true;
for (const auto &Item : OpBundles) {
if (NeedRound && Item.getTag() == "fpe.control")
NeedRound = false;
@@ -112,11 +116,16 @@ CallInst *IRBuilderBase::CreateCall(FunctionType *FTy, Value *Callee,
ActualBundlesRef = ActualBundles;
}
}
+      // A call to a function that has the 'StrictFP' attribute also makes
+      // the call site 'StrictFP'.
+ else if (Func->hasFnAttribute(Attribute::StrictFP)) {
+ doesCallAccessFPEnv = true;
+ }
}
}
CallInst *CI = CallInst::Create(FTy, Callee, Args, ActualBundlesRef);
- if (IsFPConstrained)
+ if (IsFPConstrained && doesCallAccessFPEnv)
setConstrainedFPCallAttr(CI);
if (isa<FPMathOperator>(CI))
setFPAttrs(CI, FPMathTag, FMF);
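
(Illustrative note, not part of the patch.) The net effect of this patch is
that, inside an FP-constrained region, the builder now marks a call site
strictfp only when the callee can actually observe the FP environment. A
sketch of the predicate, under the names used in the hunk above:

#include "llvm/IR/Function.h"
#include "llvm/IR/IntrinsicInst.h"

// A constrained call site is needed only for intrinsics that may access
// the FP environment and for functions that are themselves strictfp.
static bool needsConstrainedFPCallSite(const llvm::Function *Callee) {
  using namespace llvm;
  if (Intrinsic::ID ID = Callee->getIntrinsicID())
    return IntrinsicInst::canAccessFPEnvironment(ID);
  return Callee->hasFnAttribute(Attribute::StrictFP);
}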
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll
index 418a98873eaa116..bdf73acefc7794e 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll
@@ -819,8 +819,8 @@ define float @test_pown_fast_f32_strictfp(float %x, i32 %y) #1 {
; CHECK-LABEL: define float @test_pown_fast_f32_strictfp
; CHECK-SAME: (float [[X:%.*]], i32 [[Y:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[__FABS:%.*]] = call fast float @llvm.fabs.f32(float [[X]]) #[[ATTR5:[0-9]+]]
-; CHECK-NEXT: [[__LOG2:%.*]] = call fast float @llvm.log2.f32(float [[__FABS]]) #[[ATTR5]]
+; CHECK-NEXT: [[__FABS:%.*]] = call fast float @llvm.fabs.f32(float [[X]])
+; CHECK-NEXT: [[__LOG2:%.*]] = call fast float @llvm.log2.f32(float [[__FABS]]) #[[ATTR5:[0-9]+]]
; CHECK-NEXT: [[POWNI2F:%.*]] = call fast float @llvm.experimental.constrained.sitofp.f32.i32(i32 [[Y]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
; CHECK-NEXT: [[__YLOGX:%.*]] = call fast float @llvm.experimental.constrained.fmul.f32(float [[POWNI2F]], float [[__LOG2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR5]] [ "fpe.control"(metadata !"dyn"), "fpe.except"(metadata !"strict") ]
; CHECK-NEXT: [[__EXP2:%.*]] = call fast float @llvm.exp2.f32(float [[__YLOGX]]) #[[ATTR5]]
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll b/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll
index 94d9092cda2b101..663a7a296d8e4f4 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll
@@ -133,18 +133,18 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_scope_agent_sco
define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float inreg %val) #1 {
; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7:[0-9]+]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP23:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7:[0-9]+]]
; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL:%.*]], float [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
@@ -153,7 +153,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_one_as_scope_un
; IR-ITERATIVE-NEXT: br label [[TMP16]]
; IR-ITERATIVE: 16:
; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP17]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP17]])
; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL]], float [[TMP19]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP18]], float [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
@@ -164,18 +164,18 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_one_as_scope_un
; IR-ITERATIVE-NEXT: ret float [[TMP24]]
;
; IR-DPP-LABEL: @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8:[0-9]+]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP23:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-DPP-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8:[0-9]+]]
; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL:%.*]], float [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
@@ -184,7 +184,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_one_as_scope_un
; IR-DPP-NEXT: br label [[TMP16]]
; IR-DPP: 16:
; IR-DPP-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP17]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP17]])
; IR-DPP-NEXT: [[TMP19:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP20:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL]], float [[TMP19]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP21:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP18]], float [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
@@ -200,23 +200,23 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_one_as_scope_un
define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float %val) #1 {
; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: br label [[COMPUTELOOP:%.*]]
; IR-ITERATIVE: 10:
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP23:%.*]] syncscope("one-as") monotonic, align 4
; IR-ITERATIVE-NEXT: br label [[TMP12:%.*]]
; IR-ITERATIVE: 12:
; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = phi float [ poison, [[COMPUTEEND:%.*]] ], [ [[TMP11]], [[TMP10:%.*]] ]
-; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP13]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP13]])
; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP14]], float [[TMP22:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = select i1 [[TMP28:%.*]], float [[TMP14]], float [[TMP15]]
; IR-ITERATIVE-NEXT: br label [[TMP17]]
@@ -227,10 +227,10 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_one_as_scope_un
; IR-ITERATIVE-NEXT: [[ACCUMULATOR:%.*]] = phi float [ -0.000000e+00, [[TMP2]] ], [ [[TMP23]], [[COMPUTELOOP]] ]
; IR-ITERATIVE-NEXT: [[OLDVALUEPHI:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP22]], [[COMPUTELOOP]] ]
; IR-ITERATIVE-NEXT: [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP9]], [[TMP2]] ], [ [[TMP26:%.*]], [[COMPUTELOOP]] ]
-; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = trunc i64 [[TMP19]] to i32
-; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[VAL:%.*]], i32 [[TMP20]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP22]] = call float @llvm.amdgcn.writelane.f32(float [[ACCUMULATOR]], i32 [[TMP20]], float [[OLDVALUEPHI]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[VAL:%.*]], i32 [[TMP20]])
+; IR-ITERATIVE-NEXT: [[TMP22]] = call float @llvm.amdgcn.writelane.f32(float [[ACCUMULATOR]], i32 [[TMP20]], float [[OLDVALUEPHI]])
; IR-ITERATIVE-NEXT: [[TMP23]] = call float @llvm.experimental.constrained.fadd.f32(float [[ACCUMULATOR]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = shl i64 1, [[TMP19]]
; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = xor i64 [[TMP24]], -1
@@ -242,31 +242,31 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_one_as_scope_un
; IR-ITERATIVE-NEXT: br i1 [[TMP28]], label [[TMP10]], label [[TMP12]]
;
; IR-DPP-LABEL: @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP34:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call float @llvm.amdgcn.set.inactive.f32(float [[VAL:%.*]], float -0.000000e+00) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP9]], i32 273, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call float @llvm.amdgcn.set.inactive.f32(float [[VAL:%.*]], float -0.000000e+00)
+; IR-DPP-NEXT: [[TMP10:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP9]], i32 273, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP9]], float [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP11]], i32 274, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP11]], i32 274, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP13:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP11]], float [[TMP12]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP13]], i32 276, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP13]], i32 276, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP15:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP13]], float [[TMP14]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP16:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP15]], i32 280, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP16:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP15]], i32 280, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP17:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP15]], float [[TMP16]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP17]], i32 322, i32 10, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP17]], i32 322, i32 10, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP19:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP17]], float [[TMP18]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP20:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP19]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP20:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP19]], i32 323, i32 12, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP21:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP19]], float [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP21]], i32 312, i32 15, i32 15, i1 false) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[TMP21]], i32 63) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP24:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP23]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP21]], i32 312, i32 15, i32 15, i1 false)
+; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[TMP21]], i32 63)
+; IR-DPP-NEXT: [[TMP24:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP23]])
; IR-DPP-NEXT: [[TMP25:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-DPP-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP28:%.*]]
; IR-DPP: 26:
@@ -274,8 +274,8 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_one_as_scope_un
; IR-DPP-NEXT: br label [[TMP28]]
; IR-DPP: 28:
; IR-DPP-NEXT: [[TMP29:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP27]], [[TMP26]] ]
-; IR-DPP-NEXT: [[TMP30:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP29]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP31:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP22]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP30:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP29]])
+; IR-DPP-NEXT: [[TMP31:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP22]])
; IR-DPP-NEXT: [[TMP32:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP30]], float [[TMP31]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP33:%.*]] = select i1 [[TMP25]], float [[TMP30]], float [[TMP32]]
; IR-DPP-NEXT: br label [[TMP34]]
@@ -289,16 +289,16 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_one_as_scope_un
define amdgpu_ps float @global_atomic_fsub_uni_address_uni_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, float inreg %val) #2 {
; IR-ITERATIVE-LABEL: @global_atomic_fsub_uni_address_uni_value_agent_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP23:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL:%.*]], float [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
@@ -309,7 +309,7 @@ define amdgpu_ps float @global_atomic_fsub_uni_address_uni_value_agent_scope_str
; IR-ITERATIVE-NEXT: br label [[TMP16]]
; IR-ITERATIVE: 16:
; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP17]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP17]])
; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL]], float [[TMP19]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP18]], float [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
@@ -320,16 +320,16 @@ define amdgpu_ps float @global_atomic_fsub_uni_address_uni_value_agent_scope_str
; IR-ITERATIVE-NEXT: ret float [[TMP24]]
;
; IR-DPP-LABEL: @global_atomic_fsub_uni_address_uni_value_agent_scope_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP23:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
; IR-DPP-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL:%.*]], float [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
@@ -340,7 +340,7 @@ define amdgpu_ps float @global_atomic_fsub_uni_address_uni_value_agent_scope_str
; IR-DPP-NEXT: br label [[TMP16]]
; IR-DPP: 16:
; IR-DPP-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP17]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP17]])
; IR-DPP-NEXT: [[TMP19:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP20:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL]], float [[TMP19]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP21:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP18]], float [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
@@ -356,23 +356,23 @@ define amdgpu_ps float @global_atomic_fsub_uni_address_uni_value_agent_scope_str
define amdgpu_ps float @global_atomic_fsub_uni_address_div_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, float %val) #2 {
; IR-ITERATIVE-LABEL: @global_atomic_fsub_uni_address_div_value_agent_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: br label [[COMPUTELOOP:%.*]]
; IR-ITERATIVE: 10:
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], float [[TMP23:%.*]] syncscope("agent") monotonic, align 4
; IR-ITERATIVE-NEXT: br label [[TMP12:%.*]]
; IR-ITERATIVE: 12:
; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = phi float [ poison, [[COMPUTEEND:%.*]] ], [ [[TMP11]], [[TMP10:%.*]] ]
-; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP13]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP13]])
; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[TMP14]], float [[TMP22:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = select i1 [[TMP28:%.*]], float [[TMP14]], float [[TMP15]]
; IR-ITERATIVE-NEXT: br label [[TMP17]]
@@ -383,10 +383,10 @@ define amdgpu_ps float @global_atomic_fsub_uni_address_div_value_agent_scope_str
; IR-ITERATIVE-NEXT: [[ACCUMULATOR:%.*]] = phi float [ -0.000000e+00, [[TMP2]] ], [ [[TMP23]], [[COMPUTELOOP]] ]
; IR-ITERATIVE-NEXT: [[OLDVALUEPHI:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP22]], [[COMPUTELOOP]] ]
; IR-ITERATIVE-NEXT: [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP9]], [[TMP2]] ], [ [[TMP26:%.*]], [[COMPUTELOOP]] ]
-; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = trunc i64 [[TMP19]] to i32
-; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[VAL:%.*]], i32 [[TMP20]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP22]] = call float @llvm.amdgcn.writelane.f32(float [[ACCUMULATOR]], i32 [[TMP20]], float [[OLDVALUEPHI]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[VAL:%.*]], i32 [[TMP20]])
+; IR-ITERATIVE-NEXT: [[TMP22]] = call float @llvm.amdgcn.writelane.f32(float [[ACCUMULATOR]], i32 [[TMP20]], float [[OLDVALUEPHI]])
; IR-ITERATIVE-NEXT: [[TMP23]] = call float @llvm.experimental.constrained.fadd.f32(float [[ACCUMULATOR]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = shl i64 1, [[TMP19]]
; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = xor i64 [[TMP24]], -1
@@ -398,31 +398,31 @@ define amdgpu_ps float @global_atomic_fsub_uni_address_div_value_agent_scope_str
; IR-ITERATIVE-NEXT: br i1 [[TMP28]], label [[TMP10]], label [[TMP12]]
;
; IR-DPP-LABEL: @global_atomic_fsub_uni_address_div_value_agent_scope_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP34:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call float @llvm.amdgcn.set.inactive.f32(float [[VAL:%.*]], float -0.000000e+00) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP9]], i32 273, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call float @llvm.amdgcn.set.inactive.f32(float [[VAL:%.*]], float -0.000000e+00)
+; IR-DPP-NEXT: [[TMP10:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP9]], i32 273, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP9]], float [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP11]], i32 274, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP11]], i32 274, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP13:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP11]], float [[TMP12]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP13]], i32 276, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP13]], i32 276, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP15:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP13]], float [[TMP14]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP16:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP15]], i32 280, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP16:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP15]], i32 280, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP17:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP15]], float [[TMP16]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP17]], i32 322, i32 10, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP17]], i32 322, i32 10, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP19:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP17]], float [[TMP18]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP20:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP19]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP20:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP19]], i32 323, i32 12, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP21:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP19]], float [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP21]], i32 312, i32 15, i32 15, i1 false) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[TMP21]], i32 63) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP24:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP23]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP21]], i32 312, i32 15, i32 15, i1 false)
+; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[TMP21]], i32 63)
+; IR-DPP-NEXT: [[TMP24:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP23]])
; IR-DPP-NEXT: [[TMP25:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-DPP-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP28:%.*]]
; IR-DPP: 26:
@@ -430,8 +430,8 @@ define amdgpu_ps float @global_atomic_fsub_uni_address_div_value_agent_scope_str
; IR-DPP-NEXT: br label [[TMP28]]
; IR-DPP: 28:
; IR-DPP-NEXT: [[TMP29:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP27]], [[TMP26]] ]
-; IR-DPP-NEXT: [[TMP30:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP29]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP31:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP22]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP30:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP29]])
+; IR-DPP-NEXT: [[TMP31:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP22]])
; IR-DPP-NEXT: [[TMP32:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[TMP30]], float [[TMP31]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP33:%.*]] = select i1 [[TMP25]], float [[TMP30]], float [[TMP32]]
; IR-DPP-NEXT: br label [[TMP34]]
@@ -566,15 +566,15 @@ define amdgpu_ps float @global_atomic_fmin_uni_address_div_value_agent_scope_uns
define amdgpu_ps float @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float inreg %val) #1{
; IR-ITERATIVE-LABEL: @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP19:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-ITERATIVE-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
; IR-ITERATIVE: 10:
@@ -582,7 +582,7 @@ define amdgpu_ps float @global_atomic_fmax_uni_address_uni_value_agent_scope_uns
; IR-ITERATIVE-NEXT: br label [[TMP12]]
; IR-ITERATIVE: 12:
; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP11]], [[TMP10]] ]
-; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP13]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP13]])
; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = select i1 [[TMP9]], float 0x7FF8000000000000, float [[VAL]]
; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = call float @llvm.experimental.constrained.maxnum.f32(float [[TMP14]], float [[TMP16]], metadata !"fpexcept.strict") #[[ATTR7]]
@@ -593,15 +593,15 @@ define amdgpu_ps float @global_atomic_fmax_uni_address_uni_value_agent_scope_uns
; IR-ITERATIVE-NEXT: ret float [[TMP20]]
;
; IR-DPP-LABEL: @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP19:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
; IR-DPP-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-DPP-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
; IR-DPP: 10:
@@ -609,7 +609,7 @@ define amdgpu_ps float @global_atomic_fmax_uni_address_uni_value_agent_scope_uns
; IR-DPP-NEXT: br label [[TMP12]]
; IR-DPP: 12:
; IR-DPP-NEXT: [[TMP13:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP11]], [[TMP10]] ]
-; IR-DPP-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP13]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP13]])
; IR-DPP-NEXT: [[TMP15:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP16:%.*]] = select i1 [[TMP9]], float 0x7FF8000000000000, float [[VAL]]
; IR-DPP-NEXT: [[TMP17:%.*]] = call float @llvm.experimental.constrained.maxnum.f32(float [[TMP14]], float [[TMP16]], metadata !"fpexcept.strict") #[[ATTR8]]
@@ -625,23 +625,23 @@ define amdgpu_ps float @global_atomic_fmax_uni_address_uni_value_agent_scope_uns
define amdgpu_ps float @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float %val) #1{
; IR-ITERATIVE-LABEL: @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: br label [[COMPUTELOOP:%.*]]
; IR-ITERATIVE: 10:
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[TMP23:%.*]] syncscope("agent") monotonic, align 4
; IR-ITERATIVE-NEXT: br label [[TMP12:%.*]]
; IR-ITERATIVE: 12:
; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = phi float [ poison, [[COMPUTEEND:%.*]] ], [ [[TMP11]], [[TMP10:%.*]] ]
-; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP13]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP13]])
; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = call float @llvm.experimental.constrained.maxnum.f32(float [[TMP14]], float [[TMP22:%.*]], metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = select i1 [[TMP28:%.*]], float [[TMP14]], float [[TMP15]]
; IR-ITERATIVE-NEXT: br label [[TMP17]]
@@ -652,10 +652,10 @@ define amdgpu_ps float @global_atomic_fmax_uni_address_div_value_agent_scope_uns
; IR-ITERATIVE-NEXT: [[ACCUMULATOR:%.*]] = phi float [ 0x7FF8000000000000, [[TMP2]] ], [ [[TMP23]], [[COMPUTELOOP]] ]
; IR-ITERATIVE-NEXT: [[OLDVALUEPHI:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP22]], [[COMPUTELOOP]] ]
; IR-ITERATIVE-NEXT: [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP9]], [[TMP2]] ], [ [[TMP26:%.*]], [[COMPUTELOOP]] ]
-; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = trunc i64 [[TMP19]] to i32
-; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[VAL:%.*]], i32 [[TMP20]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP22]] = call float @llvm.amdgcn.writelane.f32(float [[ACCUMULATOR]], i32 [[TMP20]], float [[OLDVALUEPHI]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[VAL:%.*]], i32 [[TMP20]])
+; IR-ITERATIVE-NEXT: [[TMP22]] = call float @llvm.amdgcn.writelane.f32(float [[ACCUMULATOR]], i32 [[TMP20]], float [[OLDVALUEPHI]])
; IR-ITERATIVE-NEXT: [[TMP23]] = call float @llvm.experimental.constrained.maxnum.f32(float [[ACCUMULATOR]], float [[TMP21]], metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = shl i64 1, [[TMP19]]
; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = xor i64 [[TMP24]], -1
@@ -667,31 +667,31 @@ define amdgpu_ps float @global_atomic_fmax_uni_address_div_value_agent_scope_uns
; IR-ITERATIVE-NEXT: br i1 [[TMP28]], label [[TMP10]], label [[TMP12]]
;
; IR-DPP-LABEL: @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP34:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call float @llvm.amdgcn.set.inactive.f32(float [[VAL:%.*]], float 0x7FF8000000000000) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP9]], i32 273, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call float @llvm.amdgcn.set.inactive.f32(float [[VAL:%.*]], float 0x7FF8000000000000)
+; IR-DPP-NEXT: [[TMP10:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP9]], i32 273, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.maxnum.f32(float [[TMP9]], float [[TMP10]], metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP11]], i32 274, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP11]], i32 274, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP13:%.*]] = call float @llvm.experimental.constrained.maxnum.f32(float [[TMP11]], float [[TMP12]], metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP13]], i32 276, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP13]], i32 276, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP15:%.*]] = call float @llvm.experimental.constrained.maxnum.f32(float [[TMP13]], float [[TMP14]], metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP16:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP15]], i32 280, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP16:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP15]], i32 280, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP17:%.*]] = call float @llvm.experimental.constrained.maxnum.f32(float [[TMP15]], float [[TMP16]], metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP17]], i32 322, i32 10, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP17]], i32 322, i32 10, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP19:%.*]] = call float @llvm.experimental.constrained.maxnum.f32(float [[TMP17]], float [[TMP18]], metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP20:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP19]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP20:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP19]], i32 323, i32 12, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP21:%.*]] = call float @llvm.experimental.constrained.maxnum.f32(float [[TMP19]], float [[TMP20]], metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP21]], i32 312, i32 15, i32 15, i1 false) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[TMP21]], i32 63) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP24:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP23]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP21]], i32 312, i32 15, i32 15, i1 false)
+; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[TMP21]], i32 63)
+; IR-DPP-NEXT: [[TMP24:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP23]])
; IR-DPP-NEXT: [[TMP25:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-DPP-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP28:%.*]]
; IR-DPP: 26:
@@ -699,8 +699,8 @@ define amdgpu_ps float @global_atomic_fmax_uni_address_div_value_agent_scope_uns
; IR-DPP-NEXT: br label [[TMP28]]
; IR-DPP: 28:
; IR-DPP-NEXT: [[TMP29:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP27]], [[TMP26]] ]
-; IR-DPP-NEXT: [[TMP30:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP29]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP31:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP22]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP30:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP29]])
+; IR-DPP-NEXT: [[TMP31:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP22]])
; IR-DPP-NEXT: [[TMP32:%.*]] = call float @llvm.experimental.constrained.maxnum.f32(float [[TMP30]], float [[TMP31]], metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP33:%.*]] = select i1 [[TMP25]], float [[TMP30]], float [[TMP32]]
; IR-DPP-NEXT: br label [[TMP34]]
@@ -714,16 +714,16 @@ define amdgpu_ps float @global_atomic_fmax_uni_address_div_value_agent_scope_uns
define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_system_scope_strictfp(ptr addrspace(1) inreg %ptr, float inreg %val) #2 {
; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_uni_value_system_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP23:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL:%.*]], float [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
@@ -734,7 +734,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_system_scope_st
; IR-ITERATIVE-NEXT: br label [[TMP16]]
; IR-ITERATIVE: 16:
; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP17]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP17]])
; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL]], float [[TMP19]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP18]], float [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
@@ -745,16 +745,16 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_system_scope_st
; IR-ITERATIVE-NEXT: ret float [[TMP24]]
;
; IR-DPP-LABEL: @global_atomic_fadd_uni_address_uni_value_system_scope_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP23:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
; IR-DPP-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL:%.*]], float [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
@@ -765,7 +765,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_system_scope_st
; IR-DPP-NEXT: br label [[TMP16]]
; IR-DPP: 16:
; IR-DPP-NEXT: [[TMP17:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP17]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP17]])
; IR-DPP-NEXT: [[TMP19:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP20:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL]], float [[TMP19]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP21:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP18]], float [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
@@ -781,23 +781,23 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_system_scope_st
define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_system_scope_strictfp(ptr addrspace(1) inreg %ptr, float %val) #2 {
; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_div_value_system_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: br label [[COMPUTELOOP:%.*]]
; IR-ITERATIVE: 10:
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP23:%.*]] monotonic, align 4
; IR-ITERATIVE-NEXT: br label [[TMP12:%.*]]
; IR-ITERATIVE: 12:
; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = phi float [ poison, [[COMPUTEEND:%.*]] ], [ [[TMP11]], [[TMP10:%.*]] ]
-; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP13]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP13]])
; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP14]], float [[TMP22:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = select i1 [[TMP28:%.*]], float [[TMP14]], float [[TMP15]]
; IR-ITERATIVE-NEXT: br label [[TMP17]]
@@ -808,10 +808,10 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_system_scope_st
; IR-ITERATIVE-NEXT: [[ACCUMULATOR:%.*]] = phi float [ -0.000000e+00, [[TMP2]] ], [ [[TMP23]], [[COMPUTELOOP]] ]
; IR-ITERATIVE-NEXT: [[OLDVALUEPHI:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP22]], [[COMPUTELOOP]] ]
; IR-ITERATIVE-NEXT: [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP9]], [[TMP2]] ], [ [[TMP26:%.*]], [[COMPUTELOOP]] ]
-; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = trunc i64 [[TMP19]] to i32
-; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[VAL:%.*]], i32 [[TMP20]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP22]] = call float @llvm.amdgcn.writelane.f32(float [[ACCUMULATOR]], i32 [[TMP20]], float [[OLDVALUEPHI]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[VAL:%.*]], i32 [[TMP20]])
+; IR-ITERATIVE-NEXT: [[TMP22]] = call float @llvm.amdgcn.writelane.f32(float [[ACCUMULATOR]], i32 [[TMP20]], float [[OLDVALUEPHI]])
; IR-ITERATIVE-NEXT: [[TMP23]] = call float @llvm.experimental.constrained.fadd.f32(float [[ACCUMULATOR]], float [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = shl i64 1, [[TMP19]]
; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = xor i64 [[TMP24]], -1
@@ -823,31 +823,31 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_system_scope_st
; IR-ITERATIVE-NEXT: br i1 [[TMP28]], label [[TMP10]], label [[TMP12]]
;
; IR-DPP-LABEL: @global_atomic_fadd_uni_address_div_value_system_scope_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP34:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call float @llvm.amdgcn.set.inactive.f32(float [[VAL:%.*]], float -0.000000e+00) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP9]], i32 273, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call float @llvm.amdgcn.set.inactive.f32(float [[VAL:%.*]], float -0.000000e+00)
+; IR-DPP-NEXT: [[TMP10:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP9]], i32 273, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP9]], float [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP11]], i32 274, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP11]], i32 274, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP13:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP11]], float [[TMP12]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP13]], i32 276, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP13]], i32 276, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP15:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP13]], float [[TMP14]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP16:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP15]], i32 280, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP16:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP15]], i32 280, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP17:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP15]], float [[TMP16]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP17]], i32 322, i32 10, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP17]], i32 322, i32 10, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP19:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP17]], float [[TMP18]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP20:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP19]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP20:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP19]], i32 323, i32 12, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP21:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP19]], float [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP21]], i32 312, i32 15, i32 15, i1 false) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[TMP21]], i32 63) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP24:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP23]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP21]], i32 312, i32 15, i32 15, i1 false)
+; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[TMP21]], i32 63)
+; IR-DPP-NEXT: [[TMP24:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP23]])
; IR-DPP-NEXT: [[TMP25:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-DPP-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP28:%.*]]
; IR-DPP: 26:
@@ -855,8 +855,8 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_system_scope_st
; IR-DPP-NEXT: br label [[TMP28]]
; IR-DPP: 28:
; IR-DPP-NEXT: [[TMP29:%.*]] = phi float [ poison, [[TMP2]] ], [ [[TMP27]], [[TMP26]] ]
-; IR-DPP-NEXT: [[TMP30:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP29]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP31:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP22]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP30:%.*]] = call float @llvm.amdgcn.readfirstlane.f32(float [[TMP29]])
+; IR-DPP-NEXT: [[TMP31:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP22]])
; IR-DPP-NEXT: [[TMP32:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP30]], float [[TMP31]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP33:%.*]] = select i1 [[TMP25]], float [[TMP30]], float [[TMP32]]
; IR-DPP-NEXT: br label [[TMP34]]
@@ -1103,16 +1103,16 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_scope_a
define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1 {
; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP23:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
@@ -1123,7 +1123,7 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_one_as_
; IR-ITERATIVE-NEXT: br label [[TMP16]]
; IR-ITERATIVE: 16:
; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP17]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP17]])
; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP19]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP18]], double [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
@@ -1134,16 +1134,16 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_one_as_
; IR-ITERATIVE-NEXT: ret double [[TMP24]]
;
; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP23:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
@@ -1154,7 +1154,7 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_one_as_
; IR-DPP-NEXT: br label [[TMP16]]
; IR-DPP: 16:
; IR-DPP-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-DPP-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP17]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP17]])
; IR-DPP-NEXT: [[TMP19:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP20:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP19]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP21:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP18]], double [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
@@ -1170,23 +1170,23 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_one_as_
define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double %val) #1 {
; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: br label [[COMPUTELOOP:%.*]]
; IR-ITERATIVE: 10:
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP23:%.*]] syncscope("one-as") monotonic, align 8
; IR-ITERATIVE-NEXT: br label [[TMP12:%.*]]
; IR-ITERATIVE: 12:
; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = phi double [ poison, [[COMPUTEEND:%.*]] ], [ [[TMP11]], [[TMP10:%.*]] ]
-; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP13]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP13]])
; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP14]], double [[TMP22:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = select i1 [[TMP28:%.*]], double [[TMP14]], double [[TMP15]]
; IR-ITERATIVE-NEXT: br label [[TMP17]]
@@ -1197,10 +1197,10 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_one_as_
; IR-ITERATIVE-NEXT: [[ACCUMULATOR:%.*]] = phi double [ -0.000000e+00, [[TMP2]] ], [ [[TMP23]], [[COMPUTELOOP]] ]
; IR-ITERATIVE-NEXT: [[OLDVALUEPHI:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP22]], [[COMPUTELOOP]] ]
; IR-ITERATIVE-NEXT: [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP9]], [[TMP2]] ], [ [[TMP26:%.*]], [[COMPUTELOOP]] ]
-; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = trunc i64 [[TMP19]] to i32
-; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[VAL:%.*]], i32 [[TMP20]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP22]] = call double @llvm.amdgcn.writelane.f64(double [[ACCUMULATOR]], i32 [[TMP20]], double [[OLDVALUEPHI]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[VAL:%.*]], i32 [[TMP20]])
+; IR-ITERATIVE-NEXT: [[TMP22]] = call double @llvm.amdgcn.writelane.f64(double [[ACCUMULATOR]], i32 [[TMP20]], double [[OLDVALUEPHI]])
; IR-ITERATIVE-NEXT: [[TMP23]] = call double @llvm.experimental.constrained.fadd.f64(double [[ACCUMULATOR]], double [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = shl i64 1, [[TMP19]]
; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = xor i64 [[TMP24]], -1
@@ -1212,31 +1212,31 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_one_as_
; IR-ITERATIVE-NEXT: br i1 [[TMP28]], label [[TMP10]], label [[TMP12]]
;
; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP34:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call double @llvm.amdgcn.set.inactive.f64(double [[VAL:%.*]], double -0.000000e+00) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP9]], i32 273, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call double @llvm.amdgcn.set.inactive.f64(double [[VAL:%.*]], double -0.000000e+00)
+; IR-DPP-NEXT: [[TMP10:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP9]], i32 273, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP9]], double [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP11]], i32 274, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP11]], i32 274, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP13:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP11]], double [[TMP12]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP13]], i32 276, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP13]], i32 276, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP15:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP13]], double [[TMP14]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP16:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP15]], i32 280, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP16:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP15]], i32 280, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP17:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP15]], double [[TMP16]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP17]], i32 322, i32 10, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP17]], i32 322, i32 10, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP19:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP17]], double [[TMP18]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP20:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP19]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP20:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP19]], i32 323, i32 12, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP21:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP19]], double [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP22:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP21]], i32 312, i32 15, i32 15, i1 false) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[TMP21]], i32 63) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP24:%.*]] = call double @llvm.amdgcn.strict.wwm.f64(double [[TMP23]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP22:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP21]], i32 312, i32 15, i32 15, i1 false)
+; IR-DPP-NEXT: [[TMP23:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[TMP21]], i32 63)
+; IR-DPP-NEXT: [[TMP24:%.*]] = call double @llvm.amdgcn.strict.wwm.f64(double [[TMP23]])
; IR-DPP-NEXT: [[TMP25:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-DPP-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP28:%.*]]
; IR-DPP: 26:
@@ -1244,8 +1244,8 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_one_as_
; IR-DPP-NEXT: br label [[TMP28]]
; IR-DPP: 28:
; IR-DPP-NEXT: [[TMP29:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP27]], [[TMP26]] ]
-; IR-DPP-NEXT: [[TMP30:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP29]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP31:%.*]] = call double @llvm.amdgcn.strict.wwm.f64(double [[TMP22]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP30:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP29]])
+; IR-DPP-NEXT: [[TMP31:%.*]] = call double @llvm.amdgcn.strict.wwm.f64(double [[TMP22]])
; IR-DPP-NEXT: [[TMP32:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP30]], double [[TMP31]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP33:%.*]] = select i1 [[TMP25]], double [[TMP30]], double [[TMP32]]
; IR-DPP-NEXT: br label [[TMP34]]
@@ -1259,16 +1259,16 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_one_as_
define amdgpu_ps double @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #2 {
; IR-ITERATIVE-LABEL: @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP23:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
@@ -1279,7 +1279,7 @@ define amdgpu_ps double @global_atomic_fsub_double_uni_address_uni_value_agent_s
; IR-ITERATIVE-NEXT: br label [[TMP16]]
; IR-ITERATIVE: 16:
; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP17]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP17]])
; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP19]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP18]], double [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
@@ -1290,16 +1290,16 @@ define amdgpu_ps double @global_atomic_fsub_double_uni_address_uni_value_agent_s
; IR-ITERATIVE-NEXT: ret double [[TMP24]]
;
; IR-DPP-LABEL: @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP23:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
@@ -1310,7 +1310,7 @@ define amdgpu_ps double @global_atomic_fsub_double_uni_address_uni_value_agent_s
; IR-DPP-NEXT: br label [[TMP16]]
; IR-DPP: 16:
; IR-DPP-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-DPP-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP17]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP17]])
; IR-DPP-NEXT: [[TMP19:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP20:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP19]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP21:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP18]], double [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
@@ -1326,23 +1326,23 @@ define amdgpu_ps double @global_atomic_fsub_double_uni_address_uni_value_agent_s
define amdgpu_ps double @global_atomic_fsub_double_uni_address_div_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, double %val) #2 {
; IR-ITERATIVE-LABEL: @global_atomic_fsub_double_uni_address_div_value_agent_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: br label [[COMPUTELOOP:%.*]]
; IR-ITERATIVE: 10:
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], double [[TMP23:%.*]] syncscope("agent") monotonic, align 8
; IR-ITERATIVE-NEXT: br label [[TMP12:%.*]]
; IR-ITERATIVE: 12:
; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = phi double [ poison, [[COMPUTEEND:%.*]] ], [ [[TMP11]], [[TMP10:%.*]] ]
-; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP13]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP13]])
; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[TMP14]], double [[TMP22:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = select i1 [[TMP28:%.*]], double [[TMP14]], double [[TMP15]]
; IR-ITERATIVE-NEXT: br label [[TMP17]]
@@ -1353,10 +1353,10 @@ define amdgpu_ps double @global_atomic_fsub_double_uni_address_div_value_agent_s
; IR-ITERATIVE-NEXT: [[ACCUMULATOR:%.*]] = phi double [ -0.000000e+00, [[TMP2]] ], [ [[TMP23]], [[COMPUTELOOP]] ]
; IR-ITERATIVE-NEXT: [[OLDVALUEPHI:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP22]], [[COMPUTELOOP]] ]
; IR-ITERATIVE-NEXT: [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP9]], [[TMP2]] ], [ [[TMP26:%.*]], [[COMPUTELOOP]] ]
-; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = trunc i64 [[TMP19]] to i32
-; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[VAL:%.*]], i32 [[TMP20]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP22]] = call double @llvm.amdgcn.writelane.f64(double [[ACCUMULATOR]], i32 [[TMP20]], double [[OLDVALUEPHI]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[VAL:%.*]], i32 [[TMP20]])
+; IR-ITERATIVE-NEXT: [[TMP22]] = call double @llvm.amdgcn.writelane.f64(double [[ACCUMULATOR]], i32 [[TMP20]], double [[OLDVALUEPHI]])
; IR-ITERATIVE-NEXT: [[TMP23]] = call double @llvm.experimental.constrained.fadd.f64(double [[ACCUMULATOR]], double [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = shl i64 1, [[TMP19]]
; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = xor i64 [[TMP24]], -1
@@ -1368,31 +1368,31 @@ define amdgpu_ps double @global_atomic_fsub_double_uni_address_div_value_agent_s
; IR-ITERATIVE-NEXT: br i1 [[TMP28]], label [[TMP10]], label [[TMP12]]
;
; IR-DPP-LABEL: @global_atomic_fsub_double_uni_address_div_value_agent_scope_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP34:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call double @llvm.amdgcn.set.inactive.f64(double [[VAL:%.*]], double -0.000000e+00) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP9]], i32 273, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call double @llvm.amdgcn.set.inactive.f64(double [[VAL:%.*]], double -0.000000e+00)
+; IR-DPP-NEXT: [[TMP10:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP9]], i32 273, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP9]], double [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP11]], i32 274, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP11]], i32 274, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP13:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP11]], double [[TMP12]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP13]], i32 276, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP13]], i32 276, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP15:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP13]], double [[TMP14]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP16:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP15]], i32 280, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP16:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP15]], i32 280, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP17:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP15]], double [[TMP16]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP17]], i32 322, i32 10, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP17]], i32 322, i32 10, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP19:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP17]], double [[TMP18]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP20:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP19]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP20:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP19]], i32 323, i32 12, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP21:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP19]], double [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP22:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP21]], i32 312, i32 15, i32 15, i1 false) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[TMP21]], i32 63) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP24:%.*]] = call double @llvm.amdgcn.strict.wwm.f64(double [[TMP23]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP22:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP21]], i32 312, i32 15, i32 15, i1 false)
+; IR-DPP-NEXT: [[TMP23:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[TMP21]], i32 63)
+; IR-DPP-NEXT: [[TMP24:%.*]] = call double @llvm.amdgcn.strict.wwm.f64(double [[TMP23]])
; IR-DPP-NEXT: [[TMP25:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-DPP-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP28:%.*]]
; IR-DPP: 26:
@@ -1400,8 +1400,8 @@ define amdgpu_ps double @global_atomic_fsub_double_uni_address_div_value_agent_s
; IR-DPP-NEXT: br label [[TMP28]]
; IR-DPP: 28:
; IR-DPP-NEXT: [[TMP29:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP27]], [[TMP26]] ]
-; IR-DPP-NEXT: [[TMP30:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP29]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP31:%.*]] = call double @llvm.amdgcn.strict.wwm.f64(double [[TMP22]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP30:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP29]])
+; IR-DPP-NEXT: [[TMP31:%.*]] = call double @llvm.amdgcn.strict.wwm.f64(double [[TMP22]])
; IR-DPP-NEXT: [[TMP32:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[TMP30]], double [[TMP31]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP33:%.*]] = select i1 [[TMP25]], double [[TMP30]], double [[TMP32]]
; IR-DPP-NEXT: br label [[TMP34]]
@@ -1536,15 +1536,15 @@ define amdgpu_ps double @global_atomic_fmin_double_uni_address_div_value_agent_s
define amdgpu_ps double @global_atomic__fmax_double_uni_address_uni_value_agent_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1{
; IR-ITERATIVE-LABEL: @global_atomic__fmax_double_uni_address_uni_value_agent_scope_unsafe_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP19:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-ITERATIVE-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
; IR-ITERATIVE: 10:
@@ -1552,7 +1552,7 @@ define amdgpu_ps double @global_atomic__fmax_double_uni_address_uni_value_agent_
; IR-ITERATIVE-NEXT: br label [[TMP12]]
; IR-ITERATIVE: 12:
; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP11]], [[TMP10]] ]
-; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP13]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP13]])
; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = select i1 [[TMP9]], double 0x7FF8000000000000, double [[VAL]]
; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = call double @llvm.experimental.constrained.maxnum.f64(double [[TMP14]], double [[TMP16]], metadata !"fpexcept.strict") #[[ATTR7]]
@@ -1563,15 +1563,15 @@ define amdgpu_ps double @global_atomic__fmax_double_uni_address_uni_value_agent_
; IR-ITERATIVE-NEXT: ret double [[TMP20]]
;
; IR-DPP-LABEL: @global_atomic__fmax_double_uni_address_uni_value_agent_scope_unsafe_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP19:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
; IR-DPP-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-DPP-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
; IR-DPP: 10:
@@ -1579,7 +1579,7 @@ define amdgpu_ps double @global_atomic__fmax_double_uni_address_uni_value_agent_
; IR-DPP-NEXT: br label [[TMP12]]
; IR-DPP: 12:
; IR-DPP-NEXT: [[TMP13:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP11]], [[TMP10]] ]
-; IR-DPP-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP13]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP13]])
; IR-DPP-NEXT: [[TMP15:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP16:%.*]] = select i1 [[TMP9]], double 0x7FF8000000000000, double [[VAL]]
; IR-DPP-NEXT: [[TMP17:%.*]] = call double @llvm.experimental.constrained.maxnum.f64(double [[TMP14]], double [[TMP16]], metadata !"fpexcept.strict") #[[ATTR8]]
@@ -1595,23 +1595,23 @@ define amdgpu_ps double @global_atomic__fmax_double_uni_address_uni_value_agent_
define amdgpu_ps double @global_atomic__fmax_double_uni_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double %val) #1{
; IR-ITERATIVE-LABEL: @global_atomic__fmax_double_uni_address_div_value_agent_scope_unsafe_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: br label [[COMPUTELOOP:%.*]]
; IR-ITERATIVE: 10:
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[TMP23:%.*]] syncscope("agent") monotonic, align 8
; IR-ITERATIVE-NEXT: br label [[TMP12:%.*]]
; IR-ITERATIVE: 12:
; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = phi double [ poison, [[COMPUTEEND:%.*]] ], [ [[TMP11]], [[TMP10:%.*]] ]
-; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP13]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP13]])
; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = call double @llvm.experimental.constrained.maxnum.f64(double [[TMP14]], double [[TMP22:%.*]], metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = select i1 [[TMP28:%.*]], double [[TMP14]], double [[TMP15]]
; IR-ITERATIVE-NEXT: br label [[TMP17]]
@@ -1622,10 +1622,10 @@ define amdgpu_ps double @global_atomic__fmax_double_uni_address_div_value_agent_
; IR-ITERATIVE-NEXT: [[ACCUMULATOR:%.*]] = phi double [ 0x7FF8000000000000, [[TMP2]] ], [ [[TMP23]], [[COMPUTELOOP]] ]
; IR-ITERATIVE-NEXT: [[OLDVALUEPHI:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP22]], [[COMPUTELOOP]] ]
; IR-ITERATIVE-NEXT: [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP9]], [[TMP2]] ], [ [[TMP26:%.*]], [[COMPUTELOOP]] ]
-; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = trunc i64 [[TMP19]] to i32
-; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[VAL:%.*]], i32 [[TMP20]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP22]] = call double @llvm.amdgcn.writelane.f64(double [[ACCUMULATOR]], i32 [[TMP20]], double [[OLDVALUEPHI]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[VAL:%.*]], i32 [[TMP20]])
+; IR-ITERATIVE-NEXT: [[TMP22]] = call double @llvm.amdgcn.writelane.f64(double [[ACCUMULATOR]], i32 [[TMP20]], double [[OLDVALUEPHI]])
; IR-ITERATIVE-NEXT: [[TMP23]] = call double @llvm.experimental.constrained.maxnum.f64(double [[ACCUMULATOR]], double [[TMP21]], metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = shl i64 1, [[TMP19]]
; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = xor i64 [[TMP24]], -1
@@ -1637,31 +1637,31 @@ define amdgpu_ps double @global_atomic__fmax_double_uni_address_div_value_agent_
; IR-ITERATIVE-NEXT: br i1 [[TMP28]], label [[TMP10]], label [[TMP12]]
;
; IR-DPP-LABEL: @global_atomic__fmax_double_uni_address_div_value_agent_scope_unsafe_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP34:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call double @llvm.amdgcn.set.inactive.f64(double [[VAL:%.*]], double 0x7FF8000000000000) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP9]], i32 273, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call double @llvm.amdgcn.set.inactive.f64(double [[VAL:%.*]], double 0x7FF8000000000000)
+; IR-DPP-NEXT: [[TMP10:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP9]], i32 273, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.maxnum.f64(double [[TMP9]], double [[TMP10]], metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP11]], i32 274, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP11]], i32 274, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP13:%.*]] = call double @llvm.experimental.constrained.maxnum.f64(double [[TMP11]], double [[TMP12]], metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP13]], i32 276, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP13]], i32 276, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP15:%.*]] = call double @llvm.experimental.constrained.maxnum.f64(double [[TMP13]], double [[TMP14]], metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP16:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP15]], i32 280, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP16:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP15]], i32 280, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP17:%.*]] = call double @llvm.experimental.constrained.maxnum.f64(double [[TMP15]], double [[TMP16]], metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP17]], i32 322, i32 10, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP17]], i32 322, i32 10, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP19:%.*]] = call double @llvm.experimental.constrained.maxnum.f64(double [[TMP17]], double [[TMP18]], metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP20:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP19]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP20:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP19]], i32 323, i32 12, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP21:%.*]] = call double @llvm.experimental.constrained.maxnum.f64(double [[TMP19]], double [[TMP20]], metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP22:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP21]], i32 312, i32 15, i32 15, i1 false) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[TMP21]], i32 63) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP24:%.*]] = call double @llvm.amdgcn.strict.wwm.f64(double [[TMP23]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP22:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP21]], i32 312, i32 15, i32 15, i1 false)
+; IR-DPP-NEXT: [[TMP23:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[TMP21]], i32 63)
+; IR-DPP-NEXT: [[TMP24:%.*]] = call double @llvm.amdgcn.strict.wwm.f64(double [[TMP23]])
; IR-DPP-NEXT: [[TMP25:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-DPP-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP28:%.*]]
; IR-DPP: 26:
@@ -1669,8 +1669,8 @@ define amdgpu_ps double @global_atomic__fmax_double_uni_address_div_value_agent_
; IR-DPP-NEXT: br label [[TMP28]]
; IR-DPP: 28:
; IR-DPP-NEXT: [[TMP29:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP27]], [[TMP26]] ]
-; IR-DPP-NEXT: [[TMP30:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP29]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP31:%.*]] = call double @llvm.amdgcn.strict.wwm.f64(double [[TMP22]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP30:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP29]])
+; IR-DPP-NEXT: [[TMP31:%.*]] = call double @llvm.amdgcn.strict.wwm.f64(double [[TMP22]])
; IR-DPP-NEXT: [[TMP32:%.*]] = call double @llvm.experimental.constrained.maxnum.f64(double [[TMP30]], double [[TMP31]], metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP33:%.*]] = select i1 [[TMP25]], double [[TMP30]], double [[TMP32]]
; IR-DPP-NEXT: br label [[TMP34]]
@@ -1684,16 +1684,16 @@ define amdgpu_ps double @global_atomic__fmax_double_uni_address_div_value_agent_
define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #2 {
; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP23:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
@@ -1704,7 +1704,7 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_system_
; IR-ITERATIVE-NEXT: br label [[TMP16]]
; IR-ITERATIVE: 16:
; IR-ITERATIVE-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP17]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP17]])
; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP19]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP18]], double [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
@@ -1715,16 +1715,16 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_system_
; IR-ITERATIVE-NEXT: ret double [[TMP24]]
;
; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP23:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
@@ -1735,7 +1735,7 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_system_
; IR-DPP-NEXT: br label [[TMP16]]
; IR-DPP: 16:
; IR-DPP-NEXT: [[TMP17:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP15]], [[TMP14]] ]
-; IR-DPP-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP17]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP17]])
; IR-DPP-NEXT: [[TMP19:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP20:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL]], double [[TMP19]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP21:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP18]], double [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
@@ -1751,23 +1751,23 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_system_
define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_system_scope_strictfp(ptr addrspace(1) inreg %ptr, double %val) #2 {
; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_div_value_system_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: br label [[COMPUTELOOP:%.*]]
; IR-ITERATIVE: 10:
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP23:%.*]] monotonic, align 4
; IR-ITERATIVE-NEXT: br label [[TMP12:%.*]]
; IR-ITERATIVE: 12:
; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = phi double [ poison, [[COMPUTEEND:%.*]] ], [ [[TMP11]], [[TMP10:%.*]] ]
-; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP13]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP13]])
; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP14]], double [[TMP22:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = select i1 [[TMP28:%.*]], double [[TMP14]], double [[TMP15]]
; IR-ITERATIVE-NEXT: br label [[TMP17]]
@@ -1778,10 +1778,10 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_system_
; IR-ITERATIVE-NEXT: [[ACCUMULATOR:%.*]] = phi double [ -0.000000e+00, [[TMP2]] ], [ [[TMP23]], [[COMPUTELOOP]] ]
; IR-ITERATIVE-NEXT: [[OLDVALUEPHI:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP22]], [[COMPUTELOOP]] ]
; IR-ITERATIVE-NEXT: [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP9]], [[TMP2]] ], [ [[TMP26:%.*]], [[COMPUTELOOP]] ]
-; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
; IR-ITERATIVE-NEXT: [[TMP20:%.*]] = trunc i64 [[TMP19]] to i32
-; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[VAL:%.*]], i32 [[TMP20]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP22]] = call double @llvm.amdgcn.writelane.f64(double [[ACCUMULATOR]], i32 [[TMP20]], double [[OLDVALUEPHI]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[VAL:%.*]], i32 [[TMP20]])
+; IR-ITERATIVE-NEXT: [[TMP22]] = call double @llvm.amdgcn.writelane.f64(double [[ACCUMULATOR]], i32 [[TMP20]], double [[OLDVALUEPHI]])
; IR-ITERATIVE-NEXT: [[TMP23]] = call double @llvm.experimental.constrained.fadd.f64(double [[ACCUMULATOR]], double [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = shl i64 1, [[TMP19]]
; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = xor i64 [[TMP24]], -1
@@ -1793,31 +1793,31 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_system_
; IR-ITERATIVE-NEXT: br i1 [[TMP28]], label [[TMP10]], label [[TMP12]]
;
; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_div_value_system_scope_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP34:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call double @llvm.amdgcn.set.inactive.f64(double [[VAL:%.*]], double -0.000000e+00) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP9]], i32 273, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call double @llvm.amdgcn.set.inactive.f64(double [[VAL:%.*]], double -0.000000e+00)
+; IR-DPP-NEXT: [[TMP10:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP9]], i32 273, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP9]], double [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP11]], i32 274, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP11]], i32 274, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP13:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP11]], double [[TMP12]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP13]], i32 276, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP13]], i32 276, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP15:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP13]], double [[TMP14]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP16:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP15]], i32 280, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP16:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP15]], i32 280, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP17:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP15]], double [[TMP16]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP17]], i32 322, i32 10, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP17]], i32 322, i32 10, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP19:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP17]], double [[TMP18]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP20:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP19]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP20:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP19]], i32 323, i32 12, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP21:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP19]], double [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP22:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP21]], i32 312, i32 15, i32 15, i1 false) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[TMP21]], i32 63) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP24:%.*]] = call double @llvm.amdgcn.strict.wwm.f64(double [[TMP23]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP22:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP21]], i32 312, i32 15, i32 15, i1 false)
+; IR-DPP-NEXT: [[TMP23:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[TMP21]], i32 63)
+; IR-DPP-NEXT: [[TMP24:%.*]] = call double @llvm.amdgcn.strict.wwm.f64(double [[TMP23]])
; IR-DPP-NEXT: [[TMP25:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-DPP-NEXT: br i1 [[TMP25]], label [[TMP26:%.*]], label [[TMP28:%.*]]
; IR-DPP: 26:
@@ -1825,8 +1825,8 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_system_
; IR-DPP-NEXT: br label [[TMP28]]
; IR-DPP: 28:
; IR-DPP-NEXT: [[TMP29:%.*]] = phi double [ poison, [[TMP2]] ], [ [[TMP27]], [[TMP26]] ]
-; IR-DPP-NEXT: [[TMP30:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP29]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP31:%.*]] = call double @llvm.amdgcn.strict.wwm.f64(double [[TMP22]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP30:%.*]] = call double @llvm.amdgcn.readfirstlane.f64(double [[TMP29]])
+; IR-DPP-NEXT: [[TMP31:%.*]] = call double @llvm.amdgcn.strict.wwm.f64(double [[TMP22]])
; IR-DPP-NEXT: [[TMP32:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP30]], double [[TMP31]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP33:%.*]] = select i1 [[TMP25]], double [[TMP30]], double [[TMP32]]
; IR-DPP-NEXT: br label [[TMP34]]
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll
index af38d6e27f6ffcc..1f53b1c1e2ef390 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll
@@ -112,18 +112,18 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_div_value_scope_agent_scop
define amdgpu_ps void @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float inreg %val) #1 {
; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7:[0-9]+]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7:[0-9]+]]
; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL:%.*]], float [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-ITERATIVE-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
@@ -136,18 +136,18 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_uni_value_one_as_scope_uns
; IR-ITERATIVE-NEXT: ret void
;
; IR-DPP-LABEL: @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8:[0-9]+]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
-; IR-DPP-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8:[0-9]+]]
; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL:%.*]], float [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP13:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-DPP-NEXT: br i1 [[TMP13]], label [[TMP14:%.*]], label [[TMP16:%.*]]
@@ -165,16 +165,16 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_uni_value_one_as_scope_uns
define amdgpu_ps void @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float %val) #1 {
; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: br label [[COMPUTELOOP:%.*]]
; IR-ITERATIVE: 10:
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP17:%.*]] syncscope("one-as") monotonic, align 4
@@ -186,9 +186,9 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_div_value_one_as_scope_uns
; IR-ITERATIVE: ComputeLoop:
; IR-ITERATIVE-NEXT: [[ACCUMULATOR:%.*]] = phi float [ -0.000000e+00, [[TMP2]] ], [ [[TMP17]], [[COMPUTELOOP]] ]
; IR-ITERATIVE-NEXT: [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP9]], [[TMP2]] ], [ [[TMP20:%.*]], [[COMPUTELOOP]] ]
-; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
-; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[VAL:%.*]], i32 [[TMP15]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[VAL:%.*]], i32 [[TMP15]])
; IR-ITERATIVE-NEXT: [[TMP17]] = call float @llvm.experimental.constrained.fadd.f32(float [[ACCUMULATOR]], float [[TMP16]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = shl i64 1, [[TMP14]]
; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = xor i64 [[TMP18]], -1
@@ -200,30 +200,30 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_div_value_one_as_scope_uns
; IR-ITERATIVE-NEXT: br i1 [[TMP22]], label [[TMP10:%.*]], label [[TMP12]]
;
; IR-DPP-LABEL: @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP28:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call float @llvm.amdgcn.set.inactive.f32(float [[VAL:%.*]], float -0.000000e+00) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP9]], i32 273, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call float @llvm.amdgcn.set.inactive.f32(float [[VAL:%.*]], float -0.000000e+00)
+; IR-DPP-NEXT: [[TMP10:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP9]], i32 273, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP9]], float [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP11]], i32 274, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP11]], i32 274, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP13:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP11]], float [[TMP12]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP13]], i32 276, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP13]], i32 276, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP15:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP13]], float [[TMP14]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP16:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP15]], i32 280, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP16:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP15]], i32 280, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP17:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP15]], float [[TMP16]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP17]], i32 322, i32 10, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP17]], i32 322, i32 10, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP19:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP17]], float [[TMP18]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP20:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP19]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP20:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP19]], i32 323, i32 12, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP21:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP19]], float [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[TMP21]], i32 63) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP22]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[TMP21]], i32 63)
+; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP22]])
; IR-DPP-NEXT: [[TMP24:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-DPP-NEXT: br i1 [[TMP24]], label [[TMP25:%.*]], label [[TMP27:%.*]]
; IR-DPP: 25:
@@ -240,16 +240,16 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_div_value_one_as_scope_uns
define amdgpu_ps void @global_atomic_fsub_uni_address_uni_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, float inreg %val) #2 {
; IR-ITERATIVE-LABEL: @global_atomic_fsub_uni_address_uni_value_agent_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL:%.*]], float [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
@@ -264,16 +264,16 @@ define amdgpu_ps void @global_atomic_fsub_uni_address_uni_value_agent_scope_stri
; IR-ITERATIVE-NEXT: ret void
;
; IR-DPP-LABEL: @global_atomic_fsub_uni_address_uni_value_agent_scope_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
; IR-DPP-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL:%.*]], float [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
@@ -293,16 +293,16 @@ define amdgpu_ps void @global_atomic_fsub_uni_address_uni_value_agent_scope_stri
define amdgpu_ps void @global_atomic_fsub_uni_address_div_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, float %val) #2 {
; IR-ITERATIVE-LABEL: @global_atomic_fsub_uni_address_div_value_agent_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: br label [[COMPUTELOOP:%.*]]
; IR-ITERATIVE: 10:
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], float [[TMP17:%.*]] syncscope("agent") monotonic, align 4
@@ -314,9 +314,9 @@ define amdgpu_ps void @global_atomic_fsub_uni_address_div_value_agent_scope_stri
; IR-ITERATIVE: ComputeLoop:
; IR-ITERATIVE-NEXT: [[ACCUMULATOR:%.*]] = phi float [ -0.000000e+00, [[TMP2]] ], [ [[TMP17]], [[COMPUTELOOP]] ]
; IR-ITERATIVE-NEXT: [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP9]], [[TMP2]] ], [ [[TMP20:%.*]], [[COMPUTELOOP]] ]
-; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
-; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[VAL:%.*]], i32 [[TMP15]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[VAL:%.*]], i32 [[TMP15]])
; IR-ITERATIVE-NEXT: [[TMP17]] = call float @llvm.experimental.constrained.fadd.f32(float [[ACCUMULATOR]], float [[TMP16]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = shl i64 1, [[TMP14]]
; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = xor i64 [[TMP18]], -1
@@ -328,30 +328,30 @@ define amdgpu_ps void @global_atomic_fsub_uni_address_div_value_agent_scope_stri
; IR-ITERATIVE-NEXT: br i1 [[TMP22]], label [[TMP10:%.*]], label [[TMP12]]
;
; IR-DPP-LABEL: @global_atomic_fsub_uni_address_div_value_agent_scope_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP28:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call float @llvm.amdgcn.set.inactive.f32(float [[VAL:%.*]], float -0.000000e+00) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP9]], i32 273, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call float @llvm.amdgcn.set.inactive.f32(float [[VAL:%.*]], float -0.000000e+00)
+; IR-DPP-NEXT: [[TMP10:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP9]], i32 273, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP9]], float [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP11]], i32 274, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP11]], i32 274, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP13:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP11]], float [[TMP12]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP13]], i32 276, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP13]], i32 276, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP15:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP13]], float [[TMP14]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP16:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP15]], i32 280, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP16:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP15]], i32 280, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP17:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP15]], float [[TMP16]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP17]], i32 322, i32 10, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP17]], i32 322, i32 10, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP19:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP17]], float [[TMP18]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP20:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP19]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP20:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP19]], i32 323, i32 12, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP21:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP19]], float [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[TMP21]], i32 63) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP22]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[TMP21]], i32 63)
+; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP22]])
; IR-DPP-NEXT: [[TMP24:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-DPP-NEXT: br i1 [[TMP24]], label [[TMP25:%.*]], label [[TMP27:%.*]]
; IR-DPP: 25:
@@ -467,45 +467,25 @@ define amdgpu_ps void @global_atomic_fmin_uni_address_div_value_agent_scope_unsa
}
define amdgpu_ps void @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float inreg %val) #1{
-; IR-ITERATIVE-LABEL: @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
-; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
-; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-ITERATIVE-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
-; IR-ITERATIVE: 10:
-; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-ITERATIVE-NEXT: br label [[TMP12]]
-; IR-ITERATIVE: 12:
-; IR-ITERATIVE-NEXT: br label [[TMP13]]
-; IR-ITERATIVE: 13:
-; IR-ITERATIVE-NEXT: ret void
-;
-; IR-DPP-LABEL: @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
-; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
-; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-DPP-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
-; IR-DPP: 10:
-; IR-DPP-NEXT: [[TMP11:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
-; IR-DPP-NEXT: br label [[TMP12]]
-; IR-DPP: 12:
-; IR-DPP-NEXT: br label [[TMP13]]
-; IR-DPP: 13:
-; IR-DPP-NEXT: ret void
+; IR-LABEL: @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_strictfp(
+; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
+; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
+; IR: 2:
+; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
+; IR: 10:
+; IR-NEXT: [[TMP11:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
+; IR-NEXT: br label [[TMP12]]
+; IR: 12:
+; IR-NEXT: br label [[TMP13]]
+; IR: 13:
+; IR-NEXT: ret void
;
%result = atomicrmw fmax ptr addrspace(1) %ptr, float %val syncscope("agent") monotonic
ret void
@@ -513,16 +493,16 @@ define amdgpu_ps void @global_atomic_fmax_uni_address_uni_value_agent_scope_unsa
define amdgpu_ps void @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float %val) #1{
; IR-ITERATIVE-LABEL: @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: br label [[COMPUTELOOP:%.*]]
; IR-ITERATIVE: 10:
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[TMP17:%.*]] syncscope("agent") monotonic, align 4
@@ -534,9 +514,9 @@ define amdgpu_ps void @global_atomic_fmax_uni_address_div_value_agent_scope_unsa
; IR-ITERATIVE: ComputeLoop:
; IR-ITERATIVE-NEXT: [[ACCUMULATOR:%.*]] = phi float [ 0x7FF8000000000000, [[TMP2]] ], [ [[TMP17]], [[COMPUTELOOP]] ]
; IR-ITERATIVE-NEXT: [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP9]], [[TMP2]] ], [ [[TMP20:%.*]], [[COMPUTELOOP]] ]
-; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
-; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[VAL:%.*]], i32 [[TMP15]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[VAL:%.*]], i32 [[TMP15]])
; IR-ITERATIVE-NEXT: [[TMP17]] = call float @llvm.experimental.constrained.maxnum.f32(float [[ACCUMULATOR]], float [[TMP16]], metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = shl i64 1, [[TMP14]]
; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = xor i64 [[TMP18]], -1
@@ -548,30 +528,30 @@ define amdgpu_ps void @global_atomic_fmax_uni_address_div_value_agent_scope_unsa
; IR-ITERATIVE-NEXT: br i1 [[TMP22]], label [[TMP10:%.*]], label [[TMP12]]
;
; IR-DPP-LABEL: @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP28:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call float @llvm.amdgcn.set.inactive.f32(float [[VAL:%.*]], float 0x7FF8000000000000) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP9]], i32 273, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call float @llvm.amdgcn.set.inactive.f32(float [[VAL:%.*]], float 0x7FF8000000000000)
+; IR-DPP-NEXT: [[TMP10:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP9]], i32 273, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.maxnum.f32(float [[TMP9]], float [[TMP10]], metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP11]], i32 274, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP11]], i32 274, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP13:%.*]] = call float @llvm.experimental.constrained.maxnum.f32(float [[TMP11]], float [[TMP12]], metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP13]], i32 276, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP13]], i32 276, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP15:%.*]] = call float @llvm.experimental.constrained.maxnum.f32(float [[TMP13]], float [[TMP14]], metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP16:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP15]], i32 280, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP16:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP15]], i32 280, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP17:%.*]] = call float @llvm.experimental.constrained.maxnum.f32(float [[TMP15]], float [[TMP16]], metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP17]], i32 322, i32 10, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP17]], i32 322, i32 10, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP19:%.*]] = call float @llvm.experimental.constrained.maxnum.f32(float [[TMP17]], float [[TMP18]], metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP20:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP19]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP20:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float 0x7FF8000000000000, float [[TMP19]], i32 323, i32 12, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP21:%.*]] = call float @llvm.experimental.constrained.maxnum.f32(float [[TMP19]], float [[TMP20]], metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[TMP21]], i32 63) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP22]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[TMP21]], i32 63)
+; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP22]])
; IR-DPP-NEXT: [[TMP24:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-DPP-NEXT: br i1 [[TMP24]], label [[TMP25:%.*]], label [[TMP27:%.*]]
; IR-DPP: 25:
@@ -588,16 +568,16 @@ define amdgpu_ps void @global_atomic_fmax_uni_address_div_value_agent_scope_unsa
define amdgpu_ps void @global_atomic_fadd_uni_address_uni_value_system_scope_strictfp(ptr addrspace(1) inreg %ptr, float inreg %val) #2 {
; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_uni_value_system_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL:%.*]], float [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
@@ -612,16 +592,16 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_uni_value_system_scope_str
; IR-ITERATIVE-NEXT: ret void
;
; IR-DPP-LABEL: @global_atomic_fadd_uni_address_uni_value_system_scope_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
; IR-DPP-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[VAL:%.*]], float [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
@@ -641,16 +621,16 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_uni_value_system_scope_str
define amdgpu_ps void @global_atomic_fadd_uni_address_div_value_system_scope_strictfp(ptr addrspace(1) inreg %ptr, float %val) #2 {
; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_div_value_system_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: br label [[COMPUTELOOP:%.*]]
; IR-ITERATIVE: 10:
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[TMP17:%.*]] monotonic, align 4
@@ -662,9 +642,9 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_div_value_system_scope_str
; IR-ITERATIVE: ComputeLoop:
; IR-ITERATIVE-NEXT: [[ACCUMULATOR:%.*]] = phi float [ -0.000000e+00, [[TMP2]] ], [ [[TMP17]], [[COMPUTELOOP]] ]
; IR-ITERATIVE-NEXT: [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP9]], [[TMP2]] ], [ [[TMP20:%.*]], [[COMPUTELOOP]] ]
-; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
-; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[VAL:%.*]], i32 [[TMP15]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[VAL:%.*]], i32 [[TMP15]])
; IR-ITERATIVE-NEXT: [[TMP17]] = call float @llvm.experimental.constrained.fadd.f32(float [[ACCUMULATOR]], float [[TMP16]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = shl i64 1, [[TMP14]]
; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = xor i64 [[TMP18]], -1
@@ -676,30 +656,30 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_div_value_system_scope_str
; IR-ITERATIVE-NEXT: br i1 [[TMP22]], label [[TMP10:%.*]], label [[TMP12]]
;
; IR-DPP-LABEL: @global_atomic_fadd_uni_address_div_value_system_scope_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP28:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call float @llvm.amdgcn.set.inactive.f32(float [[VAL:%.*]], float -0.000000e+00) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP9]], i32 273, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call float @llvm.amdgcn.set.inactive.f32(float [[VAL:%.*]], float -0.000000e+00)
+; IR-DPP-NEXT: [[TMP10:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP9]], i32 273, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP11:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP9]], float [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP11]], i32 274, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP11]], i32 274, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP13:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP11]], float [[TMP12]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP13]], i32 276, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP14:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP13]], i32 276, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP15:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP13]], float [[TMP14]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP16:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP15]], i32 280, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP16:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP15]], i32 280, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP17:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP15]], float [[TMP16]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP17]], i32 322, i32 10, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP17]], i32 322, i32 10, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP19:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP17]], float [[TMP18]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP20:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP19]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP20:%.*]] = call float @llvm.amdgcn.update.dpp.f32(float -0.000000e+00, float [[TMP19]], i32 323, i32 12, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP21:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[TMP19]], float [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[TMP21]], i32 63) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP22]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP22:%.*]] = call float @llvm.amdgcn.readlane.f32(float [[TMP21]], i32 63)
+; IR-DPP-NEXT: [[TMP23:%.*]] = call float @llvm.amdgcn.strict.wwm.f32(float [[TMP22]])
; IR-DPP-NEXT: [[TMP24:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-DPP-NEXT: br i1 [[TMP24]], label [[TMP25:%.*]], label [[TMP27:%.*]]
; IR-DPP: 25:
@@ -928,16 +908,16 @@ define amdgpu_ps void @global_atomic_fadd_double_uni_address_div_value_scope_age
define amdgpu_ps void @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1 {
; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
@@ -952,16 +932,16 @@ define amdgpu_ps void @global_atomic_fadd_double_uni_address_uni_value_one_as_sc
; IR-ITERATIVE-NEXT: ret void
;
; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
@@ -981,16 +961,16 @@ define amdgpu_ps void @global_atomic_fadd_double_uni_address_uni_value_one_as_sc
define amdgpu_ps void @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double %val) #1 {
; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: br label [[COMPUTELOOP:%.*]]
; IR-ITERATIVE: 10:
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP17:%.*]] syncscope("one-as") monotonic, align 8
@@ -1002,9 +982,9 @@ define amdgpu_ps void @global_atomic_fadd_double_uni_address_div_value_one_as_sc
; IR-ITERATIVE: ComputeLoop:
; IR-ITERATIVE-NEXT: [[ACCUMULATOR:%.*]] = phi double [ -0.000000e+00, [[TMP2]] ], [ [[TMP17]], [[COMPUTELOOP]] ]
; IR-ITERATIVE-NEXT: [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP9]], [[TMP2]] ], [ [[TMP20:%.*]], [[COMPUTELOOP]] ]
-; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
-; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[VAL:%.*]], i32 [[TMP15]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[VAL:%.*]], i32 [[TMP15]])
; IR-ITERATIVE-NEXT: [[TMP17]] = call double @llvm.experimental.constrained.fadd.f64(double [[ACCUMULATOR]], double [[TMP16]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = shl i64 1, [[TMP14]]
; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = xor i64 [[TMP18]], -1
@@ -1016,30 +996,30 @@ define amdgpu_ps void @global_atomic_fadd_double_uni_address_div_value_one_as_sc
; IR-ITERATIVE-NEXT: br i1 [[TMP22]], label [[TMP10:%.*]], label [[TMP12]]
;
; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP28:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call double @llvm.amdgcn.set.inactive.f64(double [[VAL:%.*]], double -0.000000e+00) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP9]], i32 273, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call double @llvm.amdgcn.set.inactive.f64(double [[VAL:%.*]], double -0.000000e+00)
+; IR-DPP-NEXT: [[TMP10:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP9]], i32 273, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP9]], double [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP11]], i32 274, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP11]], i32 274, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP13:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP11]], double [[TMP12]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP13]], i32 276, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP13]], i32 276, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP15:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP13]], double [[TMP14]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP16:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP15]], i32 280, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP16:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP15]], i32 280, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP17:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP15]], double [[TMP16]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP17]], i32 322, i32 10, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP17]], i32 322, i32 10, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP19:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP17]], double [[TMP18]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP20:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP19]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP20:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP19]], i32 323, i32 12, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP21:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP19]], double [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP22:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[TMP21]], i32 63) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call double @llvm.amdgcn.strict.wwm.f64(double [[TMP22]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP22:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[TMP21]], i32 63)
+; IR-DPP-NEXT: [[TMP23:%.*]] = call double @llvm.amdgcn.strict.wwm.f64(double [[TMP22]])
; IR-DPP-NEXT: [[TMP24:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-DPP-NEXT: br i1 [[TMP24]], label [[TMP25:%.*]], label [[TMP27:%.*]]
; IR-DPP: 25:
@@ -1056,16 +1036,16 @@ define amdgpu_ps void @global_atomic_fadd_double_uni_address_div_value_one_as_sc
define amdgpu_ps void @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #2 {
; IR-ITERATIVE-LABEL: @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
@@ -1080,16 +1060,16 @@ define amdgpu_ps void @global_atomic_fsub_double_uni_address_uni_value_agent_sco
; IR-ITERATIVE-NEXT: ret void
;
; IR-DPP-LABEL: @global_atomic_fsub_double_uni_address_uni_value_agent_scope_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
@@ -1109,16 +1089,16 @@ define amdgpu_ps void @global_atomic_fsub_double_uni_address_uni_value_agent_sco
define amdgpu_ps void @global_atomic_fsub_double_uni_address_div_value_agent_scope_strictfp(ptr addrspace(1) inreg %ptr, double %val) #2 {
; IR-ITERATIVE-LABEL: @global_atomic_fsub_double_uni_address_div_value_agent_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: br label [[COMPUTELOOP:%.*]]
; IR-ITERATIVE: 10:
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fsub ptr addrspace(1) [[PTR:%.*]], double [[TMP17:%.*]] syncscope("agent") monotonic, align 8
@@ -1130,9 +1110,9 @@ define amdgpu_ps void @global_atomic_fsub_double_uni_address_div_value_agent_sco
; IR-ITERATIVE: ComputeLoop:
; IR-ITERATIVE-NEXT: [[ACCUMULATOR:%.*]] = phi double [ -0.000000e+00, [[TMP2]] ], [ [[TMP17]], [[COMPUTELOOP]] ]
; IR-ITERATIVE-NEXT: [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP9]], [[TMP2]] ], [ [[TMP20:%.*]], [[COMPUTELOOP]] ]
-; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
-; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[VAL:%.*]], i32 [[TMP15]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[VAL:%.*]], i32 [[TMP15]])
; IR-ITERATIVE-NEXT: [[TMP17]] = call double @llvm.experimental.constrained.fadd.f64(double [[ACCUMULATOR]], double [[TMP16]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = shl i64 1, [[TMP14]]
; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = xor i64 [[TMP18]], -1
@@ -1144,30 +1124,30 @@ define amdgpu_ps void @global_atomic_fsub_double_uni_address_div_value_agent_sco
; IR-ITERATIVE-NEXT: br i1 [[TMP22]], label [[TMP10:%.*]], label [[TMP12]]
;
; IR-DPP-LABEL: @global_atomic_fsub_double_uni_address_div_value_agent_scope_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP28:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call double @llvm.amdgcn.set.inactive.f64(double [[VAL:%.*]], double -0.000000e+00) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP9]], i32 273, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call double @llvm.amdgcn.set.inactive.f64(double [[VAL:%.*]], double -0.000000e+00)
+; IR-DPP-NEXT: [[TMP10:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP9]], i32 273, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP9]], double [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP11]], i32 274, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP11]], i32 274, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP13:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP11]], double [[TMP12]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP13]], i32 276, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP13]], i32 276, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP15:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP13]], double [[TMP14]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP16:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP15]], i32 280, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP16:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP15]], i32 280, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP17:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP15]], double [[TMP16]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP17]], i32 322, i32 10, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP17]], i32 322, i32 10, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP19:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP17]], double [[TMP18]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP20:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP19]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP20:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP19]], i32 323, i32 12, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP21:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP19]], double [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP22:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[TMP21]], i32 63) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call double @llvm.amdgcn.strict.wwm.f64(double [[TMP22]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP22:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[TMP21]], i32 63)
+; IR-DPP-NEXT: [[TMP23:%.*]] = call double @llvm.amdgcn.strict.wwm.f64(double [[TMP22]])
; IR-DPP-NEXT: [[TMP24:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-DPP-NEXT: br i1 [[TMP24]], label [[TMP25:%.*]], label [[TMP27:%.*]]
; IR-DPP: 25:
@@ -1283,45 +1263,25 @@ define amdgpu_ps void @global_atomic_fmin_double_uni_address_div_value_agent_sco
}
define amdgpu_ps void @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1 {
-; IR-ITERATIVE-LABEL: @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
-; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
-; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-ITERATIVE-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
-; IR-ITERATIVE: 10:
-; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
-; IR-ITERATIVE-NEXT: br label [[TMP12]]
-; IR-ITERATIVE: 12:
-; IR-ITERATIVE-NEXT: br label [[TMP13]]
-; IR-ITERATIVE: 13:
-; IR-ITERATIVE-NEXT: ret void
-;
-; IR-DPP-LABEL: @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
-; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
-; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
-; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
-; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
-; IR-DPP-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
-; IR-DPP: 10:
-; IR-DPP-NEXT: [[TMP11:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
-; IR-DPP-NEXT: br label [[TMP12]]
-; IR-DPP: 12:
-; IR-DPP-NEXT: br label [[TMP13]]
-; IR-DPP: 13:
-; IR-DPP-NEXT: ret void
+; IR-LABEL: @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe_strictfp(
+; IR-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
+; IR-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
+; IR: 2:
+; IR-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
+; IR-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
+; IR-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
+; IR-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
+; IR-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-NEXT: [[TMP9:%.*]] = icmp eq i32 [[TMP8]], 0
+; IR-NEXT: br i1 [[TMP9]], label [[TMP10:%.*]], label [[TMP12:%.*]]
+; IR: 10:
+; IR-NEXT: [[TMP11:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
+; IR-NEXT: br label [[TMP12]]
+; IR: 12:
+; IR-NEXT: br label [[TMP13]]
+; IR: 13:
+; IR-NEXT: ret void
;
%result = atomicrmw fmax ptr addrspace(1) %ptr, double %val syncscope("agent") monotonic
ret void
@@ -1329,16 +1289,16 @@ define amdgpu_ps void @global_atomic_fmax_double_uni_address_uni_value_agent_sco
define amdgpu_ps void @global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double %val) #1 {
; IR-ITERATIVE-LABEL: @global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: br label [[COMPUTELOOP:%.*]]
; IR-ITERATIVE: 10:
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[TMP17:%.*]] syncscope("agent") monotonic, align 8
@@ -1350,9 +1310,9 @@ define amdgpu_ps void @global_atomic_fmax_double_uni_address_div_value_agent_sco
; IR-ITERATIVE: ComputeLoop:
; IR-ITERATIVE-NEXT: [[ACCUMULATOR:%.*]] = phi double [ 0x7FF8000000000000, [[TMP2]] ], [ [[TMP17]], [[COMPUTELOOP]] ]
; IR-ITERATIVE-NEXT: [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP9]], [[TMP2]] ], [ [[TMP20:%.*]], [[COMPUTELOOP]] ]
-; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
-; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[VAL:%.*]], i32 [[TMP15]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[VAL:%.*]], i32 [[TMP15]])
; IR-ITERATIVE-NEXT: [[TMP17]] = call double @llvm.experimental.constrained.maxnum.f64(double [[ACCUMULATOR]], double [[TMP16]], metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = shl i64 1, [[TMP14]]
; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = xor i64 [[TMP18]], -1
@@ -1364,30 +1324,30 @@ define amdgpu_ps void @global_atomic_fmax_double_uni_address_div_value_agent_sco
; IR-ITERATIVE-NEXT: br i1 [[TMP22]], label [[TMP10:%.*]], label [[TMP12]]
;
; IR-DPP-LABEL: @global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP28:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call double @llvm.amdgcn.set.inactive.f64(double [[VAL:%.*]], double 0x7FF8000000000000) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP9]], i32 273, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call double @llvm.amdgcn.set.inactive.f64(double [[VAL:%.*]], double 0x7FF8000000000000)
+; IR-DPP-NEXT: [[TMP10:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP9]], i32 273, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.maxnum.f64(double [[TMP9]], double [[TMP10]], metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP11]], i32 274, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP11]], i32 274, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP13:%.*]] = call double @llvm.experimental.constrained.maxnum.f64(double [[TMP11]], double [[TMP12]], metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP13]], i32 276, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP13]], i32 276, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP15:%.*]] = call double @llvm.experimental.constrained.maxnum.f64(double [[TMP13]], double [[TMP14]], metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP16:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP15]], i32 280, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP16:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP15]], i32 280, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP17:%.*]] = call double @llvm.experimental.constrained.maxnum.f64(double [[TMP15]], double [[TMP16]], metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP17]], i32 322, i32 10, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP17]], i32 322, i32 10, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP19:%.*]] = call double @llvm.experimental.constrained.maxnum.f64(double [[TMP17]], double [[TMP18]], metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP20:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP19]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP20:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double 0x7FF8000000000000, double [[TMP19]], i32 323, i32 12, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP21:%.*]] = call double @llvm.experimental.constrained.maxnum.f64(double [[TMP19]], double [[TMP20]], metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP22:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[TMP21]], i32 63) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call double @llvm.amdgcn.strict.wwm.f64(double [[TMP22]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP22:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[TMP21]], i32 63)
+; IR-DPP-NEXT: [[TMP23:%.*]] = call double @llvm.amdgcn.strict.wwm.f64(double [[TMP22]])
; IR-DPP-NEXT: [[TMP24:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-DPP-NEXT: br i1 [[TMP24]], label [[TMP25:%.*]], label [[TMP27:%.*]]
; IR-DPP: 25:
@@ -1404,16 +1364,16 @@ define amdgpu_ps void @global_atomic_fmax_double_uni_address_div_value_agent_sco
define amdgpu_ps void @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #2 {
; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
; IR-ITERATIVE-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
@@ -1428,16 +1388,16 @@ define amdgpu_ps void @global_atomic_fadd_double_uni_address_uni_value_system_sc
; IR-ITERATIVE-NEXT: ret void
;
; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_system_scope_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call i64 @llvm.ctpop.i64(i64 [[TMP3]])
; IR-DPP-NEXT: [[TMP10:%.*]] = trunc i64 [[TMP9]] to i32
; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i32(i32 [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[VAL:%.*]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
@@ -1457,16 +1417,16 @@ define amdgpu_ps void @global_atomic_fadd_double_uni_address_uni_value_system_sc
define amdgpu_ps void @global_atomic_fadd_double_uni_address_div_value_system_scope_strictfp(ptr addrspace(1) inreg %ptr, double %val) #2 {
; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_div_value_system_scope_strictfp(
-; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
; IR-ITERATIVE: 2:
-; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-ITERATIVE-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-ITERATIVE-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR7]]
-; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-ITERATIVE-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-ITERATIVE-NEXT: [[TMP9:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-ITERATIVE-NEXT: br label [[COMPUTELOOP:%.*]]
; IR-ITERATIVE: 10:
; IR-ITERATIVE-NEXT: [[TMP11:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[TMP17:%.*]] monotonic, align 4
@@ -1478,9 +1438,9 @@ define amdgpu_ps void @global_atomic_fadd_double_uni_address_div_value_system_sc
; IR-ITERATIVE: ComputeLoop:
; IR-ITERATIVE-NEXT: [[ACCUMULATOR:%.*]] = phi double [ -0.000000e+00, [[TMP2]] ], [ [[TMP17]], [[COMPUTELOOP]] ]
; IR-ITERATIVE-NEXT: [[ACTIVEBITS:%.*]] = phi i64 [ [[TMP9]], [[TMP2]] ], [ [[TMP20:%.*]], [[COMPUTELOOP]] ]
-; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP14:%.*]] = call i64 @llvm.cttz.i64(i64 [[ACTIVEBITS]], i1 true)
; IR-ITERATIVE-NEXT: [[TMP15:%.*]] = trunc i64 [[TMP14]] to i32
-; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[VAL:%.*]], i32 [[TMP15]]) #[[ATTR7]]
+; IR-ITERATIVE-NEXT: [[TMP16:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[VAL:%.*]], i32 [[TMP15]])
; IR-ITERATIVE-NEXT: [[TMP17]] = call double @llvm.experimental.constrained.fadd.f64(double [[ACCUMULATOR]], double [[TMP16]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR7]]
; IR-ITERATIVE-NEXT: [[TMP18:%.*]] = shl i64 1, [[TMP14]]
; IR-ITERATIVE-NEXT: [[TMP19:%.*]] = xor i64 [[TMP18]], -1
@@ -1492,30 +1452,30 @@ define amdgpu_ps void @global_atomic_fadd_double_uni_address_div_value_system_sc
; IR-ITERATIVE-NEXT: br i1 [[TMP22]], label [[TMP10:%.*]], label [[TMP12]]
;
; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_div_value_system_scope_strictfp(
-; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live()
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP28:%.*]]
; IR-DPP: 2:
-; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP3:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 true)
; IR-DPP-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; IR-DPP-NEXT: [[TMP5:%.*]] = lshr i64 [[TMP3]], 32
; IR-DPP-NEXT: [[TMP6:%.*]] = trunc i64 [[TMP5]] to i32
-; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]]) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP9:%.*]] = call double @llvm.amdgcn.set.inactive.f64(double [[VAL:%.*]], double -0.000000e+00) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP10:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP9]], i32 273, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP7:%.*]] = call i32 @llvm.amdgcn.mbcnt.lo(i32 [[TMP4]], i32 0)
+; IR-DPP-NEXT: [[TMP8:%.*]] = call i32 @llvm.amdgcn.mbcnt.hi(i32 [[TMP6]], i32 [[TMP7]])
+; IR-DPP-NEXT: [[TMP9:%.*]] = call double @llvm.amdgcn.set.inactive.f64(double [[VAL:%.*]], double -0.000000e+00)
+; IR-DPP-NEXT: [[TMP10:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP9]], i32 273, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP9]], double [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP11]], i32 274, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP12:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP11]], i32 274, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP13:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP11]], double [[TMP12]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP13]], i32 276, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP14:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP13]], i32 276, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP15:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP13]], double [[TMP14]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP16:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP15]], i32 280, i32 15, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP16:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP15]], i32 280, i32 15, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP17:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP15]], double [[TMP16]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP17]], i32 322, i32 10, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP18:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP17]], i32 322, i32 10, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP19:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP17]], double [[TMP18]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP20:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP19]], i32 323, i32 12, i32 15, i1 false) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP20:%.*]] = call double @llvm.amdgcn.update.dpp.f64(double -0.000000e+00, double [[TMP19]], i32 323, i32 12, i32 15, i1 false)
; IR-DPP-NEXT: [[TMP21:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP19]], double [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP22:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[TMP21]], i32 63) #[[ATTR8]]
-; IR-DPP-NEXT: [[TMP23:%.*]] = call double @llvm.amdgcn.strict.wwm.f64(double [[TMP22]]) #[[ATTR8]]
+; IR-DPP-NEXT: [[TMP22:%.*]] = call double @llvm.amdgcn.readlane.f64(double [[TMP21]], i32 63)
+; IR-DPP-NEXT: [[TMP23:%.*]] = call double @llvm.amdgcn.strict.wwm.f64(double [[TMP22]])
; IR-DPP-NEXT: [[TMP24:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-DPP-NEXT: br i1 [[TMP24]], label [[TMP25:%.*]], label [[TMP27:%.*]]
; IR-DPP: 25:
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll
index 84d9a64efa0f7d0..4b24ce6f88d8a9f 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll
@@ -1848,7 +1848,7 @@ define bfloat @test_atomicrmw_fadd_bf16_global_system_align4(ptr addrspace(1) %p
define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bfloat %value) #2 {
; ALL-LABEL: @test_atomicrmw_fadd_bf16_local_strictfp(
-; ALL-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4) #[[ATTR8:[0-9]+]]
+; ALL-NEXT: [[ALIGNEDADDR:%.*]] = call ptr addrspace(3) @llvm.ptrmask.p3.i32(ptr addrspace(3) [[PTR:%.*]], i32 -4)
; ALL-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(3) [[PTR]] to i32
; ALL-NEXT: [[PTRLSB:%.*]] = and i32 [[TMP1]], 3
; ALL-NEXT: [[TMP2:%.*]] = shl i32 [[PTRLSB]], 3
@@ -1861,7 +1861,7 @@ define bfloat @test_atomicrmw_fadd_bf16_local_strictfp(ptr addrspace(3) %ptr, bf
; ALL-NEXT: [[SHIFTED:%.*]] = lshr i32 [[LOADED]], [[TMP2]]
; ALL-NEXT: [[EXTRACTED:%.*]] = trunc i32 [[SHIFTED]] to i16
; ALL-NEXT: [[TMP4:%.*]] = bitcast i16 [[EXTRACTED]] to bfloat
-; ALL-NEXT: [[NEW:%.*]] = call bfloat @llvm.experimental.constrained.fadd.bf16(bfloat [[TMP4]], bfloat [[VALUE:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8]]
+; ALL-NEXT: [[NEW:%.*]] = call bfloat @llvm.experimental.constrained.fadd.bf16(bfloat [[TMP4]], bfloat [[VALUE:%.*]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR8:[0-9]+]]
; ALL-NEXT: [[TMP5:%.*]] = bitcast bfloat [[NEW]] to i16
; ALL-NEXT: [[EXTENDED:%.*]] = zext i16 [[TMP5]] to i32
; ALL-NEXT: [[SHIFTED1:%.*]] = shl nuw i32 [[EXTENDED]], [[TMP2]]
diff --git a/llvm/test/Transforms/HardwareLoops/scalar-while-strictfp.ll b/llvm/test/Transforms/HardwareLoops/scalar-while-strictfp.ll
index 951aacc06536284..c52197fd677a9f6 100644
--- a/llvm/test/Transforms/HardwareLoops/scalar-while-strictfp.ll
+++ b/llvm/test/Transforms/HardwareLoops/scalar-while-strictfp.ll
@@ -10,14 +10,14 @@ define void @while_lt(i32 %i, i32 %N, ptr nocapture %A) strictfp {
; CHECK-DEC-NEXT: br i1 [[CMP4]], label [[WHILE_BODY_PREHEADER:%.*]], label [[WHILE_END:%.*]]
; CHECK-DEC: while.body.preheader:
; CHECK-DEC-NEXT: [[TMP0:%.*]] = sub i32 [[N]], [[I]]
-; CHECK-DEC-NEXT: call void @llvm.set.loop.iterations.i32(i32 [[TMP0]]) #[[ATTR0:[0-9]+]]
+; CHECK-DEC-NEXT: call void @llvm.set.loop.iterations.i32(i32 [[TMP0]])
; CHECK-DEC-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK-DEC: while.body:
; CHECK-DEC-NEXT: [[I_ADDR_05:%.*]] = phi i32 [ [[INC:%.*]], [[WHILE_BODY]] ], [ [[I]], [[WHILE_BODY_PREHEADER]] ]
; CHECK-DEC-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[I_ADDR_05]]
; CHECK-DEC-NEXT: store i32 [[I_ADDR_05]], ptr [[ARRAYIDX]], align 4
; CHECK-DEC-NEXT: [[INC]] = add nuw i32 [[I_ADDR_05]], 1
-; CHECK-DEC-NEXT: [[TMP1:%.*]] = call i1 @llvm.loop.decrement.i32(i32 1) #[[ATTR0]]
+; CHECK-DEC-NEXT: [[TMP1:%.*]] = call i1 @llvm.loop.decrement.i32(i32 1)
; CHECK-DEC-NEXT: br i1 [[TMP1]], label [[WHILE_BODY]], label [[WHILE_END]]
; CHECK-DEC: while.end:
; CHECK-DEC-NEXT: ret void
@@ -29,7 +29,7 @@ define void @while_lt(i32 %i, i32 %N, ptr nocapture %A) strictfp {
; CHECK-PHI-NEXT: br i1 [[CMP4]], label [[WHILE_BODY_PREHEADER:%.*]], label [[WHILE_END:%.*]]
; CHECK-PHI: while.body.preheader:
; CHECK-PHI-NEXT: [[TMP0:%.*]] = sub i32 [[N]], [[I]]
-; CHECK-PHI-NEXT: [[TMP1:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TMP0]]) #[[ATTR0:[0-9]+]]
+; CHECK-PHI-NEXT: [[TMP1:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TMP0]])
; CHECK-PHI-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK-PHI: while.body:
; CHECK-PHI-NEXT: [[I_ADDR_05:%.*]] = phi i32 [ [[INC:%.*]], [[WHILE_BODY]] ], [ [[I]], [[WHILE_BODY_PREHEADER]] ]
@@ -37,7 +37,7 @@ define void @while_lt(i32 %i, i32 %N, ptr nocapture %A) strictfp {
; CHECK-PHI-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[I_ADDR_05]]
; CHECK-PHI-NEXT: store i32 [[I_ADDR_05]], ptr [[ARRAYIDX]], align 4
; CHECK-PHI-NEXT: [[INC]] = add nuw i32 [[I_ADDR_05]], 1
-; CHECK-PHI-NEXT: [[TMP3]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP2]], i32 1) #[[ATTR0]]
+; CHECK-PHI-NEXT: [[TMP3]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP2]], i32 1)
; CHECK-PHI-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
; CHECK-PHI-NEXT: br i1 [[TMP4]], label [[WHILE_BODY]], label [[WHILE_END]]
; CHECK-PHI: while.end:
@@ -67,14 +67,14 @@ define void @while_gt(i32 %i, i32 %N, ptr nocapture %A) strictfp {
; CHECK-DEC-NEXT: br i1 [[CMP4]], label [[WHILE_BODY_PREHEADER:%.*]], label [[WHILE_END:%.*]]
; CHECK-DEC: while.body.preheader:
; CHECK-DEC-NEXT: [[TMP0:%.*]] = sub i32 [[I]], [[N]]
-; CHECK-DEC-NEXT: call void @llvm.set.loop.iterations.i32(i32 [[TMP0]]) #[[ATTR0]]
+; CHECK-DEC-NEXT: call void @llvm.set.loop.iterations.i32(i32 [[TMP0]])
; CHECK-DEC-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK-DEC: while.body:
; CHECK-DEC-NEXT: [[I_ADDR_05:%.*]] = phi i32 [ [[DEC:%.*]], [[WHILE_BODY]] ], [ [[I]], [[WHILE_BODY_PREHEADER]] ]
; CHECK-DEC-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[I_ADDR_05]]
; CHECK-DEC-NEXT: store i32 [[I_ADDR_05]], ptr [[ARRAYIDX]], align 4
; CHECK-DEC-NEXT: [[DEC]] = add nsw i32 [[I_ADDR_05]], -1
-; CHECK-DEC-NEXT: [[TMP1:%.*]] = call i1 @llvm.loop.decrement.i32(i32 1) #[[ATTR0]]
+; CHECK-DEC-NEXT: [[TMP1:%.*]] = call i1 @llvm.loop.decrement.i32(i32 1)
; CHECK-DEC-NEXT: br i1 [[TMP1]], label [[WHILE_BODY]], label [[WHILE_END]]
; CHECK-DEC: while.end:
; CHECK-DEC-NEXT: ret void
@@ -86,7 +86,7 @@ define void @while_gt(i32 %i, i32 %N, ptr nocapture %A) strictfp {
; CHECK-PHI-NEXT: br i1 [[CMP4]], label [[WHILE_BODY_PREHEADER:%.*]], label [[WHILE_END:%.*]]
; CHECK-PHI: while.body.preheader:
; CHECK-PHI-NEXT: [[TMP0:%.*]] = sub i32 [[I]], [[N]]
-; CHECK-PHI-NEXT: [[TMP1:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TMP0]]) #[[ATTR0]]
+; CHECK-PHI-NEXT: [[TMP1:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TMP0]])
; CHECK-PHI-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK-PHI: while.body:
; CHECK-PHI-NEXT: [[I_ADDR_05:%.*]] = phi i32 [ [[DEC:%.*]], [[WHILE_BODY]] ], [ [[I]], [[WHILE_BODY_PREHEADER]] ]
@@ -94,7 +94,7 @@ define void @while_gt(i32 %i, i32 %N, ptr nocapture %A) strictfp {
; CHECK-PHI-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[I_ADDR_05]]
; CHECK-PHI-NEXT: store i32 [[I_ADDR_05]], ptr [[ARRAYIDX]], align 4
; CHECK-PHI-NEXT: [[DEC]] = add nsw i32 [[I_ADDR_05]], -1
-; CHECK-PHI-NEXT: [[TMP3]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP2]], i32 1) #[[ATTR0]]
+; CHECK-PHI-NEXT: [[TMP3]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP2]], i32 1)
; CHECK-PHI-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
; CHECK-PHI-NEXT: br i1 [[TMP4]], label [[WHILE_BODY]], label [[WHILE_END]]
; CHECK-PHI: while.end:
@@ -125,14 +125,14 @@ define void @while_gte(i32 %i, i32 %N, ptr nocapture %A) strictfp {
; CHECK-DEC: while.body.preheader:
; CHECK-DEC-NEXT: [[TMP0:%.*]] = add i32 [[I]], 1
; CHECK-DEC-NEXT: [[TMP1:%.*]] = sub i32 [[TMP0]], [[N]]
-; CHECK-DEC-NEXT: call void @llvm.set.loop.iterations.i32(i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-DEC-NEXT: call void @llvm.set.loop.iterations.i32(i32 [[TMP1]])
; CHECK-DEC-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK-DEC: while.body:
; CHECK-DEC-NEXT: [[I_ADDR_05:%.*]] = phi i32 [ [[DEC:%.*]], [[WHILE_BODY]] ], [ [[I]], [[WHILE_BODY_PREHEADER]] ]
; CHECK-DEC-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[I_ADDR_05]]
; CHECK-DEC-NEXT: store i32 [[I_ADDR_05]], ptr [[ARRAYIDX]], align 4
; CHECK-DEC-NEXT: [[DEC]] = add nsw i32 [[I_ADDR_05]], -1
-; CHECK-DEC-NEXT: [[TMP2:%.*]] = call i1 @llvm.loop.decrement.i32(i32 1) #[[ATTR0]]
+; CHECK-DEC-NEXT: [[TMP2:%.*]] = call i1 @llvm.loop.decrement.i32(i32 1)
; CHECK-DEC-NEXT: br i1 [[TMP2]], label [[WHILE_BODY]], label [[WHILE_END]]
; CHECK-DEC: while.end:
; CHECK-DEC-NEXT: ret void
@@ -145,7 +145,7 @@ define void @while_gte(i32 %i, i32 %N, ptr nocapture %A) strictfp {
; CHECK-PHI: while.body.preheader:
; CHECK-PHI-NEXT: [[TMP0:%.*]] = add i32 [[I]], 1
; CHECK-PHI-NEXT: [[TMP1:%.*]] = sub i32 [[TMP0]], [[N]]
-; CHECK-PHI-NEXT: [[TMP2:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TMP1]]) #[[ATTR0]]
+; CHECK-PHI-NEXT: [[TMP2:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[TMP1]])
; CHECK-PHI-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK-PHI: while.body:
; CHECK-PHI-NEXT: [[I_ADDR_05:%.*]] = phi i32 [ [[DEC:%.*]], [[WHILE_BODY]] ], [ [[I]], [[WHILE_BODY_PREHEADER]] ]
@@ -153,7 +153,7 @@ define void @while_gte(i32 %i, i32 %N, ptr nocapture %A) strictfp {
; CHECK-PHI-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[I_ADDR_05]]
; CHECK-PHI-NEXT: store i32 [[I_ADDR_05]], ptr [[ARRAYIDX]], align 4
; CHECK-PHI-NEXT: [[DEC]] = add nsw i32 [[I_ADDR_05]], -1
-; CHECK-PHI-NEXT: [[TMP4]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP3]], i32 1) #[[ATTR0]]
+; CHECK-PHI-NEXT: [[TMP4]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP3]], i32 1)
; CHECK-PHI-NEXT: [[TMP5:%.*]] = icmp ne i32 [[TMP4]], 0
; CHECK-PHI-NEXT: br i1 [[TMP5]], label [[WHILE_BODY]], label [[WHILE_END]]
; CHECK-PHI: while.end:
@@ -182,14 +182,14 @@ define void @while_ne(i32 %N, ptr nocapture %A) strictfp {
; CHECK-DEC-NEXT: [[CMP:%.*]] = icmp ne i32 [[N:%.*]], 0
; CHECK-DEC-NEXT: br i1 [[CMP]], label [[WHILE_BODY_PREHEADER:%.*]], label [[WHILE_END:%.*]]
; CHECK-DEC: while.body.preheader:
-; CHECK-DEC-NEXT: call void @llvm.set.loop.iterations.i32(i32 [[N]]) #[[ATTR0]]
+; CHECK-DEC-NEXT: call void @llvm.set.loop.iterations.i32(i32 [[N]])
; CHECK-DEC-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK-DEC: while.body:
; CHECK-DEC-NEXT: [[I_ADDR_05:%.*]] = phi i32 [ [[INC:%.*]], [[WHILE_BODY]] ], [ 0, [[WHILE_BODY_PREHEADER]] ]
; CHECK-DEC-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[I_ADDR_05]]
; CHECK-DEC-NEXT: store i32 [[I_ADDR_05]], ptr [[ARRAYIDX]], align 4
; CHECK-DEC-NEXT: [[INC]] = add nuw i32 [[I_ADDR_05]], 1
-; CHECK-DEC-NEXT: [[TMP0:%.*]] = call i1 @llvm.loop.decrement.i32(i32 1) #[[ATTR0]]
+; CHECK-DEC-NEXT: [[TMP0:%.*]] = call i1 @llvm.loop.decrement.i32(i32 1)
; CHECK-DEC-NEXT: br i1 [[TMP0]], label [[WHILE_BODY]], label [[WHILE_END]]
; CHECK-DEC: while.end:
; CHECK-DEC-NEXT: ret void
@@ -200,7 +200,7 @@ define void @while_ne(i32 %N, ptr nocapture %A) strictfp {
; CHECK-PHI-NEXT: [[CMP:%.*]] = icmp ne i32 [[N:%.*]], 0
; CHECK-PHI-NEXT: br i1 [[CMP]], label [[WHILE_BODY_PREHEADER:%.*]], label [[WHILE_END:%.*]]
; CHECK-PHI: while.body.preheader:
-; CHECK-PHI-NEXT: [[TMP0:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[N]]) #[[ATTR0]]
+; CHECK-PHI-NEXT: [[TMP0:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[N]])
; CHECK-PHI-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK-PHI: while.body:
; CHECK-PHI-NEXT: [[I_ADDR_05:%.*]] = phi i32 [ [[INC:%.*]], [[WHILE_BODY]] ], [ 0, [[WHILE_BODY_PREHEADER]] ]
@@ -208,7 +208,7 @@ define void @while_ne(i32 %N, ptr nocapture %A) strictfp {
; CHECK-PHI-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[I_ADDR_05]]
; CHECK-PHI-NEXT: store i32 [[I_ADDR_05]], ptr [[ARRAYIDX]], align 4
; CHECK-PHI-NEXT: [[INC]] = add nuw i32 [[I_ADDR_05]], 1
-; CHECK-PHI-NEXT: [[TMP2]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP1]], i32 1) #[[ATTR0]]
+; CHECK-PHI-NEXT: [[TMP2]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP1]], i32 1)
; CHECK-PHI-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
; CHECK-PHI-NEXT: br i1 [[TMP3]], label [[WHILE_BODY]], label [[WHILE_END]]
; CHECK-PHI: while.end:
@@ -237,14 +237,14 @@ define void @while_eq(i32 %N, ptr nocapture %A) strictfp {
; CHECK-DEC-NEXT: [[CMP:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-DEC-NEXT: br i1 [[CMP]], label [[WHILE_END:%.*]], label [[WHILE_BODY_PREHEADER:%.*]]
; CHECK-DEC: while.body.preheader:
-; CHECK-DEC-NEXT: call void @llvm.set.loop.iterations.i32(i32 [[N]]) #[[ATTR0]]
+; CHECK-DEC-NEXT: call void @llvm.set.loop.iterations.i32(i32 [[N]])
; CHECK-DEC-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK-DEC: while.body:
; CHECK-DEC-NEXT: [[I_ADDR_05:%.*]] = phi i32 [ [[INC:%.*]], [[WHILE_BODY]] ], [ 0, [[WHILE_BODY_PREHEADER]] ]
; CHECK-DEC-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[I_ADDR_05]]
; CHECK-DEC-NEXT: store i32 [[I_ADDR_05]], ptr [[ARRAYIDX]], align 4
; CHECK-DEC-NEXT: [[INC]] = add nuw i32 [[I_ADDR_05]], 1
-; CHECK-DEC-NEXT: [[TMP0:%.*]] = call i1 @llvm.loop.decrement.i32(i32 1) #[[ATTR0]]
+; CHECK-DEC-NEXT: [[TMP0:%.*]] = call i1 @llvm.loop.decrement.i32(i32 1)
; CHECK-DEC-NEXT: br i1 [[TMP0]], label [[WHILE_BODY]], label [[WHILE_END]]
; CHECK-DEC: while.end:
; CHECK-DEC-NEXT: ret void
@@ -255,7 +255,7 @@ define void @while_eq(i32 %N, ptr nocapture %A) strictfp {
; CHECK-PHI-NEXT: [[CMP:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-PHI-NEXT: br i1 [[CMP]], label [[WHILE_END:%.*]], label [[WHILE_BODY_PREHEADER:%.*]]
; CHECK-PHI: while.body.preheader:
-; CHECK-PHI-NEXT: [[TMP0:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[N]]) #[[ATTR0]]
+; CHECK-PHI-NEXT: [[TMP0:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[N]])
; CHECK-PHI-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK-PHI: while.body:
; CHECK-PHI-NEXT: [[I_ADDR_05:%.*]] = phi i32 [ [[INC:%.*]], [[WHILE_BODY]] ], [ 0, [[WHILE_BODY_PREHEADER]] ]
@@ -263,7 +263,7 @@ define void @while_eq(i32 %N, ptr nocapture %A) strictfp {
; CHECK-PHI-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[I_ADDR_05]]
; CHECK-PHI-NEXT: store i32 [[I_ADDR_05]], ptr [[ARRAYIDX]], align 4
; CHECK-PHI-NEXT: [[INC]] = add nuw i32 [[I_ADDR_05]], 1
-; CHECK-PHI-NEXT: [[TMP2]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP1]], i32 1) #[[ATTR0]]
+; CHECK-PHI-NEXT: [[TMP2]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP1]], i32 1)
; CHECK-PHI-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
; CHECK-PHI-NEXT: br i1 [[TMP3]], label [[WHILE_BODY]], label [[WHILE_END]]
; CHECK-PHI: while.end:
@@ -294,14 +294,14 @@ define void @while_preheader_eq(i32 %N, ptr nocapture %A) strictfp {
; CHECK-DEC-NEXT: [[CMP:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-DEC-NEXT: br i1 [[CMP]], label [[WHILE_END:%.*]], label [[WHILE_BODY_PREHEADER:%.*]]
; CHECK-DEC: while.body.preheader:
-; CHECK-DEC-NEXT: call void @llvm.set.loop.iterations.i32(i32 [[N]]) #[[ATTR0]]
+; CHECK-DEC-NEXT: call void @llvm.set.loop.iterations.i32(i32 [[N]])
; CHECK-DEC-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK-DEC: while.body:
; CHECK-DEC-NEXT: [[I_ADDR_05:%.*]] = phi i32 [ [[INC:%.*]], [[WHILE_BODY]] ], [ 0, [[WHILE_BODY_PREHEADER]] ]
; CHECK-DEC-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[I_ADDR_05]]
; CHECK-DEC-NEXT: store i32 [[I_ADDR_05]], ptr [[ARRAYIDX]], align 4
; CHECK-DEC-NEXT: [[INC]] = add nuw i32 [[I_ADDR_05]], 1
-; CHECK-DEC-NEXT: [[TMP0:%.*]] = call i1 @llvm.loop.decrement.i32(i32 1) #[[ATTR0]]
+; CHECK-DEC-NEXT: [[TMP0:%.*]] = call i1 @llvm.loop.decrement.i32(i32 1)
; CHECK-DEC-NEXT: br i1 [[TMP0]], label [[WHILE_BODY]], label [[WHILE_END]]
; CHECK-DEC: while.end:
; CHECK-DEC-NEXT: ret void
@@ -314,7 +314,7 @@ define void @while_preheader_eq(i32 %N, ptr nocapture %A) strictfp {
; CHECK-PHI-NEXT: [[CMP:%.*]] = icmp eq i32 [[N:%.*]], 0
; CHECK-PHI-NEXT: br i1 [[CMP]], label [[WHILE_END:%.*]], label [[WHILE_BODY_PREHEADER:%.*]]
; CHECK-PHI: while.body.preheader:
-; CHECK-PHI-NEXT: [[TMP0:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[N]]) #[[ATTR0]]
+; CHECK-PHI-NEXT: [[TMP0:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[N]])
; CHECK-PHI-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK-PHI: while.body:
; CHECK-PHI-NEXT: [[I_ADDR_05:%.*]] = phi i32 [ [[INC:%.*]], [[WHILE_BODY]] ], [ 0, [[WHILE_BODY_PREHEADER]] ]
@@ -322,7 +322,7 @@ define void @while_preheader_eq(i32 %N, ptr nocapture %A) strictfp {
; CHECK-PHI-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[I_ADDR_05]]
; CHECK-PHI-NEXT: store i32 [[I_ADDR_05]], ptr [[ARRAYIDX]], align 4
; CHECK-PHI-NEXT: [[INC]] = add nuw i32 [[I_ADDR_05]], 1
-; CHECK-PHI-NEXT: [[TMP2]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP1]], i32 1) #[[ATTR0]]
+; CHECK-PHI-NEXT: [[TMP2]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP1]], i32 1)
; CHECK-PHI-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
; CHECK-PHI-NEXT: br i1 [[TMP3]], label [[WHILE_BODY]], label [[WHILE_END]]
; CHECK-PHI: while.end:
@@ -356,7 +356,7 @@ define void @nested(ptr nocapture %A, i32 %N) strictfp {
; CHECK-DEC: while.cond1.preheader.us:
; CHECK-DEC-NEXT: [[I_021_US:%.*]] = phi i32 [ [[INC6_US:%.*]], [[WHILE_COND1_WHILE_END_CRIT_EDGE_US:%.*]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-DEC-NEXT: [[MUL_US:%.*]] = mul i32 [[I_021_US]], [[N]]
-; CHECK-DEC-NEXT: call void @llvm.set.loop.iterations.i32(i32 [[N]]) #[[ATTR0]]
+; CHECK-DEC-NEXT: call void @llvm.set.loop.iterations.i32(i32 [[N]])
; CHECK-DEC-NEXT: br label [[WHILE_BODY3_US:%.*]]
; CHECK-DEC: while.body3.us:
; CHECK-DEC-NEXT: [[J_019_US:%.*]] = phi i32 [ 0, [[WHILE_COND1_PREHEADER_US]] ], [ [[INC_US:%.*]], [[WHILE_BODY3_US]] ]
@@ -364,7 +364,7 @@ define void @nested(ptr nocapture %A, i32 %N) strictfp {
; CHECK-DEC-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[ADD_US]]
; CHECK-DEC-NEXT: store i32 [[ADD_US]], ptr [[ARRAYIDX_US]], align 4
; CHECK-DEC-NEXT: [[INC_US]] = add nuw i32 [[J_019_US]], 1
-; CHECK-DEC-NEXT: [[TMP0:%.*]] = call i1 @llvm.loop.decrement.i32(i32 1) #[[ATTR0]]
+; CHECK-DEC-NEXT: [[TMP0:%.*]] = call i1 @llvm.loop.decrement.i32(i32 1)
; CHECK-DEC-NEXT: br i1 [[TMP0]], label [[WHILE_BODY3_US]], label [[WHILE_COND1_WHILE_END_CRIT_EDGE_US]]
; CHECK-DEC: while.cond1.while.end_crit_edge.us:
; CHECK-DEC-NEXT: [[INC6_US]] = add nuw i32 [[I_021_US]], 1
@@ -381,7 +381,7 @@ define void @nested(ptr nocapture %A, i32 %N) strictfp {
; CHECK-PHI: while.cond1.preheader.us:
; CHECK-PHI-NEXT: [[I_021_US:%.*]] = phi i32 [ [[INC6_US:%.*]], [[WHILE_COND1_WHILE_END_CRIT_EDGE_US:%.*]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-PHI-NEXT: [[MUL_US:%.*]] = mul i32 [[I_021_US]], [[N]]
-; CHECK-PHI-NEXT: [[TMP0:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[N]]) #[[ATTR0]]
+; CHECK-PHI-NEXT: [[TMP0:%.*]] = call i32 @llvm.start.loop.iterations.i32(i32 [[N]])
; CHECK-PHI-NEXT: br label [[WHILE_BODY3_US:%.*]]
; CHECK-PHI: while.body3.us:
; CHECK-PHI-NEXT: [[J_019_US:%.*]] = phi i32 [ 0, [[WHILE_COND1_PREHEADER_US]] ], [ [[INC_US:%.*]], [[WHILE_BODY3_US]] ]
@@ -390,7 +390,7 @@ define void @nested(ptr nocapture %A, i32 %N) strictfp {
; CHECK-PHI-NEXT: [[ARRAYIDX_US:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i32 [[ADD_US]]
; CHECK-PHI-NEXT: store i32 [[ADD_US]], ptr [[ARRAYIDX_US]], align 4
; CHECK-PHI-NEXT: [[INC_US]] = add nuw i32 [[J_019_US]], 1
-; CHECK-PHI-NEXT: [[TMP2]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP1]], i32 1) #[[ATTR0]]
+; CHECK-PHI-NEXT: [[TMP2]] = call i32 @llvm.loop.decrement.reg.i32(i32 [[TMP1]], i32 1)
; CHECK-PHI-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
; CHECK-PHI-NEXT: br i1 [[TMP3]], label [[WHILE_BODY3_US]], label [[WHILE_COND1_WHILE_END_CRIT_EDGE_US]]
; CHECK-PHI: while.cond1.while.end_crit_edge.us:
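For readers skimming the test churn above, here is a minimal standalone sketch (not part of this patch; the attribute group number #0 is chosen only for illustration) of the pattern the updated CHECK lines encode: inside a strictfp function, call-site attribute groups are no longer expected on non-FP intrinsics such as @llvm.cttz, while calls to constrained FP intrinsics keep theirs:

define double @sketch(double %a, double %b, i64 %mask) strictfp {
  ; Non-FP intrinsic call: no call-site attribute group is checked anymore.
  %tz = call i64 @llvm.cttz.i64(i64 %mask, i1 true)
  ; Constrained FP intrinsic call: still carries the strictfp call-site attribute.
  %sum = call double @llvm.experimental.constrained.fadd.f64(double %a, double %b, metadata !"round.dynamic", metadata !"fpexcept.strict") #0
  ret double %sum
}

declare i64 @llvm.cttz.i64(i64, i1)
declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata)

attributes #0 = { strictfp }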