[Mlir-commits] [clang] [clang-tools-extra] [compiler-rt] [libc] [libcxx] [lld] [lldb] [llvm] [mlir] [libc][math][C23] Implemented remquof128 function (PR #94809)
Hendrik Hübner
llvmlistbot at llvm.org
Sat Jun 8 09:57:40 PDT 2024
https://github.com/HendrikHuebner updated https://github.com/llvm/llvm-project/pull/94809
From 6daec3d3b7d2e3d4e2e22c2ddf1b84b10a3ae6cc Mon Sep 17 00:00:00 2001
From: hhuebner <hendrik.huebner18 at gmail.com>
Date: Fri, 7 Jun 2024 23:42:59 +0200
Subject: [PATCH 01/57] [libc][C23] Implemented remquof128 function
---
libc/config/linux/aarch64/entrypoints.txt | 1 +
libc/config/linux/x86_64/entrypoints.txt | 1 +
libc/spec/stdc.td | 1 +
libc/src/math/CMakeLists.txt | 1 +
libc/src/math/generic/CMakeLists.txt | 12 ++++++++++++
libc/src/math/generic/remquof128.cpp | 19 +++++++++++++++++++
libc/src/math/remquof128.h | 20 ++++++++++++++++++++
libc/test/src/math/smoke/CMakeLists.txt | 14 ++++++++++++++
libc/test/src/math/smoke/remquof128_test.cpp | 13 +++++++++++++
9 files changed, 82 insertions(+)
create mode 100644 libc/src/math/generic/remquof128.cpp
create mode 100644 libc/src/math/remquof128.h
create mode 100644 libc/test/src/math/smoke/remquof128_test.cpp
diff --git a/libc/config/linux/aarch64/entrypoints.txt b/libc/config/linux/aarch64/entrypoints.txt
index 33ecff813a1fb..d30f01bb51c48 100644
--- a/libc/config/linux/aarch64/entrypoints.txt
+++ b/libc/config/linux/aarch64/entrypoints.txt
@@ -574,6 +574,7 @@ if(LIBC_TYPES_HAS_FLOAT128)
libc.src.math.nextafterf128
libc.src.math.nextdownf128
libc.src.math.nextupf128
+ libc.src.math.remquof128
libc.src.math.rintf128
libc.src.math.roundf128
libc.src.math.scalbnf128
diff --git a/libc/config/linux/x86_64/entrypoints.txt b/libc/config/linux/x86_64/entrypoints.txt
index e3ca544ae0185..814150ae757d5 100644
--- a/libc/config/linux/x86_64/entrypoints.txt
+++ b/libc/config/linux/x86_64/entrypoints.txt
@@ -604,6 +604,7 @@ if(LIBC_TYPES_HAS_FLOAT128)
libc.src.math.nextafterf128
libc.src.math.nextdownf128
libc.src.math.nextupf128
+ libc.src.math.remquof128
libc.src.math.rintf128
libc.src.math.roundevenf128
libc.src.math.roundf128
diff --git a/libc/spec/stdc.td b/libc/spec/stdc.td
index 9a436c8ae38d2..490aa0ba286ec 100644
--- a/libc/spec/stdc.td
+++ b/libc/spec/stdc.td
@@ -577,6 +577,7 @@ def StdC : StandardSpec<"stdc"> {
FunctionSpec<"remainderl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>]>,
FunctionSpec<"remquof", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<FloatType>, ArgSpec<IntPtr>]>,
+ GuardedFunctionSpec<"remquof128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>, ArgSpec<IntPtr>], "LIBC_TYPES_HAS_FLOAT128">,
FunctionSpec<"remquo", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>, ArgSpec<IntPtr>]>,
FunctionSpec<"remquol", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>, ArgSpec<IntPtr>]>,
diff --git a/libc/src/math/CMakeLists.txt b/libc/src/math/CMakeLists.txt
index 7a349ddc53724..4e275201072fa 100644
--- a/libc/src/math/CMakeLists.txt
+++ b/libc/src/math/CMakeLists.txt
@@ -309,6 +309,7 @@ add_math_entrypoint_object(remainderl)
add_math_entrypoint_object(remquo)
add_math_entrypoint_object(remquof)
+add_math_entrypoint_object(remquof128)
add_math_entrypoint_object(remquol)
add_math_entrypoint_object(rint)
diff --git a/libc/src/math/generic/CMakeLists.txt b/libc/src/math/generic/CMakeLists.txt
index b1d786fc6b29f..a859f7bd7268b 100644
--- a/libc/src/math/generic/CMakeLists.txt
+++ b/libc/src/math/generic/CMakeLists.txt
@@ -2416,6 +2416,18 @@ add_entrypoint_object(
-O2
)
+add_entrypoint_object(
+ remquof128
+ SRCS
+ remquof128.cpp
+ HDRS
+ ../remquof128.h
+ DEPENDS
+ libc.src.__support.FPUtil.division_and_remainder_operations
+ COMPILE_OPTIONS
+ -O2
+)
+
add_entrypoint_object(
remquo
SRCS
diff --git a/libc/src/math/generic/remquof128.cpp b/libc/src/math/generic/remquof128.cpp
new file mode 100644
index 0000000000000..e195c7b51b5ff
--- /dev/null
+++ b/libc/src/math/generic/remquof128.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of remquof128 function -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/remquof128.h"
+#include "src/__support/FPUtil/DivisionAndRemainderOperations.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float128, remquof128, (float128 x, float128 y, int *exp)) {
+ return fputil::remquo(x, y, *exp);
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/src/math/remquof128.h b/libc/src/math/remquof128.h
new file mode 100644
index 0000000000000..e9db1ef5c5b51
--- /dev/null
+++ b/libc/src/math/remquof128.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for remquof128 --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_REMQUOF128_H
+#define LLVM_LIBC_SRC_MATH_REMQUOF128_H
+
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE {
+
+float128 remquof128(float128 x, float128 y, int *exp);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_REMQUOF128_H
diff --git a/libc/test/src/math/smoke/CMakeLists.txt b/libc/test/src/math/smoke/CMakeLists.txt
index 110fa1de97d6d..e9eb65a8982bc 100644
--- a/libc/test/src/math/smoke/CMakeLists.txt
+++ b/libc/test/src/math/smoke/CMakeLists.txt
@@ -2514,6 +2514,20 @@ add_fp_unittest(
libc.src.__support.FPUtil.fp_bits
)
+add_fp_unittest(
+ remquof128_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ remquof128_test.cpp
+ HDRS
+ RemQuoTest.h
+ DEPENDS
+ libc.src.math.remquof128
+ libc.src.__support.FPUtil.basic_operations
+ libc.src.__support.FPUtil.fp_bits
+)
+
add_fp_unittest(
remquo_test
SUITE
diff --git a/libc/test/src/math/smoke/remquof128_test.cpp b/libc/test/src/math/smoke/remquof128_test.cpp
new file mode 100644
index 0000000000000..8ef6c3b31cef2
--- /dev/null
+++ b/libc/test/src/math/smoke/remquof128_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for remquof128 ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "RemQuoTest.h"
+
+#include "src/math/remquof128.h"
+
+LIST_REMQUO_TESTS(float128, LIBC_NAMESPACE::remquof128)
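For reference, a minimal caller sketch, not part of the patch, showing the C23 remquo contract that remquof128 implements: the return value is x REM y with the quotient rounded to nearest (ties to even), and the low-order bits of that quotient, with its sign, are stored through the int pointer.

    #include "src/math/remquof128.h"

    int main() {
      int quo;
      // 5/2 = 2.5 rounds to the even quotient 2, so the remainder is
      // 5 - 2*2 = 1 and quo receives the low bits of 2, positively signed.
      // The arguments convert implicitly from int to float128.
      auto rem = LIBC_NAMESPACE::remquof128(5, 2, &quo);
      return (rem == 1 && quo == 2) ? 0 : 1;
    }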
From 4d884ba2cdcf10e17130b96d766f8339b2ca8eac Mon Sep 17 00:00:00 2001
From: hhuebner <hendrik.huebner18 at gmail.com>
Date: Sat, 8 Jun 2024 18:56:03 +0200
Subject: [PATCH 02/57] Update index and optimization level
---
libc/docs/math/index.rst | 2 +-
libc/src/math/generic/CMakeLists.txt | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/libc/docs/math/index.rst b/libc/docs/math/index.rst
index b9507f0887cd7..2bc452148121b 100644
--- a/libc/docs/math/index.rst
+++ b/libc/docs/math/index.rst
@@ -200,7 +200,7 @@ Basic Operations
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
| remainder | |check| | |check| | |check| | | | 7.12.10.2 | F.10.7.2 |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
-| remquo | |check| | |check| | |check| | | | 7.12.10.3 | F.10.7.3 |
+| remquo | |check| | |check| | |check| | | |check| | 7.12.10.3 | F.10.7.3 |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
| rint | |check| | |check| | |check| | |check| | |check| | 7.12.9.4 | F.10.6.4 |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
diff --git a/libc/src/math/generic/CMakeLists.txt b/libc/src/math/generic/CMakeLists.txt
index a859f7bd7268b..46b2bd26c4604 100644
--- a/libc/src/math/generic/CMakeLists.txt
+++ b/libc/src/math/generic/CMakeLists.txt
@@ -2425,7 +2425,7 @@ add_entrypoint_object(
DEPENDS
libc.src.__support.FPUtil.division_and_remainder_operations
COMPILE_OPTIONS
- -O2
+ -O3
)
add_entrypoint_object(
From 9b88839edb6647353bd148c8f74b87f7cde01226 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Fri, 7 Jun 2024 15:16:03 -0700
Subject: [PATCH 03/57] [RISCV] Add TargetConstraintType=2 to vnclip
pseudoinstructions. NFC
These instructions are very similar to narrowing shift instructions
which already have this.
Remove TargetConstraintType parameter from VPseudoBinaryV_WV
class. Only 2 was ever passed to it. Pass 2 directly to the classes
instantiated from VPseudoBinaryV_WV instead.
---
.../Target/RISCV/RISCVInstrInfoVPseudos.td | 30 +++++++++++--------
1 file changed, 18 insertions(+), 12 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index fe4d839e4fdcb..b47ba21725da6 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -2422,37 +2422,43 @@ multiclass VPseudoBinaryW_WF_RM<LMULInfo m, FPR_Info f, int sew = 0> {
// exception from the spec.
// "The destination EEW is smaller than the source EEW and the overlap is in the
// lowest-numbered part of the source register group."
-multiclass VPseudoBinaryV_WV<LMULInfo m, int TargetConstraintType = 1> {
+multiclass VPseudoBinaryV_WV<LMULInfo m> {
defm _WV : VPseudoBinary<m.vrclass, m.wvrclass, m.vrclass, m,
- !if(!ge(m.octuple, 8), "@earlyclobber $rd", ""), TargetConstraintType=TargetConstraintType>;
+ !if(!ge(m.octuple, 8), "@earlyclobber $rd", ""),
+ TargetConstraintType=2>;
}
multiclass VPseudoBinaryV_WV_RM<LMULInfo m> {
defm _WV : VPseudoBinaryRoundingMode<m.vrclass, m.wvrclass, m.vrclass, m,
!if(!ge(m.octuple, 8),
- "@earlyclobber $rd", "")>;
+ "@earlyclobber $rd", ""),
+ TargetConstraintType=2>;
}
-multiclass VPseudoBinaryV_WX<LMULInfo m, int TargetConstraintType = 1> {
+multiclass VPseudoBinaryV_WX<LMULInfo m> {
defm _WX : VPseudoBinary<m.vrclass, m.wvrclass, GPR, m,
- !if(!ge(m.octuple, 8), "@earlyclobber $rd", ""), TargetConstraintType=TargetConstraintType>;
+ !if(!ge(m.octuple, 8), "@earlyclobber $rd", ""),
+ TargetConstraintType=2>;
}
multiclass VPseudoBinaryV_WX_RM<LMULInfo m> {
defm _WX : VPseudoBinaryRoundingMode<m.vrclass, m.wvrclass, GPR, m,
!if(!ge(m.octuple, 8),
- "@earlyclobber $rd", "")>;
+ "@earlyclobber $rd", ""),
+ TargetConstraintType=2>;
}
-multiclass VPseudoBinaryV_WI<LMULInfo m, int TargetConstraintType = 1> {
+multiclass VPseudoBinaryV_WI<LMULInfo m> {
defm _WI : VPseudoBinary<m.vrclass, m.wvrclass, uimm5, m,
- !if(!ge(m.octuple, 8), "@earlyclobber $rd", ""), TargetConstraintType=TargetConstraintType>;
+ !if(!ge(m.octuple, 8), "@earlyclobber $rd", ""),
+ TargetConstraintType=2>;
}
multiclass VPseudoBinaryV_WI_RM<LMULInfo m> {
defm _WI : VPseudoBinaryRoundingMode<m.vrclass, m.wvrclass, uimm5, m,
!if(!ge(m.octuple, 8),
- "@earlyclobber $rd", "")>;
+ "@earlyclobber $rd", ""),
+ TargetConstraintType=2>;
}
// For vadc and vsbc, the instruction encoding is reserved if the destination
@@ -3195,13 +3201,13 @@ multiclass VPseudoVNCLP_WV_WX_WI_RM {
multiclass VPseudoVNSHT_WV_WX_WI {
foreach m = MxListW in {
defvar mx = m.MX;
- defm "" : VPseudoBinaryV_WV<m, TargetConstraintType=2>,
+ defm "" : VPseudoBinaryV_WV<m>,
SchedBinary<"WriteVNShiftV", "ReadVNShiftV", "ReadVNShiftV", mx,
forceMergeOpRead=true>;
- defm "" : VPseudoBinaryV_WX<m, TargetConstraintType=2>,
+ defm "" : VPseudoBinaryV_WX<m>,
SchedBinary<"WriteVNShiftX", "ReadVNShiftV", "ReadVNShiftX", mx,
forceMergeOpRead=true>;
- defm "" : VPseudoBinaryV_WI<m, TargetConstraintType=2>,
+ defm "" : VPseudoBinaryV_WI<m>,
SchedUnary<"WriteVNShiftI", "ReadVNShiftV", mx,
forceMergeOpRead=true>;
}
From 4465cc70dbbddc2a4f47ace0c74b3e291577dbb2 Mon Sep 17 00:00:00 2001
From: OverMighty <its.overmighty at gmail.com>
Date: Sat, 8 Jun 2024 00:26:58 +0200
Subject: [PATCH 04/57] [libc][math][c23] Add fmodf16 C23 math function
(#94629)
Part of #93566.
---
libc/config/linux/aarch64/entrypoints.txt | 1 +
libc/config/linux/x86_64/entrypoints.txt | 1 +
libc/docs/math/index.rst | 2 +-
libc/spec/stdc.td | 1 +
libc/src/__support/FPUtil/FPBits.h | 2 +-
libc/src/__support/FPUtil/generic/FMod.h | 4 +-
libc/src/math/CMakeLists.txt | 1 +
libc/src/math/fmodf16.h | 20 +++++++
libc/src/math/generic/CMakeLists.txt | 13 +++++
libc/src/math/generic/fmodf16.cpp | 19 +++++++
.../BinaryOpSingleOutputPerf.h | 52 +++++++++++++------
.../math/performance_testing/CMakeLists.txt | 13 +++++
.../math/performance_testing/fmodf16_perf.cpp | 27 ++++++++++
libc/test/src/math/smoke/CMakeLists.txt | 33 +++++++++---
libc/test/src/math/smoke/FModTest.h | 37 ++++++-------
libc/test/src/math/smoke/fmodf16_test.cpp | 13 +++++
16 files changed, 193 insertions(+), 46 deletions(-)
create mode 100644 libc/src/math/fmodf16.h
create mode 100644 libc/src/math/generic/fmodf16.cpp
create mode 100644 libc/test/src/math/performance_testing/fmodf16_perf.cpp
create mode 100644 libc/test/src/math/smoke/fmodf16_test.cpp
diff --git a/libc/config/linux/aarch64/entrypoints.txt b/libc/config/linux/aarch64/entrypoints.txt
index d30f01bb51c48..1852772fdbf61 100644
--- a/libc/config/linux/aarch64/entrypoints.txt
+++ b/libc/config/linux/aarch64/entrypoints.txt
@@ -515,6 +515,7 @@ if(LIBC_TYPES_HAS_FLOAT16)
libc.src.math.fminimum_magf16
libc.src.math.fminimum_mag_numf16
libc.src.math.fminimum_numf16
+ libc.src.math.fmodf16
libc.src.math.fromfpf16
libc.src.math.fromfpxf16
libc.src.math.llrintf16
diff --git a/libc/config/linux/x86_64/entrypoints.txt b/libc/config/linux/x86_64/entrypoints.txt
index 814150ae757d5..77854d09b410e 100644
--- a/libc/config/linux/x86_64/entrypoints.txt
+++ b/libc/config/linux/x86_64/entrypoints.txt
@@ -547,6 +547,7 @@ if(LIBC_TYPES_HAS_FLOAT16)
libc.src.math.fminimum_magf16
libc.src.math.fminimum_mag_numf16
libc.src.math.fminimum_numf16
+ libc.src.math.fmodf16
libc.src.math.fromfpf16
libc.src.math.fromfpxf16
libc.src.math.llrintf16
diff --git a/libc/docs/math/index.rst b/libc/docs/math/index.rst
index 2bc452148121b..61d737abbcfbc 100644
--- a/libc/docs/math/index.rst
+++ b/libc/docs/math/index.rst
@@ -156,7 +156,7 @@ Basic Operations
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
| fminimum_num | |check| | |check| | |check| | |check| | |check| | 7.12.12.9 | F.10.9.5 |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
-| fmod | |check| | |check| | |check| | | |check| | 7.12.10.1 | F.10.7.1 |
+| fmod | |check| | |check| | |check| | |check| | |check| | 7.12.10.1 | F.10.7.1 |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
| fmul | N/A | | | N/A | | 7.12.14.3 | F.10.11 |
+------------------+------------------+-----------------+------------------------+----------------------+------------------------+------------------------+----------------------------+
diff --git a/libc/spec/stdc.td b/libc/spec/stdc.td
index 490aa0ba286ec..d707f15246cb4 100644
--- a/libc/spec/stdc.td
+++ b/libc/spec/stdc.td
@@ -478,6 +478,7 @@ def StdC : StandardSpec<"stdc"> {
FunctionSpec<"fmod", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<DoubleType>]>,
FunctionSpec<"fmodf", RetValSpec<FloatType>, [ArgSpec<FloatType>, ArgSpec<FloatType>]>,
FunctionSpec<"fmodl", RetValSpec<LongDoubleType>, [ArgSpec<LongDoubleType>, ArgSpec<LongDoubleType>]>,
+ GuardedFunctionSpec<"fmodf16", RetValSpec<Float16Type>, [ArgSpec<Float16Type>, ArgSpec<Float16Type>], "LIBC_TYPES_HAS_FLOAT16">,
GuardedFunctionSpec<"fmodf128", RetValSpec<Float128Type>, [ArgSpec<Float128Type>, ArgSpec<Float128Type>], "LIBC_TYPES_HAS_FLOAT128">,
FunctionSpec<"frexp", RetValSpec<DoubleType>, [ArgSpec<DoubleType>, ArgSpec<IntPtr>]>,
diff --git a/libc/src/__support/FPUtil/FPBits.h b/libc/src/__support/FPUtil/FPBits.h
index d3c96d2d613d6..559ecde767c30 100644
--- a/libc/src/__support/FPUtil/FPBits.h
+++ b/libc/src/__support/FPUtil/FPBits.h
@@ -744,7 +744,7 @@ struct FPRepImpl : public FPRepSem<fp_type, RetT> {
if (LIBC_LIKELY(ep >= 0)) {
// Implicit number bit will be removed by mask
result.set_significand(number);
- result.set_biased_exponent(ep + 1);
+ result.set_biased_exponent(static_cast<StorageType>(ep + 1));
} else {
result.set_significand(number >> -ep);
}
diff --git a/libc/src/__support/FPUtil/generic/FMod.h b/libc/src/__support/FPUtil/generic/FMod.h
index 211ab926d28b0..f840a92b1a5a2 100644
--- a/libc/src/__support/FPUtil/generic/FMod.h
+++ b/libc/src/__support/FPUtil/generic/FMod.h
@@ -210,7 +210,9 @@ class FMod {
e_x - e_y <= int(FPB::EXP_LEN))) {
StorageType m_x = sx.get_explicit_mantissa();
StorageType m_y = sy.get_explicit_mantissa();
- StorageType d = (e_x == e_y) ? (m_x - m_y) : (m_x << (e_x - e_y)) % m_y;
+ StorageType d = (e_x == e_y)
+ ? (m_x - m_y)
+ : static_cast<StorageType>(m_x << (e_x - e_y)) % m_y;
if (d == 0)
return FPB::zero();
// iy - 1 because of "zero power" for number with power 1
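A note on the static_cast in this hunk, our reading rather than anything stated in the patch: once FMod is instantiated with a 16-bit storage type (the new perf test exercises FMod<float16, uint16_t>), C++ integer promotion widens the operands of << to int, so without the cast the shift and modulo are computed in int and narrow implicitly on assignment back to StorageType. A minimal illustration:

    #include <cstdint>

    int main() {
      uint16_t m_x = 0x4321, m_y = 7;
      // m_x << 3 is computed in int after integer promotion; the cast
      // truncates the shifted value back to the 16-bit storage type before
      // the modulo and avoids an implicit-conversion warning.
      uint16_t d = static_cast<uint16_t>(m_x << 3) % m_y;
      return d;
    }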
diff --git a/libc/src/math/CMakeLists.txt b/libc/src/math/CMakeLists.txt
index 4e275201072fa..70ecb526d5a33 100644
--- a/libc/src/math/CMakeLists.txt
+++ b/libc/src/math/CMakeLists.txt
@@ -183,6 +183,7 @@ add_math_entrypoint_object(fminimum_mag_numf128)
add_math_entrypoint_object(fmod)
add_math_entrypoint_object(fmodf)
add_math_entrypoint_object(fmodl)
+add_math_entrypoint_object(fmodf16)
add_math_entrypoint_object(fmodf128)
add_math_entrypoint_object(frexp)
diff --git a/libc/src/math/fmodf16.h b/libc/src/math/fmodf16.h
new file mode 100644
index 0000000000000..ab658430275d8
--- /dev/null
+++ b/libc/src/math/fmodf16.h
@@ -0,0 +1,20 @@
+//===-- Implementation header for fmodf16 -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC_SRC_MATH_FMODF16_H
+#define LLVM_LIBC_SRC_MATH_FMODF16_H
+
+#include "src/__support/macros/properties/types.h"
+
+namespace LIBC_NAMESPACE {
+
+float16 fmodf16(float16 x, float16 y);
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC_SRC_MATH_FMODF16_H
diff --git a/libc/src/math/generic/CMakeLists.txt b/libc/src/math/generic/CMakeLists.txt
index 46b2bd26c4604..bd1c5d4f48ba7 100644
--- a/libc/src/math/generic/CMakeLists.txt
+++ b/libc/src/math/generic/CMakeLists.txt
@@ -2899,6 +2899,19 @@ add_entrypoint_object(
-O3
)
+add_entrypoint_object(
+ fmodf16
+ SRCS
+ fmodf16.cpp
+ HDRS
+ ../fmodf16.h
+ DEPENDS
+ libc.src.__support.macros.properties.types
+ libc.src.__support.FPUtil.generic.fmod
+ COMPILE_OPTIONS
+ -O3
+)
+
add_entrypoint_object(
fmodf128
SRCS
diff --git a/libc/src/math/generic/fmodf16.cpp b/libc/src/math/generic/fmodf16.cpp
new file mode 100644
index 0000000000000..a5bfd78113f63
--- /dev/null
+++ b/libc/src/math/generic/fmodf16.cpp
@@ -0,0 +1,19 @@
+//===-- Implementation of fmodf16 function --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/math/fmodf16.h"
+#include "src/__support/FPUtil/generic/FMod.h"
+#include "src/__support/common.h"
+
+namespace LIBC_NAMESPACE {
+
+LLVM_LIBC_FUNCTION(float16, fmodf16, (float16 x, float16 y)) {
+ return fputil::generic::FMod<float16, uint32_t>::eval(x, y);
+}
+
+} // namespace LIBC_NAMESPACE
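For reference, a small usage sketch, not part of the patch: C23 fmod returns x - trunc(x/y)*y, so the result carries the sign of x and has magnitude below |y|.

    #include "src/math/fmodf16.h"

    int main() {
      // Hypothetical values; the double literals convert to float16 where
      // the compiler supports the type.
      auto r = LIBC_NAMESPACE::fmodf16(5.5, 2.0);
      // trunc(5.5 / 2.0) = 2, so r = 5.5 - 2 * 2.0 = 1.5.
      return (r == 1.5) ? 0 : 1;
    }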
diff --git a/libc/test/src/math/performance_testing/BinaryOpSingleOutputPerf.h b/libc/test/src/math/performance_testing/BinaryOpSingleOutputPerf.h
index 504d1be94b891..3027932c70f40 100644
--- a/libc/test/src/math/performance_testing/BinaryOpSingleOutputPerf.h
+++ b/libc/test/src/math/performance_testing/BinaryOpSingleOutputPerf.h
@@ -9,6 +9,7 @@
#include "src/__support/FPUtil/FPBits.h"
#include "test/src/math/performance_testing/Timer.h"
+#include <cstddef>
#include <fstream>
namespace LIBC_NAMESPACE {
@@ -25,7 +26,10 @@ template <typename T> class BinaryOpSingleOutputPerf {
static void run_perf_in_range(Func myFunc, Func otherFunc,
StorageType startingBit, StorageType endingBit,
- StorageType N, std::ofstream &log) {
+ size_t N, size_t rounds, std::ofstream &log) {
+ if (endingBit - startingBit < N)
+ N = endingBit - startingBit;
+
auto runner = [=](Func func) {
volatile T result;
if (endingBit < startingBit) {
@@ -33,13 +37,15 @@ template <typename T> class BinaryOpSingleOutputPerf {
}
StorageType step = (endingBit - startingBit) / N;
- for (StorageType bitsX = startingBit, bitsY = endingBit;;
- bitsX += step, bitsY -= step) {
- T x = FPBits(bitsX).get_val();
- T y = FPBits(bitsY).get_val();
- result = func(x, y);
- if (endingBit - bitsX < step) {
- break;
+ for (size_t i = 0; i < rounds; i++) {
+ for (StorageType bitsX = startingBit, bitsY = endingBit;;
+ bitsX += step, bitsY -= step) {
+ T x = FPBits(bitsX).get_val();
+ T y = FPBits(bitsY).get_val();
+ result = func(x, y);
+ if (endingBit - bitsX < step) {
+ break;
+ }
}
}
};
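Our reading of the clamp added at the top of run_perf_in_range, not spelled out in the patch: for a 16-bit type the whole bit range can be smaller than N, in which case step = (endingBit - startingBit) / N is zero and bitsX += step never advances. Roughly:

    // e.g. endingBit - startingBit == 100 with N == 1'000'001 yields
    // step == 0 and a non-terminating inner loop; after the clamp,
    // N == 100 and step == 1.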
@@ -49,7 +55,7 @@ template <typename T> class BinaryOpSingleOutputPerf {
runner(myFunc);
timer.stop();
- double my_average = static_cast<double>(timer.nanoseconds()) / N;
+ double my_average = static_cast<double>(timer.nanoseconds()) / N / rounds;
log << "-- My function --\n";
log << " Total time : " << timer.nanoseconds() << " ns \n";
log << " Average runtime : " << my_average << " ns/op \n";
@@ -60,7 +66,8 @@ template <typename T> class BinaryOpSingleOutputPerf {
runner(otherFunc);
timer.stop();
- double other_average = static_cast<double>(timer.nanoseconds()) / N;
+ double other_average =
+ static_cast<double>(timer.nanoseconds()) / N / rounds;
log << "-- Other function --\n";
log << " Total time : " << timer.nanoseconds() << " ns \n";
log << " Average runtime : " << other_average << " ns/op \n";
@@ -71,22 +78,24 @@ template <typename T> class BinaryOpSingleOutputPerf {
log << " Mine / Other's : " << my_average / other_average << " \n";
}
- static void run_perf(Func myFunc, Func otherFunc, const char *logFile) {
+ static void run_perf(Func myFunc, Func otherFunc, int rounds,
+ const char *logFile) {
std::ofstream log(logFile);
log << " Performance tests with inputs in denormal range:\n";
run_perf_in_range(myFunc, otherFunc, /* startingBit= */ StorageType(0),
/* endingBit= */ FPBits::max_subnormal().uintval(),
- 10'000'001, log);
+ 1'000'001, rounds, log);
log << "\n Performance tests with inputs in normal range:\n";
run_perf_in_range(myFunc, otherFunc,
/* startingBit= */ FPBits::min_normal().uintval(),
/* endingBit= */ FPBits::max_normal().uintval(),
- 10'000'001, log);
+ 1'000'001, rounds, log);
log << "\n Performance tests with inputs in normal range with exponents "
"close to each other:\n";
- run_perf_in_range(
- myFunc, otherFunc, /* startingBit= */ FPBits(T(0x1.0p-10)).uintval(),
- /* endingBit= */ FPBits(T(0x1.0p+10)).uintval(), 1'001'001, log);
+ run_perf_in_range(myFunc, otherFunc,
+ /* startingBit= */ FPBits(T(0x1.0p-10)).uintval(),
+ /* endingBit= */ FPBits(T(0x1.0p+10)).uintval(),
+ 1'000'001, rounds, log);
}
static void run_diff(Func myFunc, Func otherFunc, const char *logFile) {
@@ -117,6 +126,15 @@ template <typename T> class BinaryOpSingleOutputPerf {
#define BINARY_OP_SINGLE_OUTPUT_PERF(T, myFunc, otherFunc, filename) \
int main() { \
LIBC_NAMESPACE::testing::BinaryOpSingleOutputPerf<T>::run_perf( \
- &myFunc, &otherFunc, filename); \
+ &myFunc, &otherFunc, 1, filename); \
return 0; \
}
+
+#define BINARY_OP_SINGLE_OUTPUT_PERF_EX(T, myFunc, otherFunc, rounds, \
+ filename) \
+ { \
+ LIBC_NAMESPACE::testing::BinaryOpSingleOutputPerf<T>::run_perf( \
+ &myFunc, &otherFunc, rounds, filename); \
+ LIBC_NAMESPACE::testing::BinaryOpSingleOutputPerf<T>::run_perf( \
+ &myFunc, &otherFunc, rounds, filename); \
+ }
diff --git a/libc/test/src/math/performance_testing/CMakeLists.txt b/libc/test/src/math/performance_testing/CMakeLists.txt
index d1fb24e37f728..4ea78f9999e4d 100644
--- a/libc/test/src/math/performance_testing/CMakeLists.txt
+++ b/libc/test/src/math/performance_testing/CMakeLists.txt
@@ -88,6 +88,8 @@ add_header_library(
binary_op_single_output_diff
HDRS
BinaryOpSingleOutputPerf.h
+ DEPENDS
+ libc.src.__support.FPUtil.fp_bits
)
add_perf_binary(
@@ -343,6 +345,17 @@ add_perf_binary(
-fno-builtin
)
+add_perf_binary(
+ fmodf16_perf
+ SRCS
+ fmodf16_perf.cpp
+ DEPENDS
+ .binary_op_single_output_diff
+ libc.src.math.fmodf16
+ libc.src.__support.FPUtil.generic.fmod
+ libc.src.__support.macros.properties.types
+)
+
add_perf_binary(
fmodf128_perf
SRCS
diff --git a/libc/test/src/math/performance_testing/fmodf16_perf.cpp b/libc/test/src/math/performance_testing/fmodf16_perf.cpp
new file mode 100644
index 0000000000000..ff01fa6ca5870
--- /dev/null
+++ b/libc/test/src/math/performance_testing/fmodf16_perf.cpp
@@ -0,0 +1,27 @@
+//===-- Performance test for fmodf16 --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "BinaryOpSingleOutputPerf.h"
+
+#include "src/__support/FPUtil/generic/FMod.h"
+#include "src/__support/macros/properties/types.h"
+
+#include <stdint.h>
+
+#define FMOD_FUNC(U) (LIBC_NAMESPACE::fputil::generic::FMod<float16, U>::eval)
+
+int main() {
+ BINARY_OP_SINGLE_OUTPUT_PERF_EX(float16, FMOD_FUNC(uint16_t),
+ FMOD_FUNC(uint32_t), 5000,
+ "fmodf16_u16_vs_u32_perf.log")
+
+ BINARY_OP_SINGLE_OUTPUT_PERF_EX(float16, FMOD_FUNC(uint16_t),
+ FMOD_FUNC(uint64_t), 5000,
+ "fmodf16_u16_vs_u64_perf.log")
+ return 0;
+}
diff --git a/libc/test/src/math/smoke/CMakeLists.txt b/libc/test/src/math/smoke/CMakeLists.txt
index e9eb65a8982bc..401196dbc74c9 100644
--- a/libc/test/src/math/smoke/CMakeLists.txt
+++ b/libc/test/src/math/smoke/CMakeLists.txt
@@ -3125,10 +3125,10 @@ add_fp_unittest(
HDRS
FModTest.h
DEPENDS
+ libc.hdr.fenv_macros
libc.src.errno.errno
libc.src.math.fmodf
- libc.src.__support.FPUtil.basic_operations
- libc.src.__support.FPUtil.nearest_integer_operations
+ libc.src.__support.FPUtil.fenv_impl
# FIXME: Currently fails on the GPU build.
UNIT_TEST_ONLY
)
@@ -3142,10 +3142,10 @@ add_fp_unittest(
HDRS
FModTest.h
DEPENDS
+ libc.hdr.fenv_macros
libc.src.errno.errno
libc.src.math.fmod
- libc.src.__support.FPUtil.basic_operations
- libc.src.__support.FPUtil.nearest_integer_operations
+ libc.src.__support.FPUtil.fenv_impl
# FIXME: Currently fails on the GPU build.
UNIT_TEST_ONLY
)
@@ -3159,10 +3159,27 @@ add_fp_unittest(
HDRS
FModTest.h
DEPENDS
+ libc.hdr.fenv_macros
libc.src.errno.errno
libc.src.math.fmodl
- libc.src.__support.FPUtil.basic_operations
- libc.src.__support.FPUtil.nearest_integer_operations
+ libc.src.__support.FPUtil.fenv_impl
+ # FIXME: Currently fails on the GPU build.
+ UNIT_TEST_ONLY
+)
+
+add_fp_unittest(
+ fmodf16_test
+ SUITE
+ libc-math-smoke-tests
+ SRCS
+ fmodf16_test.cpp
+ HDRS
+ FModTest.h
+ DEPENDS
+ libc.hdr.fenv_macros
+ libc.src.errno.errno
+ libc.src.math.fmodf16
+ libc.src.__support.FPUtil.fenv_impl
# FIXME: Currently fails on the GPU build.
UNIT_TEST_ONLY
)
@@ -3176,10 +3193,10 @@ add_fp_unittest(
HDRS
FModTest.h
DEPENDS
+ libc.hdr.fenv_macros
libc.src.errno.errno
libc.src.math.fmodf128
- libc.src.__support.FPUtil.basic_operations
- libc.src.__support.FPUtil.nearest_integer_operations
+ libc.src.__support.FPUtil.fenv_impl
# FIXME: Currently fails on the GPU build.
UNIT_TEST_ONLY
)
diff --git a/libc/test/src/math/smoke/FModTest.h b/libc/test/src/math/smoke/FModTest.h
index f1015d6497fcd..405e3107438d4 100644
--- a/libc/test/src/math/smoke/FModTest.h
+++ b/libc/test/src/math/smoke/FModTest.h
@@ -9,13 +9,13 @@
#ifndef LLVM_LIBC_TEST_SRC_MATH_FMODTEST_H
#define LLVM_LIBC_TEST_SRC_MATH_FMODTEST_H
-#include "src/__support/FPUtil/BasicOperations.h"
-#include "src/__support/FPUtil/NearestIntegerOperations.h"
+#include "src/__support/FPUtil/FEnvImpl.h"
+#include "src/errno/libc_errno.h"
#include "test/UnitTest/FEnvSafeTest.h"
#include "test/UnitTest/FPMatcher.h"
#include "test/UnitTest/Test.h"
-#include "hdr/math_macros.h"
+#include "hdr/fenv_macros.h"
#define TEST_SPECIAL(x, y, expected, dom_err, expected_exception) \
EXPECT_FP_EQ(expected, f(x, y)); \
@@ -210,7 +210,8 @@ class FmodTest : public LIBC_NAMESPACE::testing::FEnvSafeTest {
}
void testRegularExtreme(FModFunc f) {
-
+ if constexpr (sizeof(T) < sizeof(float))
+ return;
TEST_REGULAR(0x1p127L, 0x3p-149L, 0x1p-149L);
TEST_REGULAR(0x1p127L, -0x3p-149L, 0x1p-149L);
TEST_REGULAR(0x1p127L, 0x3p-148L, 0x1p-147L);
@@ -224,20 +225,20 @@ class FmodTest : public LIBC_NAMESPACE::testing::FEnvSafeTest {
TEST_REGULAR(-0x1p127L, 0x3p-126L, -0x1p-125L);
TEST_REGULAR(-0x1p127L, -0x3p-126L, -0x1p-125L);
- if constexpr (sizeof(T) >= sizeof(double)) {
- TEST_REGULAR(0x1p1023L, 0x3p-1074L, 0x1p-1073L);
- TEST_REGULAR(0x1p1023L, -0x3p-1074L, 0x1p-1073L);
- TEST_REGULAR(0x1p1023L, 0x3p-1073L, 0x1p-1073L);
- TEST_REGULAR(0x1p1023L, -0x3p-1073L, 0x1p-1073L);
- TEST_REGULAR(0x1p1023L, 0x3p-1022L, 0x1p-1021L);
- TEST_REGULAR(0x1p1023L, -0x3p-1022L, 0x1p-1021L);
- TEST_REGULAR(-0x1p1023L, 0x3p-1074L, -0x1p-1073L);
- TEST_REGULAR(-0x1p1023L, -0x3p-1074L, -0x1p-1073L);
- TEST_REGULAR(-0x1p1023L, 0x3p-1073L, -0x1p-1073L);
- TEST_REGULAR(-0x1p1023L, -0x3p-1073L, -0x1p-1073L);
- TEST_REGULAR(-0x1p1023L, 0x3p-1022L, -0x1p-1021L);
- TEST_REGULAR(-0x1p1023L, -0x3p-1022L, -0x1p-1021L);
- }
+ if constexpr (sizeof(T) < sizeof(double))
+ return;
+ TEST_REGULAR(0x1p1023L, 0x3p-1074L, 0x1p-1073L);
+ TEST_REGULAR(0x1p1023L, -0x3p-1074L, 0x1p-1073L);
+ TEST_REGULAR(0x1p1023L, 0x3p-1073L, 0x1p-1073L);
+ TEST_REGULAR(0x1p1023L, -0x3p-1073L, 0x1p-1073L);
+ TEST_REGULAR(0x1p1023L, 0x3p-1022L, 0x1p-1021L);
+ TEST_REGULAR(0x1p1023L, -0x3p-1022L, 0x1p-1021L);
+ TEST_REGULAR(-0x1p1023L, 0x3p-1074L, -0x1p-1073L);
+ TEST_REGULAR(-0x1p1023L, -0x3p-1074L, -0x1p-1073L);
+ TEST_REGULAR(-0x1p1023L, 0x3p-1073L, -0x1p-1073L);
+ TEST_REGULAR(-0x1p1023L, -0x3p-1073L, -0x1p-1073L);
+ TEST_REGULAR(-0x1p1023L, 0x3p-1022L, -0x1p-1021L);
+ TEST_REGULAR(-0x1p1023L, -0x3p-1022L, -0x1p-1021L);
}
};
diff --git a/libc/test/src/math/smoke/fmodf16_test.cpp b/libc/test/src/math/smoke/fmodf16_test.cpp
new file mode 100644
index 0000000000000..9a48c5aa0d609
--- /dev/null
+++ b/libc/test/src/math/smoke/fmodf16_test.cpp
@@ -0,0 +1,13 @@
+//===-- Unittests for fmodf16 ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "FModTest.h"
+
+#include "src/math/fmodf16.h"
+
+LIST_FMOD_TESTS(float16, LIBC_NAMESPACE::fmodf16)
From c3c7456146de9df4f3f9eeb2c774860bc3990a78 Mon Sep 17 00:00:00 2001
From: Congcong Cai <congcongcai0907 at 163.com>
Date: Sat, 8 Jun 2024 06:46:39 +0800
Subject: [PATCH 05/57] [clang-tidy] new check misc-use-internal-linkage
(#90830)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Add new check misc-use-internal-linkage to detect variables and functions
that can be marked as static.
---------
Co-authored-by: Danny Mösch <danny.moesch at icloud.com>
---
.../clang-tidy/misc/CMakeLists.txt | 1 +
.../clang-tidy/misc/MiscTidyModule.cpp | 3 +
.../misc/UseInternalLinkageCheck.cpp | 95 +++++++++++++++++++
.../clang-tidy/misc/UseInternalLinkageCheck.h | 38 ++++++++
clang-tools-extra/docs/ReleaseNotes.rst | 6 ++
.../docs/clang-tidy/checks/list.rst | 1 +
.../checks/misc/use-internal-linkage.rst | 27 ++++++
.../misc/Inputs/use-internal-linkage/func.h | 5 +
.../Inputs/use-internal-linkage/func_cpp.inc | 1 +
.../Inputs/use-internal-linkage/func_h.inc | 1 +
.../misc/Inputs/use-internal-linkage/var.h | 3 +
.../misc/use-internal-linkage-func.cpp | 37 ++++++++
.../misc/use-internal-linkage-var.cpp | 40 ++++++++
13 files changed, 258 insertions(+)
create mode 100644 clang-tools-extra/clang-tidy/misc/UseInternalLinkageCheck.cpp
create mode 100644 clang-tools-extra/clang-tidy/misc/UseInternalLinkageCheck.h
create mode 100644 clang-tools-extra/docs/clang-tidy/checks/misc/use-internal-linkage.rst
create mode 100644 clang-tools-extra/test/clang-tidy/checkers/misc/Inputs/use-internal-linkage/func.h
create mode 100644 clang-tools-extra/test/clang-tidy/checkers/misc/Inputs/use-internal-linkage/func_cpp.inc
create mode 100644 clang-tools-extra/test/clang-tidy/checkers/misc/Inputs/use-internal-linkage/func_h.inc
create mode 100644 clang-tools-extra/test/clang-tidy/checkers/misc/Inputs/use-internal-linkage/var.h
create mode 100644 clang-tools-extra/test/clang-tidy/checkers/misc/use-internal-linkage-func.cpp
create mode 100644 clang-tools-extra/test/clang-tidy/checkers/misc/use-internal-linkage-var.cpp
diff --git a/clang-tools-extra/clang-tidy/misc/CMakeLists.txt b/clang-tools-extra/clang-tidy/misc/CMakeLists.txt
index 36fcd8fc1b277..1c1d3b836ea1b 100644
--- a/clang-tools-extra/clang-tidy/misc/CMakeLists.txt
+++ b/clang-tools-extra/clang-tidy/misc/CMakeLists.txt
@@ -41,6 +41,7 @@ add_clang_library(clangTidyMiscModule
UnusedParametersCheck.cpp
UnusedUsingDeclsCheck.cpp
UseAnonymousNamespaceCheck.cpp
+ UseInternalLinkageCheck.cpp
LINK_LIBS
clangTidy
diff --git a/clang-tools-extra/clang-tidy/misc/MiscTidyModule.cpp b/clang-tools-extra/clang-tidy/misc/MiscTidyModule.cpp
index d8a88324ee63e..54bcebca7e186 100644
--- a/clang-tools-extra/clang-tidy/misc/MiscTidyModule.cpp
+++ b/clang-tools-extra/clang-tidy/misc/MiscTidyModule.cpp
@@ -31,6 +31,7 @@
#include "UnusedParametersCheck.h"
#include "UnusedUsingDeclsCheck.h"
#include "UseAnonymousNamespaceCheck.h"
+#include "UseInternalLinkageCheck.h"
namespace clang::tidy {
namespace misc {
@@ -78,6 +79,8 @@ class MiscModule : public ClangTidyModule {
"misc-unused-using-decls");
CheckFactories.registerCheck<UseAnonymousNamespaceCheck>(
"misc-use-anonymous-namespace");
+ CheckFactories.registerCheck<UseInternalLinkageCheck>(
+ "misc-use-internal-linkage");
}
};
diff --git a/clang-tools-extra/clang-tidy/misc/UseInternalLinkageCheck.cpp b/clang-tools-extra/clang-tidy/misc/UseInternalLinkageCheck.cpp
new file mode 100644
index 0000000000000..70d0281df28fa
--- /dev/null
+++ b/clang-tools-extra/clang-tidy/misc/UseInternalLinkageCheck.cpp
@@ -0,0 +1,95 @@
+//===--- UseInternalLinkageCheck.cpp - clang-tidy--------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "UseInternalLinkageCheck.h"
+#include "../utils/FileExtensionsUtils.h"
+#include "clang/AST/Decl.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "clang/ASTMatchers/ASTMatchers.h"
+#include "clang/ASTMatchers/ASTMatchersMacros.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/Specifiers.h"
+#include "llvm/ADT/STLExtras.h"
+
+using namespace clang::ast_matchers;
+
+namespace clang::tidy::misc {
+
+namespace {
+
+AST_MATCHER(Decl, isFirstDecl) { return Node.isFirstDecl(); }
+
+static bool isInMainFile(SourceLocation L, SourceManager &SM,
+ const FileExtensionsSet &HeaderFileExtensions) {
+ for (;;) {
+ if (utils::isSpellingLocInHeaderFile(L, SM, HeaderFileExtensions))
+ return false;
+ if (SM.isInMainFile(L))
+ return true;
+ // Not in a header file, but not in the main file either: walk up the include chain.
+ L = SM.getIncludeLoc(SM.getFileID(L));
+ if (L.isValid())
+ continue;
+ // Be conservative when the include location is unknown.
+ return false;
+ }
+}
+
+AST_MATCHER_P(Decl, isAllRedeclsInMainFile, FileExtensionsSet,
+ HeaderFileExtensions) {
+ return llvm::all_of(Node.redecls(), [&](const Decl *D) {
+ return isInMainFile(D->getLocation(),
+ Finder->getASTContext().getSourceManager(),
+ HeaderFileExtensions);
+ });
+}
+
+AST_POLYMORPHIC_MATCHER(isExternStorageClass,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl,
+ VarDecl)) {
+ return Node.getStorageClass() == SC_Extern;
+}
+
+} // namespace
+
+void UseInternalLinkageCheck::registerMatchers(MatchFinder *Finder) {
+ auto Common =
+ allOf(isFirstDecl(), isAllRedeclsInMainFile(HeaderFileExtensions),
+ unless(anyOf(
+ // 1. internal linkage
+ isStaticStorageClass(), isInAnonymousNamespace(),
+ // 2. explicit external linkage
+ isExternStorageClass(), isExternC(),
+ // 3. template
+ isExplicitTemplateSpecialization(),
+ // 4. friend
+ hasAncestor(friendDecl()))));
+ Finder->addMatcher(
+ functionDecl(Common, unless(cxxMethodDecl()), unless(isMain()))
+ .bind("fn"),
+ this);
+ Finder->addMatcher(varDecl(Common, hasGlobalStorage()).bind("var"), this);
+}
+
+static constexpr StringRef Message =
+ "%0 %1 can be made static or moved into an anonymous namespace "
+ "to enforce internal linkage";
+
+void UseInternalLinkageCheck::check(const MatchFinder::MatchResult &Result) {
+ if (const auto *FD = Result.Nodes.getNodeAs<FunctionDecl>("fn")) {
+ diag(FD->getLocation(), Message) << "function" << FD;
+ return;
+ }
+ if (const auto *VD = Result.Nodes.getNodeAs<VarDecl>("var")) {
+ diag(VD->getLocation(), Message) << "variable" << VD;
+ return;
+ }
+ llvm_unreachable("");
+}
+
+} // namespace clang::tidy::misc
diff --git a/clang-tools-extra/clang-tidy/misc/UseInternalLinkageCheck.h b/clang-tools-extra/clang-tidy/misc/UseInternalLinkageCheck.h
new file mode 100644
index 0000000000000..a3c1c33965903
--- /dev/null
+++ b/clang-tools-extra/clang-tidy/misc/UseInternalLinkageCheck.h
@@ -0,0 +1,38 @@
+//===--- UseInternalLinkageCheck.h - clang-tidy -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_MISC_USEINTERNALLINKAGECHECK_H
+#define LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_MISC_USEINTERNALLINKAGECHECK_H
+
+#include "../ClangTidyCheck.h"
+
+namespace clang::tidy::misc {
+
+/// Detects variables and functions that can be marked as static or moved into
+/// an anonymous namespace to enforce internal linkage.
+///
+/// For the user-facing documentation see:
+/// http://clang.llvm.org/extra/clang-tidy/checks/misc/use-internal-linkage.html
+class UseInternalLinkageCheck : public ClangTidyCheck {
+public:
+ UseInternalLinkageCheck(StringRef Name, ClangTidyContext *Context)
+ : ClangTidyCheck(Name, Context),
+ HeaderFileExtensions(Context->getHeaderFileExtensions()) {}
+ void registerMatchers(ast_matchers::MatchFinder *Finder) override;
+ void check(const ast_matchers::MatchFinder::MatchResult &Result) override;
+ std::optional<TraversalKind> getCheckTraversalKind() const override {
+ return TK_IgnoreUnlessSpelledInSource;
+ }
+
+private:
+ FileExtensionsSet HeaderFileExtensions;
+};
+
+} // namespace clang::tidy::misc
+
+#endif // LLVM_CLANG_TOOLS_EXTRA_CLANG_TIDY_MISC_USEINTERNALLINKAGECHECK_H
diff --git a/clang-tools-extra/docs/ReleaseNotes.rst b/clang-tools-extra/docs/ReleaseNotes.rst
index da30aceb8d49d..277a6e75da2ac 100644
--- a/clang-tools-extra/docs/ReleaseNotes.rst
+++ b/clang-tools-extra/docs/ReleaseNotes.rst
@@ -148,6 +148,12 @@ New checks
to reading out-of-bounds data due to inadequate or incorrect string null
termination.
+- New :doc:`misc-use-internal-linkage
+ <clang-tidy/checks/misc/use-internal-linkage>` check.
+
+ Detects variables and functions that can be marked as static or moved into
+ an anonymous namespace to enforce internal linkage.
+
- New :doc:`modernize-min-max-use-initializer-list
<clang-tidy/checks/modernize/min-max-use-initializer-list>` check.
diff --git a/clang-tools-extra/docs/clang-tidy/checks/list.rst b/clang-tools-extra/docs/clang-tidy/checks/list.rst
index 85e4f0352ac22..87d3db20f7684 100644
--- a/clang-tools-extra/docs/clang-tidy/checks/list.rst
+++ b/clang-tools-extra/docs/clang-tidy/checks/list.rst
@@ -267,6 +267,7 @@ Clang-Tidy Checks
:doc:`misc-unused-parameters <misc/unused-parameters>`, "Yes"
:doc:`misc-unused-using-decls <misc/unused-using-decls>`, "Yes"
:doc:`misc-use-anonymous-namespace <misc/use-anonymous-namespace>`,
+ :doc:`misc-use-internal-linkage <misc/use-internal-linkage>`,
:doc:`modernize-avoid-bind <modernize/avoid-bind>`, "Yes"
:doc:`modernize-avoid-c-arrays <modernize/avoid-c-arrays>`,
:doc:`modernize-concat-nested-namespaces <modernize/concat-nested-namespaces>`, "Yes"
diff --git a/clang-tools-extra/docs/clang-tidy/checks/misc/use-internal-linkage.rst b/clang-tools-extra/docs/clang-tidy/checks/misc/use-internal-linkage.rst
new file mode 100644
index 0000000000000..e8e43a1fb3d63
--- /dev/null
+++ b/clang-tools-extra/docs/clang-tidy/checks/misc/use-internal-linkage.rst
@@ -0,0 +1,27 @@
+.. title:: clang-tidy - misc-use-internal-linkage
+
+misc-use-internal-linkage
+=========================
+
+Detects variables and functions that can be marked as static or moved into
+an anonymous namespace to enforce internal linkage.
+
+Static functions and variables are scoped to a single file. Marking functions
+and variables as static helps the compiler remove dead code more effectively.
+In addition, it gives the compiler more information and allows for more
+aggressive optimizations.
+
+Example:
+
+.. code-block:: c++
+
+ int v1; // can be marked as static
+
+ void fn1(); // can be marked as static
+
+ namespace {
+ // already in anonymous namespace
+ int v2;
+ void fn2();
+ }
+ // already declared as extern
+ extern int v2;
diff --git a/clang-tools-extra/test/clang-tidy/checkers/misc/Inputs/use-internal-linkage/func.h b/clang-tools-extra/test/clang-tidy/checkers/misc/Inputs/use-internal-linkage/func.h
new file mode 100644
index 0000000000000..0f2b576a126c4
--- /dev/null
+++ b/clang-tools-extra/test/clang-tidy/checkers/misc/Inputs/use-internal-linkage/func.h
@@ -0,0 +1,5 @@
+#pragma once
+
+void func_header();
+
+#include "func_h.inc"
diff --git a/clang-tools-extra/test/clang-tidy/checkers/misc/Inputs/use-internal-linkage/func_cpp.inc b/clang-tools-extra/test/clang-tidy/checkers/misc/Inputs/use-internal-linkage/func_cpp.inc
new file mode 100644
index 0000000000000..97e026f0116e9
--- /dev/null
+++ b/clang-tools-extra/test/clang-tidy/checkers/misc/Inputs/use-internal-linkage/func_cpp.inc
@@ -0,0 +1 @@
+void func_cpp_inc();
diff --git a/clang-tools-extra/test/clang-tidy/checkers/misc/Inputs/use-internal-linkage/func_h.inc b/clang-tools-extra/test/clang-tidy/checkers/misc/Inputs/use-internal-linkage/func_h.inc
new file mode 100644
index 0000000000000..1130f710edd7c
--- /dev/null
+++ b/clang-tools-extra/test/clang-tidy/checkers/misc/Inputs/use-internal-linkage/func_h.inc
@@ -0,0 +1 @@
+void func_h_inc();
diff --git a/clang-tools-extra/test/clang-tidy/checkers/misc/Inputs/use-internal-linkage/var.h b/clang-tools-extra/test/clang-tidy/checkers/misc/Inputs/use-internal-linkage/var.h
new file mode 100644
index 0000000000000..37e4cfbafff14
--- /dev/null
+++ b/clang-tools-extra/test/clang-tidy/checkers/misc/Inputs/use-internal-linkage/var.h
@@ -0,0 +1,3 @@
+#pragma once
+
+extern int gloabl_header;
diff --git a/clang-tools-extra/test/clang-tidy/checkers/misc/use-internal-linkage-func.cpp b/clang-tools-extra/test/clang-tidy/checkers/misc/use-internal-linkage-func.cpp
new file mode 100644
index 0000000000000..c6c513fe0b0c0
--- /dev/null
+++ b/clang-tools-extra/test/clang-tidy/checkers/misc/use-internal-linkage-func.cpp
@@ -0,0 +1,37 @@
+// RUN: %check_clang_tidy %s misc-use-internal-linkage %t -- -- -I%S/Inputs/use-internal-linkage
+
+#include "func.h"
+
+void func() {}
+// CHECK-MESSAGES: :[[@LINE-1]]:6: warning: function 'func'
+
+template<class T>
+void func_template() {}
+// CHECK-MESSAGES: :[[@LINE-1]]:6: warning: function 'func_template'
+
+void func_cpp_inc();
+// CHECK-MESSAGES: :[[@LINE-1]]:6: warning: function 'func_cpp_inc'
+
+#include "func_cpp.inc"
+
+void func_h_inc();
+
+struct S {
+ void method();
+};
+void S::method() {}
+
+void func_header();
+extern void func_extern();
+static void func_static();
+namespace {
+void func_anonymous_ns();
+} // namespace
+
+int main(int argc, const char*argv[]) {}
+
+extern "C" {
+void func_extern_c_1() {}
+}
+
+extern "C" void func_extern_c_2() {}
diff --git a/clang-tools-extra/test/clang-tidy/checkers/misc/use-internal-linkage-var.cpp b/clang-tools-extra/test/clang-tidy/checkers/misc/use-internal-linkage-var.cpp
new file mode 100644
index 0000000000000..bd5ef5431de6c
--- /dev/null
+++ b/clang-tools-extra/test/clang-tidy/checkers/misc/use-internal-linkage-var.cpp
@@ -0,0 +1,40 @@
+// RUN: %check_clang_tidy %s misc-use-internal-linkage %t -- -- -I%S/Inputs/use-internal-linkage
+
+#include "var.h"
+
+int global;
+// CHECK-MESSAGES: :[[@LINE-1]]:5: warning: variable 'global'
+
+template<class T>
+T global_template;
+// CHECK-MESSAGES: :[[@LINE-1]]:3: warning: variable 'global_template'
+
+int gloabl_header;
+
+extern int global_extern;
+
+static int global_static;
+
+namespace {
+static int global_anonymous_ns;
+namespace NS {
+static int global_anonymous_ns;
+}
+}
+
+static void f(int para) {
+ int local;
+ static int local_static;
+}
+
+struct S {
+ int m1;
+ static int m2;
+};
+int S::m2;
+
+extern "C" {
+int global_in_extern_c_1;
+}
+
+extern "C" int global_in_extern_c_2;
From bf2cd4c10ea402057cc326c30db3c7cead41a453 Mon Sep 17 00:00:00 2001
From: OverMighty <its.overmighty at gmail.com>
Date: Sat, 8 Jun 2024 00:49:44 +0200
Subject: [PATCH 06/57] [libc][math][c23] Temporarily disable fmodf16 on
AArch64 (#94813)
See Buildbot failure:
https://lab.llvm.org/buildbot/#/builders/138/builds/67337.
---
libc/config/linux/aarch64/entrypoints.txt | 1 -
1 file changed, 1 deletion(-)
diff --git a/libc/config/linux/aarch64/entrypoints.txt b/libc/config/linux/aarch64/entrypoints.txt
index 1852772fdbf61..d30f01bb51c48 100644
--- a/libc/config/linux/aarch64/entrypoints.txt
+++ b/libc/config/linux/aarch64/entrypoints.txt
@@ -515,7 +515,6 @@ if(LIBC_TYPES_HAS_FLOAT16)
libc.src.math.fminimum_magf16
libc.src.math.fminimum_mag_numf16
libc.src.math.fminimum_numf16
- libc.src.math.fmodf16
libc.src.math.fromfpf16
libc.src.math.fromfpxf16
libc.src.math.llrintf16
From 1d8e094f93be461c0fddb95385341f29eb456697 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Fri, 7 Jun 2024 15:58:26 -0700
Subject: [PATCH 07/57] [RISCV] Remove unused tablegen multiclasses. NFC
---
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td | 12 ------------
1 file changed, 12 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index b47ba21725da6..603c1985c4fee 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -3303,12 +3303,6 @@ multiclass VPseudoTernaryV_VX_AAXA<LMULInfo m, string Constraint = ""> {
Constraint, Commutable=1>;
}
-multiclass VPseudoTernaryV_VF_AAXA<LMULInfo m, FPR_Info f, string Constraint = ""> {
- defm "_V" # f.FX : VPseudoTernaryWithPolicy<m.vrclass, f.fprclass,
- m.vrclass, m, Constraint,
- Commutable=1>;
-}
-
multiclass VPseudoTernaryV_VF_AAXA_RM<LMULInfo m, FPR_Info f,
string Constraint = "", int sew = 0> {
defm "_V" # f.FX : VPseudoTernaryWithPolicyRoundingMode<m.vrclass, f.fprclass,
@@ -3335,12 +3329,6 @@ multiclass VPseudoTernaryW_VX<LMULInfo m> {
constraint, /*Commutable*/ 0, TargetConstraintType=3>;
}
-multiclass VPseudoTernaryW_VF<LMULInfo m, FPR_Info f, int TargetConstraintType = 1> {
- defvar constraint = "@earlyclobber $rd";
- defm "_V" # f.FX : VPseudoTernaryWithPolicy<m.wvrclass, f.fprclass,
- m.vrclass, m, constraint, /*Commutable*/ 0, TargetConstraintType>;
-}
-
multiclass VPseudoTernaryW_VF_RM<LMULInfo m, FPR_Info f, int sew = 0> {
defvar constraint = "@earlyclobber $rd";
defm "_V" # f.FX : VPseudoTernaryWithPolicyRoundingMode<m.wvrclass, f.fprclass,
From 0cddd02186e832de6e99f2c115fd13681c66ce47 Mon Sep 17 00:00:00 2001
From: Aiden Grossman <aidengrossman at google.com>
Date: Fri, 7 Jun 2024 23:18:28 +0000
Subject: [PATCH 08/57] Reland "[python] Bump Python minimum version to 3.8
(#78828)"
This reverts commit b6824c9d459da059e247a60c1ebd1aeb580dacc2.
This relands commit 0a6c74e21cc6750c843310ab35b47763cddaaf32.
The original commit was reverted due to buildbot failures. These bots
should be updated now, so hopefully this will stick.
---
llvm/CMakeLists.txt | 4 ++--
llvm/docs/GettingStarted.rst | 8 ++++----
llvm/docs/GettingStartedVS.rst | 2 +-
llvm/docs/ReleaseNotes.rst | 5 +++++
llvm/docs/TestingGuide.rst | 2 +-
5 files changed, 13 insertions(+), 8 deletions(-)
diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt
index 64898ab09772f..3208147101c0d 100644
--- a/llvm/CMakeLists.txt
+++ b/llvm/CMakeLists.txt
@@ -903,8 +903,8 @@ set(LLVM_PROFDATA_FILE "" CACHE FILEPATH
"Profiling data file to use when compiling in order to improve runtime performance.")
if(LLVM_INCLUDE_TESTS)
- # Lit test suite requires at least python 3.6
- set(LLVM_MINIMUM_PYTHON_VERSION 3.6)
+ # All LLVM Python files should be compatible down to this minimum version.
+ set(LLVM_MINIMUM_PYTHON_VERSION 3.8)
else()
# FIXME: it is unknown if this is the actual minimum bound
set(LLVM_MINIMUM_PYTHON_VERSION 3.0)
diff --git a/llvm/docs/GettingStarted.rst b/llvm/docs/GettingStarted.rst
index 7ecef78c405b6..0a1913dca8aac 100644
--- a/llvm/docs/GettingStarted.rst
+++ b/llvm/docs/GettingStarted.rst
@@ -292,16 +292,16 @@ uses the package and provides other details.
Package Version Notes
=========================================================== ============ ==========================================
`CMake <http://cmake.org/>`__ >=3.20.0 Makefile/workspace generator
-`python <http://www.python.org/>`_ >=3.6 Automated test suite\ :sup:`1`
+`python <http://www.python.org/>`_ >=3.8 Automated test suite\ :sup:`1`
`zlib <http://zlib.net>`_ >=1.2.3.4 Compression library\ :sup:`2`
`GNU Make <http://savannah.gnu.org/projects/make>`_ 3.79, 3.79.1 Makefile/build processor\ :sup:`3`
=========================================================== ============ ==========================================
.. note::
- #. Only needed if you want to run the automated test suite. Python 3.8.0
- or later is needed on Windows if a substitute (virtual) drive is used
- to access LLVM source code due to ``MAX_PATH`` limitations.
+ #. Only needed if you want to run the automated test suite in the
+ ``llvm/test`` directory, or if you plan to utilize any Python libraries,
+ utilities, or bindings.
#. Optional, adds compression / uncompression capabilities to selected LLVM
tools.
#. Optional, you can use any other build tool supported by CMake.
diff --git a/llvm/docs/GettingStartedVS.rst b/llvm/docs/GettingStartedVS.rst
index a1eb88dccc9e5..4b15272635fbe 100644
--- a/llvm/docs/GettingStartedVS.rst
+++ b/llvm/docs/GettingStartedVS.rst
@@ -55,7 +55,7 @@ Visual Studio 2019 so separate installation is not required. If you do install
CMake separately, Visual Studio 2022 will require CMake Version 3.21 or later.
If you would like to run the LLVM tests you will need `Python
-<http://www.python.org/>`_. Version 3.6 and newer are known to work. You can
+<http://www.python.org/>`_. Version 3.8 and newer are known to work. You can
install Python with Visual Studio 2019, from the Microsoft store or from
the `Python web site <http://www.python.org/>`_. We recommend the latter since it
allows you to adjust installation options.
diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst
index 28dd567a61086..e1e1652d1e0a1 100644
--- a/llvm/docs/ReleaseNotes.rst
+++ b/llvm/docs/ReleaseNotes.rst
@@ -47,6 +47,11 @@ Non-comprehensive list of changes in this release
Update on required toolchains to build LLVM
-------------------------------------------
+* The minimum Python version has been raised from 3.6 to 3.8 across all of LLVM.
+  This enables the use of many new Python features, aligns more closely with
+  modern Python best practices, and improves CI maintainability.
+  See `#78828 <https://github.com/llvm/llvm-project/pull/78828>`_ for more info.
+
Changes to the LLVM IR
----------------------
diff --git a/llvm/docs/TestingGuide.rst b/llvm/docs/TestingGuide.rst
index e24feb3bf5fa2..89499d8b937d5 100644
--- a/llvm/docs/TestingGuide.rst
+++ b/llvm/docs/TestingGuide.rst
@@ -23,7 +23,7 @@ Requirements
============
In order to use the LLVM testing infrastructure, you will need all of the
-software required to build LLVM, as well as `Python <http://python.org>`_ 3.6 or
+software required to build LLVM, as well as `Python <http://python.org>`_ 3.8 or
later.
LLVM Testing Infrastructure Organization
From 44a4e3b294cff784c229e3ead2d52a3868dffc3c Mon Sep 17 00:00:00 2001
From: srcarroll <50210727+srcarroll at users.noreply.github.com>
Date: Fri, 7 Jun 2024 18:25:43 -0500
Subject: [PATCH 09/57] [mlir][loops] Add getters for multi dim loop variables
in `LoopLikeOpInterface` (#94516)
This patch adds `getLoopInductionVars`, `getLoopLowerBounds`,
`getLoopUpperBounds`, and `getLoopSteps` interface methods to
`LoopLikeOpInterface`. The corresponding single-value versions have been
moved to the shared class declaration and are now implemented in terms of
the new interface methods.
---
.../mlir/Dialect/Affine/IR/AffineOps.td | 4 +-
mlir/include/mlir/Dialect/SCF/IR/SCFOps.td | 50 +++++++-----
.../mlir/Interfaces/LoopLikeInterface.td | 81 ++++++++++++++-----
mlir/lib/Dialect/Affine/IR/AffineOps.cpp | 21 ++---
mlir/lib/Dialect/Linalg/Transforms/Loops.cpp | 3 +-
mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp | 6 +-
mlir/lib/Dialect/SCF/IR/SCF.cpp | 70 +++++++---------
.../Dialect/SCF/Transforms/ForallToFor.cpp | 9 +--
.../SCF/Transforms/ForallToParallel.cpp | 9 +--
.../Dialect/SCF/LoopLikeSCFOpsTest.cpp | 70 ++++++++++++----
10 files changed, 198 insertions(+), 125 deletions(-)
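A hedged usage sketch of the new multi-dimensional getters, not part of the patch: each returns std::optional<SmallVector<...>> with one entry per loop dimension (std::nullopt when the op cannot report that notion), and the single-value getters are now derived from them.

    #include "mlir/Interfaces/LoopLikeInterface.h"

    using namespace mlir;

    // Hypothetical consumer walking every induction variable of a
    // (possibly multi-dimensional) loop-like op.
    static void visitInductionVars(LoopLikeOpInterface loopOp) {
      if (std::optional<SmallVector<Value>> ivs = loopOp.getLoopInductionVars())
        for (Value iv : *ivs)
          (void)iv; // one induction variable per dimension
    }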
diff --git a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
index 3640055ea8da8..dbec741cf1b1f 100644
--- a/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
+++ b/mlir/include/mlir/Dialect/Affine/IR/AffineOps.td
@@ -118,8 +118,8 @@ def AffineForOp : Affine_Op<"for",
[AttrSizedOperandSegments, AutomaticAllocationScope,
ImplicitAffineTerminator, ConditionallySpeculatable,
RecursiveMemoryEffects, DeclareOpInterfaceMethods<LoopLikeOpInterface,
- ["getSingleInductionVar", "getSingleLowerBound", "getSingleStep",
- "getSingleUpperBound", "getYieldedValuesMutable",
+ ["getLoopInductionVars", "getLoopLowerBounds", "getLoopSteps",
+ "getLoopUpperBounds", "getYieldedValuesMutable",
"replaceWithAdditionalYields"]>,
DeclareOpInterfaceMethods<RegionBranchOpInterface,
["getEntrySuccessorOperands"]>]> {
diff --git a/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td b/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td
index 0b063aa772bab..f35ea962bea16 100644
--- a/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td
+++ b/mlir/include/mlir/Dialect/SCF/IR/SCFOps.td
@@ -136,8 +136,8 @@ def ExecuteRegionOp : SCF_Op<"execute_region", [
def ForOp : SCF_Op<"for",
[AutomaticAllocationScope, DeclareOpInterfaceMethods<LoopLikeOpInterface,
["getInitsMutable", "getLoopResults", "getRegionIterArgs",
- "getSingleInductionVar", "getSingleLowerBound", "getSingleStep",
- "getSingleUpperBound", "getYieldedValuesMutable",
+ "getLoopInductionVars", "getLoopLowerBounds", "getLoopSteps",
+ "getLoopUpperBounds", "getYieldedValuesMutable",
"promoteIfSingleIteration", "replaceWithAdditionalYields",
"yieldTiledValuesAndReplace"]>,
AllTypesMatch<["lowerBound", "upperBound", "step"]>,
@@ -301,8 +301,8 @@ def ForallOp : SCF_Op<"forall", [
AttrSizedOperandSegments,
AutomaticAllocationScope,
DeclareOpInterfaceMethods<LoopLikeOpInterface,
- ["getInitsMutable", "getRegionIterArgs", "getSingleInductionVar",
- "getSingleLowerBound", "getSingleUpperBound", "getSingleStep",
+ ["getInitsMutable", "getRegionIterArgs", "getLoopInductionVars",
+ "getLoopLowerBounds", "getLoopUpperBounds", "getLoopSteps",
"promoteIfSingleIteration", "yieldTiledValuesAndReplace"]>,
RecursiveMemoryEffects,
SingleBlockImplicitTerminator<"scf::InParallelOp">,
@@ -510,22 +510,31 @@ def ForallOp : SCF_Op<"forall", [
];
let extraClassDeclaration = [{
- // Get lower bounds as OpFoldResult.
+ /// Get induction variables.
+ SmallVector<Value> getInductionVars() {
+ std::optional<SmallVector<Value>> maybeInductionVars = getLoopInductionVars();
+ assert(maybeInductionVars.has_value() && "expected values");
+ return *maybeInductionVars;
+ }
+ /// Get lower bounds as OpFoldResult.
SmallVector<OpFoldResult> getMixedLowerBound() {
- Builder b(getOperation()->getContext());
- return getMixedValues(getStaticLowerBound(), getDynamicLowerBound(), b);
+ std::optional<SmallVector<OpFoldResult>> maybeLowerBounds = getLoopLowerBounds();
+ assert(maybeLowerBounds.has_value() && "expected values");
+ return *maybeLowerBounds;
}
- // Get upper bounds as OpFoldResult.
+ /// Get upper bounds as OpFoldResult.
SmallVector<OpFoldResult> getMixedUpperBound() {
- Builder b(getOperation()->getContext());
- return getMixedValues(getStaticUpperBound(), getDynamicUpperBound(), b);
+ std::optional<SmallVector<OpFoldResult>> maybeUpperBounds = getLoopUpperBounds();
+ assert(maybeUpperBounds.has_value() && "expected values");
+ return *maybeUpperBounds;
}
- // Get steps as OpFoldResult.
+ /// Get steps as OpFoldResult.
SmallVector<OpFoldResult> getMixedStep() {
- Builder b(getOperation()->getContext());
- return getMixedValues(getStaticStep(), getDynamicStep(), b);
+ std::optional<SmallVector<OpFoldResult>> maybeSteps = getLoopSteps();
+ assert(maybeSteps.has_value() && "expected values");
+ return *maybeSteps;
}
/// Get lower bounds as values.
@@ -584,10 +593,6 @@ def ForallOp : SCF_Op<"forall", [
getNumDynamicControlOperands() + getRank());
}
- ::mlir::ValueRange getInductionVars() {
- return getBody()->getArguments().take_front(getRank());
- }
-
::mlir::Value getInductionVar(int64_t idx) {
return getInductionVars()[idx];
}
@@ -765,8 +770,8 @@ def IfOp : SCF_Op<"if", [DeclareOpInterfaceMethods<RegionBranchOpInterface, [
def ParallelOp : SCF_Op<"parallel",
[AutomaticAllocationScope,
AttrSizedOperandSegments,
- DeclareOpInterfaceMethods<LoopLikeOpInterface, ["getSingleInductionVar",
- "getSingleLowerBound", "getSingleUpperBound", "getSingleStep"]>,
+ DeclareOpInterfaceMethods<LoopLikeOpInterface, ["getLoopInductionVars",
+ "getLoopLowerBounds", "getLoopUpperBounds", "getLoopSteps"]>,
RecursiveMemoryEffects,
DeclareOpInterfaceMethods<RegionBranchOpInterface>,
SingleBlockImplicitTerminator<"scf::ReduceOp">,
@@ -846,8 +851,11 @@ def ParallelOp : SCF_Op<"parallel",
];
let extraClassDeclaration = [{
- ValueRange getInductionVars() {
- return getBody()->getArguments();
+ /// Get induction variables.
+ SmallVector<Value> getInductionVars() {
+      std::optional<SmallVector<Value>> maybeInductionVars = getLoopInductionVars();
+ assert(maybeInductionVars.has_value() && "expected values");
+ return *maybeInductionVars;
}
unsigned getNumLoops() { return getStep().size(); }
unsigned getNumReductions() { return getInitVals().size(); }
diff --git a/mlir/include/mlir/Interfaces/LoopLikeInterface.td b/mlir/include/mlir/Interfaces/LoopLikeInterface.td
index f0dc6e60eba58..b748d5e29114a 100644
--- a/mlir/include/mlir/Interfaces/LoopLikeInterface.td
+++ b/mlir/include/mlir/Interfaces/LoopLikeInterface.td
@@ -93,51 +93,59 @@ def LoopLikeOpInterface : OpInterface<"LoopLikeOpInterface"> {
}]
>,
InterfaceMethod<[{
- If there is a single induction variable return it, otherwise return
- std::nullopt.
+    Return all induction variables, if they exist. If the op has no notion
+    of induction variables, then return std::nullopt. If it does have a
+    notion but an instance doesn't have induction variables, then return
+    an empty vector.
}],
- /*retTy=*/"::std::optional<::mlir::Value>",
- /*methodName=*/"getSingleInductionVar",
+ /*retTy=*/"::std::optional<::llvm::SmallVector<::mlir::Value>>",
+ /*methodName=*/"getLoopInductionVars",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return std::nullopt;
+ return ::std::nullopt;
}]
>,
InterfaceMethod<[{
- Return the single lower bound value or attribute if it exists, otherwise
- return std::nullopt.
+    Return all lower bounds, if they exist. If the op has no notion of
+    lower bounds, then return std::nullopt. If it does have a notion
+    but an instance doesn't have lower bounds, then return
+    an empty vector.
}],
- /*retTy=*/"::std::optional<::mlir::OpFoldResult>",
- /*methodName=*/"getSingleLowerBound",
+ /*retTy=*/"::std::optional<::llvm::SmallVector<::mlir::OpFoldResult>>",
+ /*methodName=*/"getLoopLowerBounds",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return std::nullopt;
+ return ::std::nullopt;
}]
>,
InterfaceMethod<[{
- Return the single step value or attribute if it exists, otherwise
- return std::nullopt.
+    Return all steps, if they exist. If the op has no notion of
+    steps, then return std::nullopt. If it does have a notion
+    but an instance doesn't have steps, then return
+    an empty vector.
}],
- /*retTy=*/"::std::optional<::mlir::OpFoldResult>",
- /*methodName=*/"getSingleStep",
+ /*retTy=*/"::std::optional<::llvm::SmallVector<::mlir::OpFoldResult>>",
+ /*methodName=*/"getLoopSteps",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return std::nullopt;
+ return ::std::nullopt;
}]
>,
InterfaceMethod<[{
- Return the single upper bound value or attribute if it exists, otherwise
- return std::nullopt.
+    Return all upper bounds, if they exist. If the op has no notion of
+    upper bounds, then return std::nullopt. If it does have a notion
+    but an instance doesn't have upper bounds, then return
+    an empty vector.
}],
- /*retTy=*/"::std::optional<::mlir::OpFoldResult>",
- /*methodName=*/"getSingleUpperBound",
+ /*retTy=*/"::std::optional<::llvm::SmallVector<::mlir::OpFoldResult>>",
+ /*methodName=*/"getLoopUpperBounds",
/*args=*/(ins),
/*methodBody=*/"",
/*defaultImplementation=*/[{
- return std::nullopt;
+ return ::std::nullopt;
}]
>,
InterfaceMethod<[{
@@ -235,6 +243,39 @@ def LoopLikeOpInterface : OpInterface<"LoopLikeOpInterface"> {
}];
let extraSharedClassDeclaration = [{
+ /// If there is a single induction variable return it, otherwise return
+ /// std::nullopt.
+ ::std::optional<::mlir::Value> getSingleInductionVar() {
+ auto inductionVars = this->getLoopInductionVars();
+ if (inductionVars.has_value() && (*inductionVars).size() == 1)
+ return (*inductionVars)[0];
+ return std::nullopt;
+ }
+ /// Return the single lower bound value or attribute if it exists, otherwise
+ /// return std::nullopt.
+ ::std::optional<::mlir::OpFoldResult> getSingleLowerBound() {
+ auto lowerBounds = this->getLoopLowerBounds();
+ if (lowerBounds.has_value() && (*lowerBounds).size() == 1)
+ return (*lowerBounds)[0];
+ return std::nullopt;
+ }
+ /// Return the single step value or attribute if it exists, otherwise
+ /// return std::nullopt.
+ ::std::optional<::mlir::OpFoldResult> getSingleStep() {
+ auto steps = this->getLoopSteps();
+ if (steps.has_value() && (*steps).size() == 1)
+ return (*steps)[0];
+ return std::nullopt;
+ }
+ /// Return the single upper bound value or attribute if it exists, otherwise
+ /// return std::nullopt.
+ ::std::optional<::mlir::OpFoldResult> getSingleUpperBound() {
+ auto upperBounds = this->getLoopUpperBounds();
+ if (upperBounds.has_value() && (*upperBounds).size() == 1)
+ return (*upperBounds)[0];
+ return std::nullopt;
+ }
+
/// Append the specified additional "init" operands: replace this loop with
/// a new loop that has the additional init operands. The loop body of this
/// loop is moved over to the new loop.
diff --git a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
index 2e31487bd55a0..0a58d2fdb02f5 100644
--- a/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
+++ b/mlir/lib/Dialect/Affine/IR/AffineOps.cpp
@@ -2454,27 +2454,30 @@ bool AffineForOp::matchingBoundOperandList() {
SmallVector<Region *> AffineForOp::getLoopRegions() { return {&getRegion()}; }
-std::optional<Value> AffineForOp::getSingleInductionVar() {
- return getInductionVar();
+std::optional<SmallVector<Value>> AffineForOp::getLoopInductionVars() {
+ return SmallVector<Value>{getInductionVar()};
}
-std::optional<OpFoldResult> AffineForOp::getSingleLowerBound() {
+std::optional<SmallVector<OpFoldResult>> AffineForOp::getLoopLowerBounds() {
if (!hasConstantLowerBound())
return std::nullopt;
OpBuilder b(getContext());
- return OpFoldResult(b.getI64IntegerAttr(getConstantLowerBound()));
+ return SmallVector<OpFoldResult>{
+ OpFoldResult(b.getI64IntegerAttr(getConstantLowerBound()))};
}
-std::optional<OpFoldResult> AffineForOp::getSingleStep() {
+std::optional<SmallVector<OpFoldResult>> AffineForOp::getLoopSteps() {
OpBuilder b(getContext());
- return OpFoldResult(b.getI64IntegerAttr(getStepAsInt()));
+ return SmallVector<OpFoldResult>{
+ OpFoldResult(b.getI64IntegerAttr(getStepAsInt()))};
}
-std::optional<OpFoldResult> AffineForOp::getSingleUpperBound() {
+std::optional<SmallVector<OpFoldResult>> AffineForOp::getLoopUpperBounds() {
if (!hasConstantUpperBound())
- return std::nullopt;
+    return std::nullopt;
OpBuilder b(getContext());
- return OpFoldResult(b.getI64IntegerAttr(getConstantUpperBound()));
+ return SmallVector<OpFoldResult>{
+ OpFoldResult(b.getI64IntegerAttr(getConstantUpperBound()))};
}
FailureOr<LoopLikeOpInterface> AffineForOp::replaceWithAdditionalYields(
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
index b0a4de2da1e86..8b0e04fb61b1b 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
@@ -184,8 +184,7 @@ static void replaceIndexOpsByInductionVariables(RewriterBase &rewriter,
for (Operation *loopOp : loopOps) {
llvm::TypeSwitch<Operation *>(loopOp)
.Case([&](scf::ParallelOp parallelOp) {
- allIvs.append(parallelOp.getInductionVars().begin(),
- parallelOp.getInductionVars().end());
+ allIvs.append(parallelOp.getInductionVars());
})
.Case([&](scf::ForOp forOp) {
allIvs.push_back(forOp.getInductionVar());
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
index fd314ef9f8134..a0a0e11a6903d 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
@@ -243,7 +243,7 @@ static void calculateTileOffsetsAndSizes(
OpBuilder::InsertionGuard g(b);
b.setInsertionPointToStart(forallOp.getBody(0));
- ValueRange threadIds = forallOp.getInductionVars();
+ SmallVector<Value> threadIds = forallOp.getInductionVars();
SmallVector<OpFoldResult> nonZeroNumThreads =
llvm::to_vector(llvm::make_filter_range(numThreads, [](OpFoldResult ofr) {
return !isConstantIntValue(ofr, 0);
@@ -746,7 +746,7 @@ FailureOr<linalg::ForallReductionTilingResult> linalg::tileReductionUsingForall(
b.getIndexAttr(0));
SmallVector<OpFoldResult> sizes = tiledSizes;
sizes[reductionDim] = b.getIndexAttr(1);
- outOffsets[reductionDim] = forallOp.getInductionVars().front();
+ outOffsets[reductionDim] = forallOp.getInductionVars()[0];
// TODO: use SubsetExtractOpInterface once it is available.
tiledDpsInitOperands.push_back(b.create<tensor::ExtractSliceOp>(
loc, cast<RankedTensorType>(initOperand.getType()),
@@ -814,7 +814,7 @@ FailureOr<linalg::ForallReductionTilingResult> linalg::tileReductionUsingForall(
int64_t sizeIdx = 0;
for (int64_t i = 0, e = numThreads.size(); i < e; ++i) {
if (i == reductionDim) {
- resultOffsetsRank.push_back(forallOp.getInductionVars().front());
+ resultOffsetsRank.push_back(forallOp.getInductionVars()[0]);
resultSizesRank.push_back(b.getIndexAttr(1));
continue;
}
diff --git a/mlir/lib/Dialect/SCF/IR/SCF.cpp b/mlir/lib/Dialect/SCF/IR/SCF.cpp
index 107fd0690f193..5e94f4dc612a7 100644
--- a/mlir/lib/Dialect/SCF/IR/SCF.cpp
+++ b/mlir/lib/Dialect/SCF/IR/SCF.cpp
@@ -378,20 +378,20 @@ LogicalResult ForOp::verifyRegions() {
return success();
}
-std::optional<Value> ForOp::getSingleInductionVar() {
- return getInductionVar();
+std::optional<SmallVector<Value>> ForOp::getLoopInductionVars() {
+ return SmallVector<Value>{getInductionVar()};
}
-std::optional<OpFoldResult> ForOp::getSingleLowerBound() {
- return OpFoldResult(getLowerBound());
+std::optional<SmallVector<OpFoldResult>> ForOp::getLoopLowerBounds() {
+ return SmallVector<OpFoldResult>{OpFoldResult(getLowerBound())};
}
-std::optional<OpFoldResult> ForOp::getSingleStep() {
- return OpFoldResult(getStep());
+std::optional<SmallVector<OpFoldResult>> ForOp::getLoopSteps() {
+ return SmallVector<OpFoldResult>{OpFoldResult(getStep())};
}
-std::optional<OpFoldResult> ForOp::getSingleUpperBound() {
- return OpFoldResult(getUpperBound());
+std::optional<SmallVector<OpFoldResult>> ForOp::getLoopUpperBounds() {
+ return SmallVector<OpFoldResult>{OpFoldResult(getUpperBound())};
}
std::optional<ResultRange> ForOp::getLoopResults() { return getResults(); }
@@ -1428,28 +1428,26 @@ SmallVector<Operation *> ForallOp::getCombiningOps(BlockArgument bbArg) {
return storeOps;
}
-std::optional<Value> ForallOp::getSingleInductionVar() {
- if (getRank() != 1)
- return std::nullopt;
- return getInductionVar(0);
+std::optional<SmallVector<Value>> ForallOp::getLoopInductionVars() {
+ return SmallVector<Value>{getBody()->getArguments().take_front(getRank())};
}
-std::optional<OpFoldResult> ForallOp::getSingleLowerBound() {
- if (getRank() != 1)
- return std::nullopt;
- return getMixedLowerBound()[0];
+// Get lower bounds as OpFoldResult.
+std::optional<SmallVector<OpFoldResult>> ForallOp::getLoopLowerBounds() {
+ Builder b(getOperation()->getContext());
+ return getMixedValues(getStaticLowerBound(), getDynamicLowerBound(), b);
}
-std::optional<OpFoldResult> ForallOp::getSingleUpperBound() {
- if (getRank() != 1)
- return std::nullopt;
- return getMixedUpperBound()[0];
+// Get upper bounds as OpFoldResult.
+std::optional<SmallVector<OpFoldResult>> ForallOp::getLoopUpperBounds() {
+ Builder b(getOperation()->getContext());
+ return getMixedValues(getStaticUpperBound(), getDynamicUpperBound(), b);
}
-std::optional<OpFoldResult> ForallOp::getSingleStep() {
- if (getRank() != 1)
- return std::nullopt;
- return getMixedStep()[0];
+// Get steps as OpFoldResult.
+std::optional<SmallVector<OpFoldResult>> ForallOp::getLoopSteps() {
+ Builder b(getOperation()->getContext());
+ return getMixedValues(getStaticStep(), getDynamicStep(), b);
}
ForallOp mlir::scf::getForallOpThreadIndexOwner(Value val) {
@@ -3008,28 +3006,20 @@ void ParallelOp::print(OpAsmPrinter &p) {
SmallVector<Region *> ParallelOp::getLoopRegions() { return {&getRegion()}; }
-std::optional<Value> ParallelOp::getSingleInductionVar() {
- if (getNumLoops() != 1)
- return std::nullopt;
- return getBody()->getArgument(0);
+std::optional<SmallVector<Value>> ParallelOp::getLoopInductionVars() {
+ return SmallVector<Value>{getBody()->getArguments()};
}
-std::optional<OpFoldResult> ParallelOp::getSingleLowerBound() {
- if (getNumLoops() != 1)
- return std::nullopt;
- return getLowerBound()[0];
+std::optional<SmallVector<OpFoldResult>> ParallelOp::getLoopLowerBounds() {
+ return getLowerBound();
}
-std::optional<OpFoldResult> ParallelOp::getSingleUpperBound() {
- if (getNumLoops() != 1)
- return std::nullopt;
- return getUpperBound()[0];
+std::optional<SmallVector<OpFoldResult>> ParallelOp::getLoopUpperBounds() {
+ return getUpperBound();
}
-std::optional<OpFoldResult> ParallelOp::getSingleStep() {
- if (getNumLoops() != 1)
- return std::nullopt;
- return getStep()[0];
+std::optional<SmallVector<OpFoldResult>> ParallelOp::getLoopSteps() {
+ return getStep();
}
ParallelOp mlir::scf::getParallelForInductionVarOwner(Value val) {
diff --git a/mlir/lib/Dialect/SCF/Transforms/ForallToFor.cpp b/mlir/lib/Dialect/SCF/Transforms/ForallToFor.cpp
index 198cb2e6cc69e..5da1b76e929be 100644
--- a/mlir/lib/Dialect/SCF/Transforms/ForallToFor.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/ForallToFor.cpp
@@ -34,12 +34,9 @@ mlir::scf::forallToForLoop(RewriterBase &rewriter, scf::ForallOp forallOp,
rewriter.setInsertionPoint(forallOp);
Location loc = forallOp.getLoc();
- SmallVector<Value> lbs = getValueOrCreateConstantIndexOp(
- rewriter, loc, forallOp.getMixedLowerBound());
- SmallVector<Value> ubs = getValueOrCreateConstantIndexOp(
- rewriter, loc, forallOp.getMixedUpperBound());
- SmallVector<Value> steps =
- getValueOrCreateConstantIndexOp(rewriter, loc, forallOp.getMixedStep());
+ SmallVector<Value> lbs = forallOp.getLowerBound(rewriter);
+ SmallVector<Value> ubs = forallOp.getUpperBound(rewriter);
+ SmallVector<Value> steps = forallOp.getStep(rewriter);
LoopNest loopNest = scf::buildLoopNest(rewriter, loc, lbs, ubs, steps);
SmallVector<Value> ivs = llvm::map_to_vector(
diff --git a/mlir/lib/Dialect/SCF/Transforms/ForallToParallel.cpp b/mlir/lib/Dialect/SCF/Transforms/ForallToParallel.cpp
index 1fc0331300379..44e6840b03a3d 100644
--- a/mlir/lib/Dialect/SCF/Transforms/ForallToParallel.cpp
+++ b/mlir/lib/Dialect/SCF/Transforms/ForallToParallel.cpp
@@ -35,12 +35,9 @@ LogicalResult mlir::scf::forallToParallelLoop(RewriterBase &rewriter,
"only fully bufferized scf.forall ops can be lowered to scf.parallel");
// Convert mixed bounds and steps to SSA values.
- SmallVector<Value> lbs = getValueOrCreateConstantIndexOp(
- rewriter, loc, forallOp.getMixedLowerBound());
- SmallVector<Value> ubs = getValueOrCreateConstantIndexOp(
- rewriter, loc, forallOp.getMixedUpperBound());
- SmallVector<Value> steps =
- getValueOrCreateConstantIndexOp(rewriter, loc, forallOp.getMixedStep());
+ SmallVector<Value> lbs = forallOp.getLowerBound(rewriter);
+ SmallVector<Value> ubs = forallOp.getUpperBound(rewriter);
+ SmallVector<Value> steps = forallOp.getStep(rewriter);
// Create empty scf.parallel op.
auto parallelOp = rewriter.create<scf::ParallelOp>(loc, lbs, ubs, steps);
diff --git a/mlir/unittests/Dialect/SCF/LoopLikeSCFOpsTest.cpp b/mlir/unittests/Dialect/SCF/LoopLikeSCFOpsTest.cpp
index 6bc0fd6113b9b..53a4af14d119a 100644
--- a/mlir/unittests/Dialect/SCF/LoopLikeSCFOpsTest.cpp
+++ b/mlir/unittests/Dialect/SCF/LoopLikeSCFOpsTest.cpp
@@ -27,27 +27,65 @@ class SCFLoopLikeTest : public ::testing::Test {
}
void checkUnidimensional(LoopLikeOpInterface loopLikeOp) {
- std::optional<OpFoldResult> maybeLb = loopLikeOp.getSingleLowerBound();
- EXPECT_TRUE(maybeLb.has_value());
- std::optional<OpFoldResult> maybeUb = loopLikeOp.getSingleUpperBound();
- EXPECT_TRUE(maybeUb.has_value());
- std::optional<OpFoldResult> maybeStep = loopLikeOp.getSingleStep();
- EXPECT_TRUE(maybeStep.has_value());
- std::optional<OpFoldResult> maybeIndVar =
+ std::optional<OpFoldResult> maybeSingleLb =
+ loopLikeOp.getSingleLowerBound();
+ EXPECT_TRUE(maybeSingleLb.has_value());
+ std::optional<OpFoldResult> maybeSingleUb =
+ loopLikeOp.getSingleUpperBound();
+ EXPECT_TRUE(maybeSingleUb.has_value());
+ std::optional<OpFoldResult> maybeSingleStep = loopLikeOp.getSingleStep();
+ EXPECT_TRUE(maybeSingleStep.has_value());
+ std::optional<OpFoldResult> maybeSingleIndVar =
loopLikeOp.getSingleInductionVar();
- EXPECT_TRUE(maybeIndVar.has_value());
+ EXPECT_TRUE(maybeSingleIndVar.has_value());
+
+ std::optional<SmallVector<OpFoldResult>> maybeLb =
+ loopLikeOp.getLoopLowerBounds();
+ ASSERT_TRUE(maybeLb.has_value());
+ EXPECT_EQ((*maybeLb).size(), 1u);
+ std::optional<SmallVector<OpFoldResult>> maybeUb =
+ loopLikeOp.getLoopUpperBounds();
+ ASSERT_TRUE(maybeUb.has_value());
+ EXPECT_EQ((*maybeUb).size(), 1u);
+ std::optional<SmallVector<OpFoldResult>> maybeStep =
+ loopLikeOp.getLoopSteps();
+ ASSERT_TRUE(maybeStep.has_value());
+ EXPECT_EQ((*maybeStep).size(), 1u);
+ std::optional<SmallVector<Value>> maybeInductionVars =
+ loopLikeOp.getLoopInductionVars();
+ ASSERT_TRUE(maybeInductionVars.has_value());
+ EXPECT_EQ((*maybeInductionVars).size(), 1u);
}
void checkMultidimensional(LoopLikeOpInterface loopLikeOp) {
- std::optional<OpFoldResult> maybeLb = loopLikeOp.getSingleLowerBound();
- EXPECT_FALSE(maybeLb.has_value());
- std::optional<OpFoldResult> maybeUb = loopLikeOp.getSingleUpperBound();
- EXPECT_FALSE(maybeUb.has_value());
- std::optional<OpFoldResult> maybeStep = loopLikeOp.getSingleStep();
- EXPECT_FALSE(maybeStep.has_value());
- std::optional<OpFoldResult> maybeIndVar =
+ std::optional<OpFoldResult> maybeSingleLb =
+ loopLikeOp.getSingleLowerBound();
+ EXPECT_FALSE(maybeSingleLb.has_value());
+ std::optional<OpFoldResult> maybeSingleUb =
+ loopLikeOp.getSingleUpperBound();
+ EXPECT_FALSE(maybeSingleUb.has_value());
+ std::optional<OpFoldResult> maybeSingleStep = loopLikeOp.getSingleStep();
+ EXPECT_FALSE(maybeSingleStep.has_value());
+ std::optional<OpFoldResult> maybeSingleIndVar =
loopLikeOp.getSingleInductionVar();
- EXPECT_FALSE(maybeIndVar.has_value());
+ EXPECT_FALSE(maybeSingleIndVar.has_value());
+
+ std::optional<SmallVector<OpFoldResult>> maybeLb =
+ loopLikeOp.getLoopLowerBounds();
+ ASSERT_TRUE(maybeLb.has_value());
+ EXPECT_EQ((*maybeLb).size(), 2u);
+ std::optional<SmallVector<OpFoldResult>> maybeUb =
+ loopLikeOp.getLoopUpperBounds();
+ ASSERT_TRUE(maybeUb.has_value());
+ EXPECT_EQ((*maybeUb).size(), 2u);
+ std::optional<SmallVector<OpFoldResult>> maybeStep =
+ loopLikeOp.getLoopSteps();
+ ASSERT_TRUE(maybeStep.has_value());
+ EXPECT_EQ((*maybeStep).size(), 2u);
+ std::optional<SmallVector<Value>> maybeInductionVars =
+ loopLikeOp.getLoopInductionVars();
+ ASSERT_TRUE(maybeInductionVars.has_value());
+ EXPECT_EQ((*maybeInductionVars).size(), 2u);
}
MLIRContext context;
>From 86ef8f30c7b05e21ea068249a6c82be438f94be6 Mon Sep 17 00:00:00 2001
From: Teresa Johnson <tejohnson at google.com>
Date: Fri, 7 Jun 2024 16:26:41 -0700
Subject: [PATCH 10/57] [MemProf] Add matching statistics and tracing (#94814)
To help debug or surface matching issues, add more statistics to the
matching. Also add optional emission of each context seen in the
function profiles along with its allocation type, size in bytes, and
whether it was matched. This information is emitted along with a hash of
the full stack context, to allow deduplication across modules for
allocations within header files.
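For reference, a sketch of the hashing scheme used for those context ids
(mirroring the computeFullStackId helper in the patch; the free-standing
wrapper shown here is illustrative, not part of the change):

  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/ProfileData/MemProf.h"
  #include "llvm/Support/BLAKE3.h"
  #include "llvm/Support/HashBuilder.h"
  #include <cstring>

  // 64-bit truncated BLAKE3 over (Function, LineOffset, Column) of every
  // frame, so identical contexts hash identically across modules.
  static uint64_t hashFullStack(llvm::ArrayRef<llvm::memprof::Frame> CallStack) {
    llvm::HashBuilder<llvm::TruncatedBLAKE3<8>, llvm::endianness::little> HB;
    for (const llvm::memprof::Frame &F : CallStack)
      HB.add(F.Function, F.LineOffset, F.Column);
    llvm::BLAKE3Result<8> Hash = HB.final();
    uint64_t Id;
    std::memcpy(&Id, Hash.data(), sizeof(Id));
    return Id;
  }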
---
.../Instrumentation/MemProfiler.cpp | 89 +++++++++++++++++--
llvm/test/Transforms/PGOProfile/memprof.ll | 21 ++++-
2 files changed, 101 insertions(+), 9 deletions(-)
diff --git a/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp b/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp
index d70c6a7a0a152..14d0981da501d 100644
--- a/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp
@@ -149,11 +149,33 @@ static cl::opt<bool> ClMemProfMatchHotColdNew(
"Match allocation profiles onto existing hot/cold operator new calls"),
cl::Hidden, cl::init(false));
+static cl::opt<bool>
+ ClPrintMemProfMatchInfo("memprof-print-match-info",
+ cl::desc("Print matching stats for each allocation "
+ "context in this module's profiles"),
+ cl::Hidden, cl::init(false));
+
+// Instrumentation statistics
STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
STATISTIC(NumSkippedStackReads, "Number of non-instrumented stack reads");
STATISTIC(NumSkippedStackWrites, "Number of non-instrumented stack writes");
+
+// Matching statistics
STATISTIC(NumOfMemProfMissing, "Number of functions without memory profile.");
+STATISTIC(NumOfMemProfMismatch,
+ "Number of functions having mismatched memory profile hash.");
+STATISTIC(NumOfMemProfFunc, "Number of functions having valid memory profile.");
+STATISTIC(NumOfMemProfAllocContextProfiles,
+ "Number of alloc contexts in memory profile.");
+STATISTIC(NumOfMemProfCallSiteProfiles,
+ "Number of callsites in memory profile.");
+STATISTIC(NumOfMemProfMatchedAllocContexts,
+ "Number of matched memory profile alloc contexts.");
+STATISTIC(NumOfMemProfMatchedAllocs,
+ "Number of matched memory profile allocs.");
+STATISTIC(NumOfMemProfMatchedCallSites,
+ "Number of matched memory profile callsites.");
namespace {
@@ -637,8 +659,22 @@ static uint64_t computeStackId(const memprof::Frame &Frame) {
return computeStackId(Frame.Function, Frame.LineOffset, Frame.Column);
}
-static void addCallStack(CallStackTrie &AllocTrie,
- const AllocationInfo *AllocInfo) {
+// Helper to generate a single hash id for a given callstack, used for emitting
+// matching statistics and useful for uniquing such statistics across modules.
+static uint64_t
+computeFullStackId(const SmallVectorImpl<memprof::Frame> &CallStack) {
+ llvm::HashBuilder<llvm::TruncatedBLAKE3<8>, llvm::endianness::little>
+ HashBuilder;
+ for (auto &F : CallStack)
+ HashBuilder.add(F.Function, F.LineOffset, F.Column);
+ llvm::BLAKE3Result<8> Hash = HashBuilder.final();
+ uint64_t Id;
+ std::memcpy(&Id, Hash.data(), sizeof(Hash));
+ return Id;
+}
+
+static AllocationType addCallStack(CallStackTrie &AllocTrie,
+ const AllocationInfo *AllocInfo) {
SmallVector<uint64_t> StackIds;
for (const auto &StackFrame : AllocInfo->CallStack)
StackIds.push_back(computeStackId(StackFrame));
@@ -646,6 +682,7 @@ static void addCallStack(CallStackTrie &AllocTrie,
AllocInfo->Info.getAllocCount(),
AllocInfo->Info.getTotalLifetime());
AllocTrie.addCallStack(AllocType, StackIds);
+ return AllocType;
}
// Helper to compare the InlinedCallStack computed from an instruction's debug
@@ -701,9 +738,16 @@ static bool isNewWithHotColdVariant(Function *Callee,
}
}
-static void readMemprof(Module &M, Function &F,
- IndexedInstrProfReader *MemProfReader,
- const TargetLibraryInfo &TLI) {
+struct AllocMatchInfo {
+ uint64_t TotalSize = 0;
+ AllocationType AllocType = AllocationType::None;
+ bool Matched = false;
+};
+
+static void
+readMemprof(Module &M, Function &F, IndexedInstrProfReader *MemProfReader,
+ const TargetLibraryInfo &TLI,
+ std::map<uint64_t, AllocMatchInfo> &FullStackIdToAllocMatchInfo) {
auto &Ctx = M.getContext();
// Previously we used getIRPGOFuncName() here. If F is local linkage,
// getIRPGOFuncName() returns FuncName with prefix 'FileName;'. But
@@ -727,6 +771,7 @@ static void readMemprof(Module &M, Function &F,
SkipWarning = !PGOWarnMissing;
LLVM_DEBUG(dbgs() << "unknown function");
} else if (Err == instrprof_error::hash_mismatch) {
+ NumOfMemProfMismatch++;
SkipWarning =
NoPGOWarnMismatch ||
(NoPGOWarnMismatchComdatWeak &&
@@ -748,6 +793,8 @@ static void readMemprof(Module &M, Function &F,
return;
}
+ NumOfMemProfFunc++;
+
// Detect if there are non-zero column numbers in the profile. If not,
// treat all column numbers as 0 when matching (i.e. ignore any non-zero
// columns in the IR). The profiled binary might have been built with
@@ -762,6 +809,7 @@ static void readMemprof(Module &M, Function &F,
std::map<uint64_t, std::set<std::pair<const std::vector<Frame> *, unsigned>>>
LocHashToCallSites;
for (auto &AI : MemProfRec->AllocSites) {
+ NumOfMemProfAllocContextProfiles++;
// Associate the allocation info with the leaf frame. The later matching
// code will match any inlined call sequences in the IR with a longer prefix
// of call stack frames.
@@ -770,6 +818,7 @@ static void readMemprof(Module &M, Function &F,
ProfileHasColumns |= AI.CallStack[0].Column;
}
for (auto &CS : MemProfRec->CallSites) {
+ NumOfMemProfCallSiteProfiles++;
// Need to record all frames from leaf up to and including this function,
// as any of these may or may not have been inlined at this point.
unsigned Idx = 0;
@@ -863,13 +912,23 @@ static void readMemprof(Module &M, Function &F,
// If we found and thus matched all frames on the call, include
// this MIB.
if (stackFrameIncludesInlinedCallStack(AllocInfo->CallStack,
- InlinedCallStack))
- addCallStack(AllocTrie, AllocInfo);
+ InlinedCallStack)) {
+ NumOfMemProfMatchedAllocContexts++;
+ auto AllocType = addCallStack(AllocTrie, AllocInfo);
+ // Record information about the allocation if match info printing
+ // was requested.
+ if (ClPrintMemProfMatchInfo) {
+ auto FullStackId = computeFullStackId(AllocInfo->CallStack);
+ FullStackIdToAllocMatchInfo[FullStackId] = {
+ AllocInfo->Info.getTotalSize(), AllocType, /*Matched=*/true};
+ }
+ }
}
// We might not have matched any to the full inlined call stack.
// But if we did, create and attach metadata, or a function attribute if
// all contexts have identical profiled behavior.
if (!AllocTrie.empty()) {
+ NumOfMemProfMatchedAllocs++;
// MemprofMDAttached will be false if a function attribute was
// attached.
bool MemprofMDAttached = AllocTrie.buildAndAttachMIBMetadata(CI);
@@ -897,6 +956,7 @@ static void readMemprof(Module &M, Function &F,
// attach call stack metadata.
if (stackFrameIncludesInlinedCallStack(
*CallStackIdx.first, InlinedCallStack, CallStackIdx.second)) {
+ NumOfMemProfMatchedCallSites++;
addCallsiteMetadata(I, InlinedCallStack, Ctx);
// Only need to find one with a matching call stack and add a single
// callsite metadata.
@@ -942,12 +1002,25 @@ PreservedAnalyses MemProfUsePass::run(Module &M, ModuleAnalysisManager &AM) {
auto &FAM = AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
+  // Map from the stack hash of each allocation context in the function profiles
+ // to the total profiled size (bytes), allocation type, and whether we matched
+ // it to an allocation in the IR.
+ std::map<uint64_t, AllocMatchInfo> FullStackIdToAllocMatchInfo;
+
for (auto &F : M) {
if (F.isDeclaration())
continue;
const TargetLibraryInfo &TLI = FAM.getResult<TargetLibraryAnalysis>(F);
- readMemprof(M, F, MemProfReader.get(), TLI);
+ readMemprof(M, F, MemProfReader.get(), TLI, FullStackIdToAllocMatchInfo);
+ }
+
+ if (ClPrintMemProfMatchInfo) {
+ for (const auto &[Id, Info] : FullStackIdToAllocMatchInfo)
+ errs() << "MemProf " << getAllocTypeAttributeString(Info.AllocType)
+ << " context with id " << Id << " has total profiled size "
+ << Info.TotalSize << (Info.Matched ? " is" : " not")
+ << " matched\n";
}
return PreservedAnalyses::none();
diff --git a/llvm/test/Transforms/PGOProfile/memprof.ll b/llvm/test/Transforms/PGOProfile/memprof.ll
index 13f370a4071e8..4a87f4f9d7449 100644
--- a/llvm/test/Transforms/PGOProfile/memprof.ll
+++ b/llvm/test/Transforms/PGOProfile/memprof.ll
@@ -5,6 +5,8 @@
; REQUIRES: zlib
;; Avoid failures on big-endian systems that can't read the profile properly
; REQUIRES: x86_64-linux
+;; -stats requires asserts
+; REQUIRES: asserts
;; TODO: Use text profile inputs once that is available for memprof.
;; # To update the Inputs below, run Inputs/update_memprof_inputs.sh.
@@ -25,7 +27,7 @@
; ALL-NOT: no profile data available for function
;; Using a memprof-only profile for memprof-use should only give memprof metadata
-; RUN: opt < %s -passes='memprof-use<profile-filename=%t.memprofdata>' -pgo-warn-missing-function -S 2>&1 | FileCheck %s --check-prefixes=MEMPROF,ALL,MEMPROFONLY
+; RUN: opt < %s -passes='memprof-use<profile-filename=%t.memprofdata>' -pgo-warn-missing-function -S -memprof-print-match-info -stats 2>&1 | FileCheck %s --check-prefixes=MEMPROF,ALL,MEMPROFONLY,MEMPROFMATCHINFO,MEMPROFSTATS
; There should not be any PGO metadata
; MEMPROFONLY-NOT: !prof
@@ -61,6 +63,15 @@
;; give both memprof and pgo metadata.
; RUN: opt < %s -passes='pgo-instr-use,memprof-use<profile-filename=%t.pgomemprofdata>' -pgo-test-profile-file=%t.pgomemprofdata -pgo-warn-missing-function -S 2>&1 | FileCheck %s --check-prefixes=MEMPROF,ALL,PGO
+; MEMPROFMATCHINFO: MemProf notcold context with id 1093248920606587996 has total profiled size 10 is matched
+; MEMPROFMATCHINFO: MemProf notcold context with id 5725971306423925017 has total profiled size 10 is matched
+; MEMPROFMATCHINFO: MemProf notcold context with id 6792096022461663180 has total profiled size 10 is matched
+; MEMPROFMATCHINFO: MemProf cold context with id 8525406123785421946 has total profiled size 10 is matched
+; MEMPROFMATCHINFO: MemProf cold context with id 11714230664165068698 has total profiled size 10 is matched
+; MEMPROFMATCHINFO: MemProf cold context with id 15737101490731057601 has total profiled size 10 is matched
+; MEMPROFMATCHINFO: MemProf cold context with id 16342802530253093571 has total profiled size 10 is matched
+; MEMPROFMATCHINFO: MemProf cold context with id 18254812774972004394 has total profiled size 10 is matched
+
; ModuleID = 'memprof.cc'
source_filename = "memprof.cc"
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
@@ -346,6 +357,14 @@ for.end: ; preds = %for.cond
; MEMPROFNOCOLINFO: ![[C10]] = !{i64 -4535090212904553409}
; MEMPROFNOCOLINFO: ![[C11]] = !{i64 3577763375057267810}
+; MEMPROFSTATS: 8 memprof - Number of alloc contexts in memory profile.
+; MEMPROFSTATS: 10 memprof - Number of callsites in memory profile.
+; MEMPROFSTATS: 6 memprof - Number of functions having valid memory profile.
+; MEMPROFSTATS: 8 memprof - Number of matched memory profile alloc contexts.
+; MEMPROFSTATS: 3 memprof - Number of matched memory profile allocs.
+; MEMPROFSTATS: 10 memprof - Number of matched memory profile callsites.
+
+
; Function Attrs: argmemonly nofree nounwind willreturn writeonly
declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg) #3
>From a569d927caa065f11d2ed36fb7ef32be52d53ecd Mon Sep 17 00:00:00 2001
From: Kazu Hirata <kazu at google.com>
Date: Fri, 7 Jun 2024 16:40:19 -0700
Subject: [PATCH 11/57] [memprof] Fix a build error
---
llvm/lib/Transforms/Instrumentation/MemProfiler.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp b/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp
index 14d0981da501d..aac57231ba2ed 100644
--- a/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemProfiler.cpp
@@ -662,7 +662,7 @@ static uint64_t computeStackId(const memprof::Frame &Frame) {
// Helper to generate a single hash id for a given callstack, used for emitting
// matching statistics and useful for uniquing such statistics across modules.
static uint64_t
-computeFullStackId(const SmallVectorImpl<memprof::Frame> &CallStack) {
+computeFullStackId(const std::vector<memprof::Frame> &CallStack) {
llvm::HashBuilder<llvm::TruncatedBLAKE3<8>, llvm::endianness::little>
HashBuilder;
for (auto &F : CallStack)
>From 779d1b2e98d7f43a3027a6a8a174cd9a3bb085b4 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Fri, 7 Jun 2024 16:03:05 -0700
Subject: [PATCH 12/57] [RISCV] Remove unused tablegen multiclass. NFC
---
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td | 11 -----------
1 file changed, 11 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 603c1985c4fee..8a0308f8aa2ab 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -2369,12 +2369,6 @@ multiclass VPseudoBinaryW_VI<Operand ImmType, LMULInfo m> {
"@earlyclobber $rd", TargetConstraintType=3>;
}
-multiclass VPseudoBinaryW_VF<LMULInfo m, FPR_Info f> {
- defm "_V" # f.FX : VPseudoBinary<m.wvrclass, m.vrclass,
- f.fprclass, m,
- "@earlyclobber $rd">;
-}
-
multiclass VPseudoBinaryW_VF_RM<LMULInfo m, FPR_Info f, int sew = 0> {
defm "_V" # f.FX : VPseudoBinaryRoundingMode<m.wvrclass, m.vrclass,
f.fprclass, m,
@@ -2403,11 +2397,6 @@ multiclass VPseudoBinaryW_WX<LMULInfo m> {
defm "_WX" : VPseudoBinary<m.wvrclass, m.wvrclass, GPR, m, /*Constraint*/ "", TargetConstraintType=3>;
}
-multiclass VPseudoBinaryW_WF<LMULInfo m, FPR_Info f, int TargetConstraintType = 1> {
- defm "_W" # f.FX : VPseudoBinary<m.wvrclass, m.wvrclass,
- f.fprclass, m, /*Constraint*/ "", TargetConstraintType=TargetConstraintType>;
-}
-
multiclass VPseudoBinaryW_WF_RM<LMULInfo m, FPR_Info f, int sew = 0> {
defm "_W" # f.FX : VPseudoBinaryRoundingMode<m.wvrclass, m.wvrclass,
f.fprclass, m,
>From ea8ec3846e1969e7b5701622f7a99e36d538a979 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Fri, 7 Jun 2024 16:31:06 -0700
Subject: [PATCH 13/57] [RISCV] Remove CarryIn and Constraint parameters from
VPseudoTiedBinaryCarryIn. NFC
They were always passed the same values, 1 for CarryIn and "" for
Constraint.
---
.../Target/RISCV/RISCVInstrInfoVPseudos.td | 33 +++++++------------
1 file changed, 12 insertions(+), 21 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 8a0308f8aa2ab..72e8ae75b5832 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1549,20 +1549,15 @@ class VPseudoTiedBinaryCarryIn<VReg RetClass,
VReg Op1Class,
DAGOperand Op2Class,
LMULInfo MInfo,
- bit CarryIn,
- string Constraint,
int TargetConstraintType = 1> :
Pseudo<(outs RetClass:$rd),
- !if(CarryIn,
- (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1,
- VMV0:$carry, AVL:$vl, ixlenimm:$sew),
- (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1,
- AVL:$vl, ixlenimm:$sew)), []>,
+ (ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1,
+ VMV0:$carry, AVL:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
- let Constraints = !interleave([Constraint, "$rd = $merge"], ",");
+ let Constraints = "$rd = $merge";
let TargetOverlapConstraintType = TargetConstraintType;
let HasVLOp = 1;
let HasSEWOp = 1;
@@ -2465,13 +2460,11 @@ multiclass VPseudoBinaryV_VM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
m.vrclass, m.vrclass, m, CarryIn, Constraint, TargetConstraintType>;
}
-multiclass VPseudoTiedBinaryV_VM<LMULInfo m, int TargetConstraintType = 1,
- bit Commutable = 0> {
+multiclass VPseudoTiedBinaryV_VM<LMULInfo m, bit Commutable = 0> {
let isCommutable = Commutable in
def "_VVM" # "_" # m.MX:
VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
- m.vrclass, m.vrclass, m, 1, "",
- TargetConstraintType>;
+ m.vrclass, m.vrclass, m>;
}
multiclass VPseudoBinaryV_XM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
@@ -2483,11 +2476,10 @@ multiclass VPseudoBinaryV_XM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
m.vrclass, GPR, m, CarryIn, Constraint, TargetConstraintType>;
}
-multiclass VPseudoTiedBinaryV_XM<LMULInfo m, int TargetConstraintType = 1> {
+multiclass VPseudoTiedBinaryV_XM<LMULInfo m> {
def "_VXM" # "_" # m.MX:
VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
- m.vrclass, GPR, m, 1, "",
- TargetConstraintType>;
+ m.vrclass, GPR, m>;
}
multiclass VPseudoVMRG_FM {
@@ -2496,8 +2488,7 @@ multiclass VPseudoVMRG_FM {
defvar mx = m.MX;
def "_V" # f.FX # "M_" # mx
: VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R, m.vrclass,
- f.fprclass, m, CarryIn=1,
- Constraint = "">,
+ f.fprclass, m>,
SchedBinary<"WriteVFMergeV", "ReadVFMergeV", "ReadVFMergeF", mx,
forceMasked=1, forceMergeOpRead=true>;
}
@@ -2516,7 +2507,7 @@ multiclass VPseudoBinaryV_IM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
multiclass VPseudoTiedBinaryV_IM<LMULInfo m> {
def "_VIM" # "_" # m.MX:
VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
- m.vrclass, simm5, m, 1, "">;
+ m.vrclass, simm5, m>;
}
multiclass VPseudoUnaryVMV_V_X_I {
@@ -3073,17 +3064,17 @@ multiclass VPseudoVMRG_VM_XM_IM {
defvar mx = m.MX;
def "_VVM" # "_" # m.MX:
VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
- m.vrclass, m.vrclass, m, 1, "">,
+ m.vrclass, m.vrclass, m>,
SchedBinary<"WriteVIMergeV", "ReadVIMergeV", "ReadVIMergeV", mx,
forceMergeOpRead=true>;
def "_VXM" # "_" # m.MX:
VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
- m.vrclass, GPR, m, 1, "">,
+ m.vrclass, GPR, m>,
SchedBinary<"WriteVIMergeX", "ReadVIMergeV", "ReadVIMergeX", mx,
forceMergeOpRead=true>;
def "_VIM" # "_" # m.MX:
VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
- m.vrclass, simm5, m, 1, "">,
+ m.vrclass, simm5, m>,
SchedUnary<"WriteVIMergeI", "ReadVIMergeV", mx,
forceMergeOpRead=true>;
}
>From b13b130476b1603631fff59333c54775b8a5de92 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Fri, 7 Jun 2024 16:47:06 -0700
Subject: [PATCH 14/57] [RISCV] Rename VPseudoBinaryCarryIn to
VPseudoBinaryCarry. NFC
It doesn't always have a carry-in; one of the parameters is named CarryIn.
An instance always has a carry-out or a carry-in, and in some cases both.
---
.../Target/RISCV/RISCVInstrInfoVPseudos.td | 38 +++++++++----------
1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 72e8ae75b5832..818073d049192 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1521,13 +1521,13 @@ class VPseudoTiedBinaryMaskRoundingMode<VReg RetClass,
let UsesVXRM = 0;
}
-class VPseudoBinaryCarryIn<VReg RetClass,
- VReg Op1Class,
- DAGOperand Op2Class,
- LMULInfo MInfo,
- bit CarryIn,
- string Constraint,
- int TargetConstraintType = 1> :
+class VPseudoBinaryCarry<VReg RetClass,
+ VReg Op1Class,
+ DAGOperand Op2Class,
+ LMULInfo MInfo,
+ bit CarryIn,
+ string Constraint,
+ int TargetConstraintType = 1> :
Pseudo<(outs RetClass:$rd),
!if(CarryIn,
(ins Op1Class:$rs2, Op2Class:$rs1,
@@ -2454,10 +2454,10 @@ multiclass VPseudoBinaryV_VM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
int TargetConstraintType = 1> {
let isCommutable = Commutable in
def "_VV" # !if(CarryIn, "M", "") # "_" # m.MX :
- VPseudoBinaryCarryIn<!if(CarryOut, VR,
- !if(!and(CarryIn, !not(CarryOut)),
- GetVRegNoV0<m.vrclass>.R, m.vrclass)),
- m.vrclass, m.vrclass, m, CarryIn, Constraint, TargetConstraintType>;
+ VPseudoBinaryCarry<!if(CarryOut, VR,
+ !if(!and(CarryIn, !not(CarryOut)),
+ GetVRegNoV0<m.vrclass>.R, m.vrclass)),
+ m.vrclass, m.vrclass, m, CarryIn, Constraint, TargetConstraintType>;
}
multiclass VPseudoTiedBinaryV_VM<LMULInfo m, bit Commutable = 0> {
@@ -2470,10 +2470,10 @@ multiclass VPseudoTiedBinaryV_VM<LMULInfo m, bit Commutable = 0> {
multiclass VPseudoBinaryV_XM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
string Constraint = "", int TargetConstraintType = 1> {
def "_VX" # !if(CarryIn, "M", "") # "_" # m.MX :
- VPseudoBinaryCarryIn<!if(CarryOut, VR,
- !if(!and(CarryIn, !not(CarryOut)),
- GetVRegNoV0<m.vrclass>.R, m.vrclass)),
- m.vrclass, GPR, m, CarryIn, Constraint, TargetConstraintType>;
+ VPseudoBinaryCarry<!if(CarryOut, VR,
+ !if(!and(CarryIn, !not(CarryOut)),
+ GetVRegNoV0<m.vrclass>.R, m.vrclass)),
+ m.vrclass, GPR, m, CarryIn, Constraint, TargetConstraintType>;
}
multiclass VPseudoTiedBinaryV_XM<LMULInfo m> {
@@ -2498,10 +2498,10 @@ multiclass VPseudoVMRG_FM {
multiclass VPseudoBinaryV_IM<LMULInfo m, bit CarryOut = 0, bit CarryIn = 1,
string Constraint = "", int TargetConstraintType = 1> {
def "_VI" # !if(CarryIn, "M", "") # "_" # m.MX :
- VPseudoBinaryCarryIn<!if(CarryOut, VR,
- !if(!and(CarryIn, !not(CarryOut)),
- GetVRegNoV0<m.vrclass>.R, m.vrclass)),
- m.vrclass, simm5, m, CarryIn, Constraint, TargetConstraintType>;
+ VPseudoBinaryCarry<!if(CarryOut, VR,
+ !if(!and(CarryIn, !not(CarryOut)),
+ GetVRegNoV0<m.vrclass>.R, m.vrclass)),
+ m.vrclass, simm5, m, CarryIn, Constraint, TargetConstraintType>;
}
multiclass VPseudoTiedBinaryV_IM<LMULInfo m> {
>From bf676f31795aeff8dbd4533bf2674dc8873be8e6 Mon Sep 17 00:00:00 2001
From: jimingham <jingham at apple.com>
Date: Fri, 7 Jun 2024 17:05:29 -0700
Subject: [PATCH 15/57] Add AllowRepeats to SBCommandInterpreterRunOptions.
(#94786)
This is useful if you have a transcript of a user session and want to
rerun those commands with RunCommandInterpreter. The same functionality
is also useful in testing.
I'm adding it primarily for the second reason. In a subsequent patch,
I'm adding the ability for Python-based commands to provide their
"auto-repeat" command. Among other things, that will allow potentially
state-destroying user commands to prevent auto-repeat. Testing this with
Shell or pexpect tests is not nearly as accurate or convenient as using
RunCommandInterpreter, but to use that I need to allow auto-repeat.
I think for consistency's sake, having interactive sessions always do
auto-repeats is the right choice, though that's a lightly held
opinion...
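For reference, a minimal C++ sketch of driving RunCommandInterpreter with the
new option (it assumes an already-configured SBDebugger named `debugger`
whose input file holds the commands to run):

  lldb::SBCommandInterpreterRunOptions options;
  options.SetEchoCommands(false);
  options.SetAllowRepeats(true); // empty input lines repeat the last command

  int num_errors = 0;
  bool quit_requested = false, stopped_for_crash = false;
  debugger.RunCommandInterpreter(/*auto_handle_events=*/true,
                                 /*spawn_thread=*/false, options, num_errors,
                                 quit_requested, stopped_for_crash);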
---
...SBCommandInterpreterRunOptionsDocstrings.i | 3 +
.../lldb/API/SBCommandInterpreterRunOptions.h | 8 +++
.../lldb/Interpreter/CommandInterpreter.h | 16 ++++-
.../API/SBCommandInterpreterRunOptions.cpp | 12 ++++
.../source/Interpreter/CommandInterpreter.cpp | 14 ++++-
.../TestRunCommandInterpreterAPI.py | 59 ++++++++++++++++---
6 files changed, 98 insertions(+), 14 deletions(-)
diff --git a/lldb/bindings/interface/SBCommandInterpreterRunOptionsDocstrings.i b/lldb/bindings/interface/SBCommandInterpreterRunOptionsDocstrings.i
index b37da0535d18a..a4398d95ed0d1 100644
--- a/lldb/bindings/interface/SBCommandInterpreterRunOptionsDocstrings.i
+++ b/lldb/bindings/interface/SBCommandInterpreterRunOptionsDocstrings.i
@@ -10,5 +10,8 @@ A default SBCommandInterpreterRunOptions object has:
* PrintResults: true
* PrintErrors: true
* AddToHistory: true
+* AllowRepeats: false
+Interactive debug sessions always allow repeats; the AllowRepeats
+run option only affects non-interactive sessions.
") lldb::SBCommandInterpreterRunOptions;
diff --git a/lldb/include/lldb/API/SBCommandInterpreterRunOptions.h b/lldb/include/lldb/API/SBCommandInterpreterRunOptions.h
index 69b969267e755..0f248c926d454 100644
--- a/lldb/include/lldb/API/SBCommandInterpreterRunOptions.h
+++ b/lldb/include/lldb/API/SBCommandInterpreterRunOptions.h
@@ -72,6 +72,14 @@ class LLDB_API SBCommandInterpreterRunOptions {
void SetSpawnThread(bool);
+ bool GetAllowRepeats() const;
+
+ /// By default, RunCommandInterpreter will discard repeats if the
+ /// IOHandler being used is not interactive. Setting AllowRepeats to true
+ /// will override this behavior and always process empty lines in the input
+ /// as a repeat command.
+ void SetAllowRepeats(bool);
+
private:
lldb_private::CommandInterpreterRunOptions *get() const;
diff --git a/lldb/include/lldb/Interpreter/CommandInterpreter.h b/lldb/include/lldb/Interpreter/CommandInterpreter.h
index 8863523b2e31f..48f6618ab0e39 100644
--- a/lldb/include/lldb/Interpreter/CommandInterpreter.h
+++ b/lldb/include/lldb/Interpreter/CommandInterpreter.h
@@ -93,15 +93,20 @@ class CommandInterpreterRunOptions {
/// \param[in] add_to_history
/// If \b true add the commands to the command history. If \b false, don't
/// add them.
+ /// \param[in] handle_repeats
+ /// If \b true then treat empty lines as repeat commands even if the
+ /// interpreter is non-interactive.
CommandInterpreterRunOptions(LazyBool stop_on_continue,
LazyBool stop_on_error, LazyBool stop_on_crash,
LazyBool echo_commands, LazyBool echo_comments,
LazyBool print_results, LazyBool print_errors,
- LazyBool add_to_history)
+ LazyBool add_to_history,
+ LazyBool handle_repeats)
: m_stop_on_continue(stop_on_continue), m_stop_on_error(stop_on_error),
m_stop_on_crash(stop_on_crash), m_echo_commands(echo_commands),
m_echo_comment_commands(echo_comments), m_print_results(print_results),
- m_print_errors(print_errors), m_add_to_history(add_to_history) {}
+ m_print_errors(print_errors), m_add_to_history(add_to_history),
+ m_allow_repeats(handle_repeats) {}
CommandInterpreterRunOptions() = default;
@@ -183,6 +188,12 @@ class CommandInterpreterRunOptions {
m_spawn_thread = spawn_thread ? eLazyBoolYes : eLazyBoolNo;
}
+ bool GetAllowRepeats() const { return DefaultToNo(m_allow_repeats); }
+
+ void SetAllowRepeats(bool allow_repeats) {
+ m_allow_repeats = allow_repeats ? eLazyBoolYes : eLazyBoolNo;
+ }
+
LazyBool m_stop_on_continue = eLazyBoolCalculate;
LazyBool m_stop_on_error = eLazyBoolCalculate;
LazyBool m_stop_on_crash = eLazyBoolCalculate;
@@ -193,6 +204,7 @@ class CommandInterpreterRunOptions {
LazyBool m_add_to_history = eLazyBoolCalculate;
LazyBool m_auto_handle_events;
LazyBool m_spawn_thread;
+ LazyBool m_allow_repeats = eLazyBoolCalculate;
private:
static bool DefaultToYes(LazyBool flag) {
diff --git a/lldb/source/API/SBCommandInterpreterRunOptions.cpp b/lldb/source/API/SBCommandInterpreterRunOptions.cpp
index 6c6b2aa15a792..0c7581d6f1f5b 100644
--- a/lldb/source/API/SBCommandInterpreterRunOptions.cpp
+++ b/lldb/source/API/SBCommandInterpreterRunOptions.cpp
@@ -164,6 +164,18 @@ void SBCommandInterpreterRunOptions::SetSpawnThread(bool spawn_thread) {
m_opaque_up->SetSpawnThread(spawn_thread);
}
+bool SBCommandInterpreterRunOptions::GetAllowRepeats() const {
+ LLDB_INSTRUMENT_VA(this);
+
+ return m_opaque_up->GetAllowRepeats();
+}
+
+void SBCommandInterpreterRunOptions::SetAllowRepeats(bool allow_repeats) {
+ LLDB_INSTRUMENT_VA(this, allow_repeats);
+
+ m_opaque_up->SetAllowRepeats(allow_repeats);
+}
+
lldb_private::CommandInterpreterRunOptions *
SBCommandInterpreterRunOptions::get() const {
return m_opaque_up.get();
diff --git a/lldb/source/Interpreter/CommandInterpreter.cpp b/lldb/source/Interpreter/CommandInterpreter.cpp
index acd6294cb3f42..da995de1407c4 100644
--- a/lldb/source/Interpreter/CommandInterpreter.cpp
+++ b/lldb/source/Interpreter/CommandInterpreter.cpp
@@ -2707,7 +2707,8 @@ enum {
eHandleCommandFlagEchoCommentCommand = (1u << 3),
eHandleCommandFlagPrintResult = (1u << 4),
eHandleCommandFlagPrintErrors = (1u << 5),
- eHandleCommandFlagStopOnCrash = (1u << 6)
+ eHandleCommandFlagStopOnCrash = (1u << 6),
+ eHandleCommandFlagAllowRepeats = (1u << 7)
};
void CommandInterpreter::HandleCommandsFromFile(
@@ -3129,14 +3130,19 @@ void CommandInterpreter::IOHandlerInputComplete(IOHandler &io_handler,
return;
const bool is_interactive = io_handler.GetIsInteractive();
- if (!is_interactive) {
+ const bool allow_repeats =
+ io_handler.GetFlags().Test(eHandleCommandFlagAllowRepeats);
+
+ if (!is_interactive && !allow_repeats) {
// When we are not interactive, don't execute blank lines. This will happen
// sourcing a commands file. We don't want blank lines to repeat the
// previous command and cause any errors to occur (like redefining an
// alias, get an error and stop parsing the commands file).
+ // But obey the AllowRepeats flag if the user has set it.
if (line.empty())
return;
-
+ }
+ if (!is_interactive) {
// When using a non-interactive file handle (like when sourcing commands
// from a file) we need to echo the command out so we don't just see the
// command output and no command...
@@ -3388,6 +3394,8 @@ CommandInterpreter::GetIOHandler(bool force_create,
flags |= eHandleCommandFlagPrintResult;
if (options->m_print_errors != eLazyBoolNo)
flags |= eHandleCommandFlagPrintErrors;
+ if (options->m_allow_repeats == eLazyBoolYes)
+ flags |= eHandleCommandFlagAllowRepeats;
} else {
flags = eHandleCommandFlagEchoCommand | eHandleCommandFlagPrintResult |
eHandleCommandFlagPrintErrors;
diff --git a/lldb/test/API/python_api/interpreter/TestRunCommandInterpreterAPI.py b/lldb/test/API/python_api/interpreter/TestRunCommandInterpreterAPI.py
index af97493133766..f677b869d1379 100644
--- a/lldb/test/API/python_api/interpreter/TestRunCommandInterpreterAPI.py
+++ b/lldb/test/API/python_api/interpreter/TestRunCommandInterpreterAPI.py
@@ -47,28 +47,66 @@ def setUp(self):
TestBase.setUp(self)
self.stdin_path = self.getBuildArtifact("stdin.txt")
+ self.stdout_path = self.getBuildArtifact("stdout.txt")
+
+ def run_commands_string(
+ self, command_string, options=lldb.SBCommandInterpreterRunOptions()
+ ):
+ """Run the commands in command_string through RunCommandInterpreter.
+ Returns (n_errors, quit_requested, has_crashed, result_string)."""
with open(self.stdin_path, "w") as input_handle:
- input_handle.write("nonexistingcommand\nquit")
+ input_handle.write(command_string)
- self.dbg.SetInputFile(open(self.stdin_path, "r"))
+ n_errors = 0
+ quit_requested = False
+ has_crashed = False
- # No need to track the output
- devnull = open(os.devnull, "w")
- self.dbg.SetOutputFile(devnull)
- self.dbg.SetErrorFile(devnull)
+ with open(self.stdin_path, "r") as in_fileH, open(
+ self.stdout_path, "w"
+ ) as out_fileH:
+ self.dbg.SetInputFile(in_fileH)
+
+ self.dbg.SetOutputFile(out_fileH)
+ self.dbg.SetErrorFile(out_fileH)
+
+ n_errors, quit_requested, has_crashed = self.dbg.RunCommandInterpreter(
+ True, False, options, 0, False, False
+ )
+
+ result_string = None
+ with open(self.stdout_path, "r") as out_fileH:
+ result_string = out_fileH.read()
+
+ return (n_errors, quit_requested, has_crashed, result_string)
def test_run_session_with_error_and_quit(self):
"""Run non-existing and quit command returns appropriate values"""
- n_errors, quit_requested, has_crashed = self.dbg.RunCommandInterpreter(
- True, False, lldb.SBCommandInterpreterRunOptions(), 0, False, False
+ n_errors, quit_requested, has_crashed, _ = self.run_commands_string(
+ "nonexistingcommand\nquit\n"
)
-
self.assertGreater(n_errors, 0)
self.assertTrue(quit_requested)
self.assertFalse(has_crashed)
+ def test_allow_repeat(self):
+        """Try auto-repeat of process launch - the command will fail, and
+        the auto-repeat will also fail because the command has no auto-repeat."""
+ options = lldb.SBCommandInterpreterRunOptions()
+ options.SetEchoCommands(False)
+ options.SetAllowRepeats(True)
+
+ n_errors, quit_requested, has_crashed, result_str = self.run_commands_string(
+ "process launch\n\n", options
+ )
+ self.assertEqual(n_errors, 2)
+ self.assertFalse(quit_requested)
+ self.assertFalse(has_crashed)
+
+ self.assertIn("invalid target", result_str)
+ self.assertIn("No auto repeat", result_str)
+
class SBCommandInterpreterRunOptionsCase(TestBase):
NO_DEBUG_INFO_TESTCASE = True
@@ -86,6 +124,7 @@ def test_command_interpreter_run_options(self):
self.assertTrue(opts.GetPrintResults())
self.assertTrue(opts.GetPrintErrors())
self.assertTrue(opts.GetAddToHistory())
+ self.assertFalse(opts.GetAllowRepeats())
# Invert values
opts.SetStopOnContinue(not opts.GetStopOnContinue())
@@ -95,6 +134,7 @@ def test_command_interpreter_run_options(self):
opts.SetPrintResults(not opts.GetPrintResults())
opts.SetPrintErrors(not opts.GetPrintErrors())
opts.SetAddToHistory(not opts.GetAddToHistory())
+ opts.SetAllowRepeats(not opts.GetAllowRepeats())
# Check the value changed
self.assertTrue(opts.GetStopOnContinue())
@@ -104,3 +144,4 @@ def test_command_interpreter_run_options(self):
self.assertFalse(opts.GetPrintResults())
self.assertFalse(opts.GetPrintErrors())
self.assertFalse(opts.GetAddToHistory())
+ self.assertTrue(opts.GetAllowRepeats())
>From 7124c0a75b198f3741952ab54e54241730ba2cc1 Mon Sep 17 00:00:00 2001
From: Kazu Hirata <kazu at google.com>
Date: Fri, 7 Jun 2024 17:25:57 -0700
Subject: [PATCH 16/57] [memprof] Improve deserialization performance in V3
(#94787)
We call llvm::sort in a couple of places in the V3 encoding:
- We sort Frames by FrameIds for stability of the output.
- We sort call stacks in the dictionary order to maximize the length
of the common prefix between adjacent call stacks.
It turns out that we can improve the deserialization performance by
modifying the comparison functions -- without changing the format at
all. Both places take advantage of the histogram of Frames -- how
many times each Frame occurs in the call stacks.
- Frames: We serialize popular Frames in the descending order of
popularity for improved cache locality. For two equally popular
Frames, we break a tie by serializing one that tends to appear
earlier in call stacks. Here, "earlier" means a smaller index
within llvm::SmallVector<FrameId>.
- Call Stacks: We sort the call stacks to reduce the number of times
we follow pointers to parents during deserialization. Specifically,
instead of comparing two call stacks in the strcmp style -- integer
comparisons of FrameIds, we compare two FrameIds F1 and F2 with
Histogram[F1] < Histogram[F2] at respective indexes. Since we
encode from the end of the sorted list of call stacks, we tend to
encode popular call stacks first.
Since the two places use the same histogram, we compute it once and
share it in the two places.
Sorting the call stacks reduces the number of "jumps" by 74% when we
deserialize all MemProfRecords. The cycle and instruction counts go
down by 10% and 1.5%, respectively.
If we sort the Frames in addition to the call stacks, then the cycle
and instruction counts go down by 14% and 1.6%, respectively, relative
to the same baseline (that is, without this patch).
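A rough sketch of the call-stack comparison this describes (illustrative
only, includes elided; here Histogram maps each FrameId to its occurrence
count across all call stacks):

  // Order call stacks by per-index frame popularity rather than by raw
  // FrameId values, so popular call stacks end up next to each other.
  static bool lessByPopularity(llvm::ArrayRef<llvm::memprof::FrameId> L,
                               llvm::ArrayRef<llvm::memprof::FrameId> R,
                               const llvm::DenseMap<llvm::memprof::FrameId,
                                                    uint64_t> &Histogram) {
    for (size_t I = 0, E = std::min(L.size(), R.size()); I != E; ++I) {
      uint64_t HL = Histogram.lookup(L[I]);
      uint64_t HR = Histogram.lookup(R[I]);
      if (HL != HR)
        return HL < HR;
    }
    return L.size() < R.size();
  }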
---
llvm/include/llvm/ProfileData/MemProf.h | 20 ++++++-
llvm/lib/ProfileData/InstrProfWriter.cpp | 43 +++++++++++---
llvm/lib/ProfileData/MemProf.cpp | 67 ++++++++++++++++++++--
llvm/unittests/ProfileData/MemProfTest.cpp | 24 ++++++--
4 files changed, 136 insertions(+), 18 deletions(-)
diff --git a/llvm/include/llvm/ProfileData/MemProf.h b/llvm/include/llvm/ProfileData/MemProf.h
index 0e4bb9cc3c6cb..8f5ba9c333320 100644
--- a/llvm/include/llvm/ProfileData/MemProf.h
+++ b/llvm/include/llvm/ProfileData/MemProf.h
@@ -932,6 +932,18 @@ struct IndexedMemProfData {
llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>> CallStackData;
};
+struct FrameStat {
+ // The number of occurrences of a given FrameId.
+ uint64_t Count = 0;
+ // The sum of indexes where a given FrameId shows up.
+ uint64_t PositionSum = 0;
+};
+
+// Compute a histogram of Frames in call stacks.
+llvm::DenseMap<FrameId, FrameStat>
+computeFrameHistogram(llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>>
+ &MemProfCallStackData);
+
// Construct a radix tree of call stacks.
//
// A set of call stacks might look like:
@@ -1027,9 +1039,11 @@ class CallStackRadixTreeBuilder {
CallStackRadixTreeBuilder() = default;
// Build a radix tree array.
- void build(llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>>
- &&MemProfCallStackData,
- const llvm::DenseMap<FrameId, LinearFrameId> &MemProfFrameIndexes);
+ void
+ build(llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>>
+ &&MemProfCallStackData,
+ const llvm::DenseMap<FrameId, LinearFrameId> &MemProfFrameIndexes,
+ llvm::DenseMap<memprof::FrameId, memprof::FrameStat> &FrameHistogram);
const std::vector<LinearFrameId> &getRadixArray() const { return RadixArray; }
diff --git a/llvm/lib/ProfileData/InstrProfWriter.cpp b/llvm/lib/ProfileData/InstrProfWriter.cpp
index 7d7c980a9e11f..1a9add109a360 100644
--- a/llvm/lib/ProfileData/InstrProfWriter.cpp
+++ b/llvm/lib/ProfileData/InstrProfWriter.cpp
@@ -494,17 +494,40 @@ static uint64_t writeMemProfFrames(
static llvm::DenseMap<memprof::FrameId, memprof::LinearFrameId>
writeMemProfFrameArray(
ProfOStream &OS,
- llvm::MapVector<memprof::FrameId, memprof::Frame> &MemProfFrameData) {
+ llvm::MapVector<memprof::FrameId, memprof::Frame> &MemProfFrameData,
+ llvm::DenseMap<memprof::FrameId, memprof::FrameStat> &FrameHistogram) {
// Mappings from FrameIds to array indexes.
llvm::DenseMap<memprof::FrameId, memprof::LinearFrameId> MemProfFrameIndexes;
- // Sort the FrameIDs for stability.
+ // Compute the order in which we serialize Frames. The order does not matter
+ // in terms of correctness, but we still compute it for deserialization
+ // performance. Specifically, if we serialize frequently used Frames one
+ // after another, we have better cache utilization. For two Frames that
+ // appear equally frequently, we break a tie by serializing the one that tends
+ // to appear earlier in call stacks. We implement the tie-breaking mechanism
+ // by computing the sum of indexes within call stacks for each Frame. If we
+ // still have a tie, then we just resort to comparing two FrameIds, which is
+ // just for stability of output.
std::vector<std::pair<memprof::FrameId, const memprof::Frame *>> FrameIdOrder;
FrameIdOrder.reserve(MemProfFrameData.size());
for (const auto &[Id, Frame] : MemProfFrameData)
FrameIdOrder.emplace_back(Id, &Frame);
assert(MemProfFrameData.size() == FrameIdOrder.size());
- llvm::sort(FrameIdOrder);
+ llvm::sort(FrameIdOrder,
+ [&](const std::pair<memprof::FrameId, const memprof::Frame *> &L,
+ const std::pair<memprof::FrameId, const memprof::Frame *> &R) {
+ const auto &SL = FrameHistogram[L.first];
+ const auto &SR = FrameHistogram[R.first];
+ // Popular FrameIds should come first.
+ if (SL.Count != SR.Count)
+ return SL.Count > SR.Count;
+ // If they are equally popular, then the one that tends to appear
+ // earlier in call stacks should come first.
+ if (SL.PositionSum != SR.PositionSum)
+ return SL.PositionSum < SR.PositionSum;
+ // Compare their FrameIds for sort stability.
+ return L.first < R.first;
+ });
// Serialize all frames while creating mappings from linear IDs to FrameIds.
uint64_t Index = 0;
@@ -543,12 +566,14 @@ writeMemProfCallStackArray(
llvm::MapVector<memprof::CallStackId, llvm::SmallVector<memprof::FrameId>>
&MemProfCallStackData,
llvm::DenseMap<memprof::FrameId, memprof::LinearFrameId>
- &MemProfFrameIndexes) {
+ &MemProfFrameIndexes,
+ llvm::DenseMap<memprof::FrameId, memprof::FrameStat> &FrameHistogram) {
llvm::DenseMap<memprof::CallStackId, memprof::LinearCallStackId>
MemProfCallStackIndexes;
memprof::CallStackRadixTreeBuilder Builder;
- Builder.build(std::move(MemProfCallStackData), MemProfFrameIndexes);
+ Builder.build(std::move(MemProfCallStackData), MemProfFrameIndexes,
+ FrameHistogram);
for (auto I : Builder.getRadixArray())
OS.write32(I);
MemProfCallStackIndexes = Builder.takeCallStackPos();
@@ -704,13 +729,17 @@ static Error writeMemProfV3(ProfOStream &OS,
Schema = memprof::getFullSchema();
writeMemProfSchema(OS, Schema);
+ llvm::DenseMap<memprof::FrameId, memprof::FrameStat> FrameHistogram =
+ memprof::computeFrameHistogram(MemProfData.CallStackData);
+ assert(MemProfData.FrameData.size() == FrameHistogram.size());
+
llvm::DenseMap<memprof::FrameId, memprof::LinearFrameId> MemProfFrameIndexes =
- writeMemProfFrameArray(OS, MemProfData.FrameData);
+ writeMemProfFrameArray(OS, MemProfData.FrameData, FrameHistogram);
uint64_t CallStackPayloadOffset = OS.tell();
llvm::DenseMap<memprof::CallStackId, memprof::LinearCallStackId>
MemProfCallStackIndexes = writeMemProfCallStackArray(
- OS, MemProfData.CallStackData, MemProfFrameIndexes);
+ OS, MemProfData.CallStackData, MemProfFrameIndexes, FrameHistogram);
uint64_t RecordPayloadOffset = OS.tell();
uint64_t RecordTableOffset =
diff --git a/llvm/lib/ProfileData/MemProf.cpp b/llvm/lib/ProfileData/MemProf.cpp
index 620e2e2d71a0f..8e3053748c087 100644
--- a/llvm/lib/ProfileData/MemProf.cpp
+++ b/llvm/lib/ProfileData/MemProf.cpp
@@ -486,7 +486,8 @@ LinearCallStackId CallStackRadixTreeBuilder::encodeCallStack(
void CallStackRadixTreeBuilder::build(
llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>>
&&MemProfCallStackData,
- const llvm::DenseMap<FrameId, LinearFrameId> &MemProfFrameIndexes) {
+ const llvm::DenseMap<FrameId, LinearFrameId> &MemProfFrameIndexes,
+ llvm::DenseMap<memprof::FrameId, memprof::FrameStat> &FrameHistogram) {
// Take the vector portion of MemProfCallStackData. The vector is exactly
// what we need to sort. Also, we no longer need its lookup capability.
llvm::SmallVector<CSIdPair, 0> CallStacks = MemProfCallStackData.takeVector();
@@ -498,14 +499,56 @@ void CallStackRadixTreeBuilder::build(
return;
}
- // Sort the list of call stacks in the dictionary order to maximize the length
- // of the common prefix between two adjacent call stacks.
+ // Sorting the list of call stacks in the dictionary order is sufficient to
+ // maximize the length of the common prefix between two adjacent call stacks
+ // and thus minimize the length of RadixArray. However, we go one step
+ // further and try to reduce the number of times we follow pointers to parents
+ // during deserialization. Consider a poorly encoded radix tree:
+ //
+ // CallStackId 1: f1 -> f2 -> f3
+ // |
+ // CallStackId 2: +--- f4 -> f5
+ // |
+ // CallStackId 3: +--> f6
+ //
+ // Here, f2 and f4 appear once and twice, respectively, in the call stacks.
+ // Once we encode CallStackId 1 into RadixArray, every other call stack with
+ // common prefix f1 ends up pointing to CallStackId 1. Since CallStackId 3
+ // share "f1 f4" with CallStackId 2, CallStackId 3 needs to follow pointers to
+ // parents twice.
+ //
+ // We try to alleviate the situation by sorting the list of call stacks by
+ // comparing the popularity of frames rather than the integer values of
+ // FrameIds. In the example above, f4 is more popular than f2, so we sort the
+ // call stacks and encode them as:
+ //
+ // CallStackId 2: f1 -- f4 -> f5
+ // | |
+ // CallStackId 3: | +--> f6
+ // |
+ // CallStackId 1: +--> f2 -> f3
+ //
+ // Notice that CallStackId 3 follows a pointer to a parent only once.
+ //
+ // All this is a quick-n-dirty trick to reduce the number of jumps. The
+ // proper way would be to compute the weight of each radix tree node -- how
+ // many call stacks use a given radix tree node, and encode a radix tree from
+ // the heaviest node first. We do not do so because that's a lot of work.
llvm::sort(CallStacks, [&](const CSIdPair &L, const CSIdPair &R) {
// Call stacks are stored from leaf to root. Perform comparisons from the
// root.
return std::lexicographical_compare(
L.second.rbegin(), L.second.rend(), R.second.rbegin(), R.second.rend(),
- [&](FrameId F1, FrameId F2) { return F1 < F2; });
+ [&](FrameId F1, FrameId F2) {
+ uint64_t H1 = FrameHistogram[F1].Count;
+ uint64_t H2 = FrameHistogram[F2].Count;
+ // Popular frames should come later because we encode call stacks from
+ // the last one in the list.
+ if (H1 != H2)
+ return H1 < H2;
+ // For sort stability.
+ return F1 < F2;
+ });
});
// Reserve some reasonable amount of storage.
@@ -569,6 +612,22 @@ void CallStackRadixTreeBuilder::build(
V = RadixArray.size() - 1 - V;
}
+llvm::DenseMap<FrameId, FrameStat>
+computeFrameHistogram(llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>>
+ &MemProfCallStackData) {
+ llvm::DenseMap<FrameId, FrameStat> Histogram;
+
+ for (const auto &KV : MemProfCallStackData) {
+ const auto &CS = KV.second;
+ for (unsigned I = 0, E = CS.size(); I != E; ++I) {
+ auto &S = Histogram[CS[I]];
+ ++S.Count;
+ S.PositionSum += I;
+ }
+ }
+ return Histogram;
+}
+
void verifyIndexedMemProfRecord(const IndexedMemProfRecord &Record) {
for (const auto &AS : Record.AllocSites) {
assert(AS.CSId == hashCallStack(AS.CallStack));
diff --git a/llvm/unittests/ProfileData/MemProfTest.cpp b/llvm/unittests/ProfileData/MemProfTest.cpp
index 26421200e1a11..15eb59ee00c94 100644
--- a/llvm/unittests/ProfileData/MemProfTest.cpp
+++ b/llvm/unittests/ProfileData/MemProfTest.cpp
@@ -667,8 +667,12 @@ TEST(MemProf, MissingFrameId) {
TEST(MemProf, RadixTreeBuilderEmpty) {
llvm::DenseMap<FrameId, llvm::memprof::LinearFrameId> MemProfFrameIndexes;
llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>> MemProfCallStackData;
+ llvm::DenseMap<llvm::memprof::FrameId, llvm::memprof::FrameStat>
+ FrameHistogram =
+ llvm::memprof::computeFrameHistogram(MemProfCallStackData);
llvm::memprof::CallStackRadixTreeBuilder Builder;
- Builder.build(std::move(MemProfCallStackData), MemProfFrameIndexes);
+ Builder.build(std::move(MemProfCallStackData), MemProfFrameIndexes,
+ FrameHistogram);
ASSERT_THAT(Builder.getRadixArray(), testing::IsEmpty());
const auto Mappings = Builder.takeCallStackPos();
ASSERT_THAT(Mappings, testing::IsEmpty());
@@ -681,8 +685,12 @@ TEST(MemProf, RadixTreeBuilderOne) {
llvm::SmallVector<llvm::memprof::FrameId> CS1 = {13, 12, 11};
llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>> MemProfCallStackData;
MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS1), CS1});
+ llvm::DenseMap<llvm::memprof::FrameId, llvm::memprof::FrameStat>
+ FrameHistogram =
+ llvm::memprof::computeFrameHistogram(MemProfCallStackData);
llvm::memprof::CallStackRadixTreeBuilder Builder;
- Builder.build(std::move(MemProfCallStackData), MemProfFrameIndexes);
+ Builder.build(std::move(MemProfCallStackData), MemProfFrameIndexes,
+ FrameHistogram);
EXPECT_THAT(Builder.getRadixArray(), testing::ElementsAreArray({
3U, // Size of CS1,
3U, // MemProfFrameIndexes[13]
@@ -704,8 +712,12 @@ TEST(MemProf, RadixTreeBuilderTwo) {
llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>> MemProfCallStackData;
MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS1), CS1});
MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS2), CS2});
+ llvm::DenseMap<llvm::memprof::FrameId, llvm::memprof::FrameStat>
+ FrameHistogram =
+ llvm::memprof::computeFrameHistogram(MemProfCallStackData);
llvm::memprof::CallStackRadixTreeBuilder Builder;
- Builder.build(std::move(MemProfCallStackData), MemProfFrameIndexes);
+ Builder.build(std::move(MemProfCallStackData), MemProfFrameIndexes,
+ FrameHistogram);
EXPECT_THAT(Builder.getRadixArray(),
testing::ElementsAreArray({
2U, // Size of CS1
@@ -738,8 +750,12 @@ TEST(MemProf, RadixTreeBuilderSuccessiveJumps) {
MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS2), CS2});
MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS3), CS3});
MemProfCallStackData.insert({llvm::memprof::hashCallStack(CS4), CS4});
+ llvm::DenseMap<llvm::memprof::FrameId, llvm::memprof::FrameStat>
+ FrameHistogram =
+ llvm::memprof::computeFrameHistogram(MemProfCallStackData);
llvm::memprof::CallStackRadixTreeBuilder Builder;
- Builder.build(std::move(MemProfCallStackData), MemProfFrameIndexes);
+ Builder.build(std::move(MemProfCallStackData), MemProfFrameIndexes,
+ FrameHistogram);
EXPECT_THAT(Builder.getRadixArray(),
testing::ElementsAreArray({
4U, // Size of CS1
>From d9e5e2a315ff3a2e27cde123d94c9939ebacb7db Mon Sep 17 00:00:00 2001
From: csstormq <swust_xiaoqiangxu at 163.com>
Date: Sat, 8 Jun 2024 08:38:27 +0800
Subject: [PATCH 17/57] [InstCombine] Preserve the nsw/nuw flags for (X |
Op01C) + Op1C --> X + (Op01C + Op1C) (#94586)
This patch preserves the `nsw` flag for
`(X | Op01C) + Op1C --> X + (Op01C + Op1C)` when the sum of `Op01C` and
`Op1C` cannot overflow, and preserves the `nuw` flag unconditionally;
keeping `nsw` in turn lets a later fold simplify `sdiv` to `udiv`.
Alive2 Proofs (provided by @nikic): https://alive2.llvm.org/ce/z/nrdCZT,
https://alive2.llvm.org/ce/z/YnJHnH
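As a quick illustration of the overflow condition (a hedged sketch built
on llvm::APInt::sadd_ov, not the exact InstCombine code path):

#include "llvm/ADT/APInt.h"

// nsw may be kept on X + (Op01C + Op1C) only when the constant sum itself
// cannot wrap; e.g. for i8, -128 + 8 = -120 is fine, but -128 + (-8) wraps.
static bool constantSumHasNoSignedWrap(const llvm::APInt &Op01C,
                                       const llvm::APInt &Op1C) {
  bool Overflow = false;
  (void)Op01C.sadd_ov(Op1C, Overflow);
  return !Overflow;
}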
---
.../InstCombine/InstCombineAddSub.cpp | 10 ++++-
llvm/test/Transforms/InstCombine/add.ll | 40 +++++++++++++++++++
.../InstCombine/sadd-with-overflow.ll | 2 +-
.../AArch64/matrix-extract-insert.ll | 18 ++++-----
4 files changed, 58 insertions(+), 12 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 8205b49dfbe2f..0a73c58c07409 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -905,8 +905,14 @@ Instruction *InstCombinerImpl::foldAddWithConstant(BinaryOperator &Add) {
// (X | Op01C) + Op1C --> X + (Op01C + Op1C) iff the `or` is actually an `add`
Constant *Op01C;
- if (match(Op0, m_DisjointOr(m_Value(X), m_ImmConstant(Op01C))))
- return BinaryOperator::CreateAdd(X, ConstantExpr::getAdd(Op01C, Op1C));
+ if (match(Op0, m_DisjointOr(m_Value(X), m_ImmConstant(Op01C)))) {
+ BinaryOperator *NewAdd =
+ BinaryOperator::CreateAdd(X, ConstantExpr::getAdd(Op01C, Op1C));
+ NewAdd->setHasNoSignedWrap(Add.hasNoSignedWrap() &&
+ willNotOverflowSignedAdd(Op01C, Op1C, Add));
+ NewAdd->setHasNoUnsignedWrap(Add.hasNoUnsignedWrap());
+ return NewAdd;
+ }
// (X | C2) + C --> (X | C2) ^ C2 iff (C2 == -C)
const APInt *C2;
diff --git a/llvm/test/Transforms/InstCombine/add.ll b/llvm/test/Transforms/InstCombine/add.ll
index 25087fef68a11..239e14682c306 100644
--- a/llvm/test/Transforms/InstCombine/add.ll
+++ b/llvm/test/Transforms/InstCombine/add.ll
@@ -1510,6 +1510,46 @@ define i8 @add_like_or_t2_extrause(i8 %x) {
%r = add i8 %i1, 42
ret i8 %r
}
+define i8 @fold_add_constant_preserve_nsw(i8 %x) {
+; CHECK-LABEL: @fold_add_constant_preserve_nsw(
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i8 [[X:%.*]], -120
+; CHECK-NEXT: ret i8 [[ADD]]
+;
+ %or = or disjoint i8 %x, -128
+ %add = add nsw i8 %or, 8
+ ret i8 %add
+}
+define i8 @fold_add_constant_no_nsw(i8 %x) {
+; CHECK-LABEL: @fold_add_constant_no_nsw(
+; CHECK-NEXT: [[ADD:%.*]] = add i8 [[X:%.*]], 120
+; CHECK-NEXT: ret i8 [[ADD]]
+;
+ %or = or disjoint i8 %x, -128
+ %add = add nsw i8 %or, -8
+ ret i8 %add
+}
+define i8 @fold_add_constant_preserve_nuw(i8 %x) {
+; CHECK-LABEL: @fold_add_constant_preserve_nuw(
+; CHECK-NEXT: [[ADD:%.*]] = add nuw i8 [[X:%.*]], -116
+; CHECK-NEXT: ret i8 [[ADD]]
+;
+ %or = or disjoint i8 %x, 128
+ %add = add nuw i8 %or, 12
+ ret i8 %add
+}
+define i32 @sdiv_to_udiv(i32 %arg0, i32 %arg1) {
+; CHECK-LABEL: @sdiv_to_udiv(
+; CHECK-NEXT: [[T0:%.*]] = shl nuw nsw i32 [[ARG0:%.*]], 8
+; CHECK-NEXT: [[T2:%.*]] = add nuw nsw i32 [[T0]], 6242049
+; CHECK-NEXT: [[T3:%.*]] = udiv i32 [[T2]], 192
+; CHECK-NEXT: ret i32 [[T3]]
+;
+ %t0 = shl nuw nsw i32 %arg0, 8
+ %t1 = or disjoint i32 %t0, 1
+ %t2 = add nuw nsw i32 %t1, 6242048
+ %t3 = sdiv i32 %t2, 192
+ ret i32 %t3
+}
define i8 @add_like_or_disjoint(i8 %x) {
; CHECK-LABEL: @add_like_or_disjoint(
diff --git a/llvm/test/Transforms/InstCombine/sadd-with-overflow.ll b/llvm/test/Transforms/InstCombine/sadd-with-overflow.ll
index 729ca03ddfd15..e4dd2d10637d3 100644
--- a/llvm/test/Transforms/InstCombine/sadd-with-overflow.ll
+++ b/llvm/test/Transforms/InstCombine/sadd-with-overflow.ll
@@ -125,7 +125,7 @@ define { i32, i1 } @fold_sub_simple(i32 %x) {
define { i32, i1 } @fold_with_distjoin_or(i32 %x) {
; CHECK-LABEL: @fold_with_distjoin_or(
-; CHECK-NEXT: [[B:%.*]] = add i32 [[X:%.*]], 6
+; CHECK-NEXT: [[B:%.*]] = add nsw i32 [[X:%.*]], 6
; CHECK-NEXT: [[TMP1:%.*]] = insertvalue { i32, i1 } { i32 poison, i1 false }, i32 [[B]], 0
; CHECK-NEXT: ret { i32, i1 } [[TMP1]]
;
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
index 5cbf50e06fbe8..db16413cdc94a 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
@@ -182,11 +182,11 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
; CHECK: vector.body.1:
; CHECK-NEXT: [[INDEX_1:%.*]] = phi i64 [ 0, [[VECTOR_PH_1]] ], [ [[INDEX_NEXT_1:%.*]], [[VECTOR_BODY_1]] ]
; CHECK-NEXT: [[TMP33:%.*]] = add nuw nsw i64 [[INDEX_1]], 15
-; CHECK-NEXT: [[TMP34:%.*]] = add i64 [[INDEX_1]], 16
+; CHECK-NEXT: [[TMP34:%.*]] = add nuw nsw i64 [[INDEX_1]], 16
; CHECK-NEXT: [[TMP35:%.*]] = insertelement <2 x i64> poison, i64 [[TMP33]], i64 0
; CHECK-NEXT: [[TMP36:%.*]] = insertelement <2 x i64> [[TMP35]], i64 [[TMP34]], i64 1
-; CHECK-NEXT: [[TMP37:%.*]] = add i64 [[INDEX_1]], 17
-; CHECK-NEXT: [[TMP38:%.*]] = add i64 [[INDEX_1]], 18
+; CHECK-NEXT: [[TMP37:%.*]] = add nuw nsw i64 [[INDEX_1]], 17
+; CHECK-NEXT: [[TMP38:%.*]] = add nuw nsw i64 [[INDEX_1]], 18
; CHECK-NEXT: [[TMP39:%.*]] = insertelement <2 x i64> poison, i64 [[TMP37]], i64 0
; CHECK-NEXT: [[TMP40:%.*]] = insertelement <2 x i64> [[TMP39]], i64 [[TMP38]], i64 1
; CHECK-NEXT: [[TMP41:%.*]] = icmp ult <2 x i64> [[TMP36]], <i64 225, i64 225>
@@ -259,11 +259,11 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
; CHECK: vector.body.2:
; CHECK-NEXT: [[INDEX_2:%.*]] = phi i64 [ 0, [[VECTOR_PH_2]] ], [ [[INDEX_NEXT_2:%.*]], [[VECTOR_BODY_2]] ]
; CHECK-NEXT: [[TMP64:%.*]] = add nuw nsw i64 [[INDEX_2]], 30
-; CHECK-NEXT: [[TMP65:%.*]] = add i64 [[INDEX_2]], 31
+; CHECK-NEXT: [[TMP65:%.*]] = add nuw nsw i64 [[INDEX_2]], 31
; CHECK-NEXT: [[TMP66:%.*]] = insertelement <2 x i64> poison, i64 [[TMP64]], i64 0
; CHECK-NEXT: [[TMP67:%.*]] = insertelement <2 x i64> [[TMP66]], i64 [[TMP65]], i64 1
-; CHECK-NEXT: [[TMP68:%.*]] = add i64 [[INDEX_2]], 32
-; CHECK-NEXT: [[TMP69:%.*]] = add i64 [[INDEX_2]], 33
+; CHECK-NEXT: [[TMP68:%.*]] = add nuw nsw i64 [[INDEX_2]], 32
+; CHECK-NEXT: [[TMP69:%.*]] = add nuw nsw i64 [[INDEX_2]], 33
; CHECK-NEXT: [[TMP70:%.*]] = insertelement <2 x i64> poison, i64 [[TMP68]], i64 0
; CHECK-NEXT: [[TMP71:%.*]] = insertelement <2 x i64> [[TMP70]], i64 [[TMP69]], i64 1
; CHECK-NEXT: [[TMP72:%.*]] = icmp ult <2 x i64> [[TMP67]], <i64 225, i64 225>
@@ -336,11 +336,11 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
; CHECK: vector.body.3:
; CHECK-NEXT: [[INDEX_3:%.*]] = phi i64 [ 0, [[VECTOR_PH_3]] ], [ [[INDEX_NEXT_3:%.*]], [[VECTOR_BODY_3]] ]
; CHECK-NEXT: [[TMP95:%.*]] = add nuw nsw i64 [[INDEX_3]], 45
-; CHECK-NEXT: [[TMP96:%.*]] = add i64 [[INDEX_3]], 46
+; CHECK-NEXT: [[TMP96:%.*]] = add nuw nsw i64 [[INDEX_3]], 46
; CHECK-NEXT: [[TMP97:%.*]] = insertelement <2 x i64> poison, i64 [[TMP95]], i64 0
; CHECK-NEXT: [[TMP98:%.*]] = insertelement <2 x i64> [[TMP97]], i64 [[TMP96]], i64 1
-; CHECK-NEXT: [[TMP99:%.*]] = add i64 [[INDEX_3]], 47
-; CHECK-NEXT: [[TMP100:%.*]] = add i64 [[INDEX_3]], 48
+; CHECK-NEXT: [[TMP99:%.*]] = add nuw nsw i64 [[INDEX_3]], 47
+; CHECK-NEXT: [[TMP100:%.*]] = add nuw nsw i64 [[INDEX_3]], 48
; CHECK-NEXT: [[TMP101:%.*]] = insertelement <2 x i64> poison, i64 [[TMP99]], i64 0
; CHECK-NEXT: [[TMP102:%.*]] = insertelement <2 x i64> [[TMP101]], i64 [[TMP100]], i64 1
; CHECK-NEXT: [[TMP103:%.*]] = icmp ult <2 x i64> [[TMP98]], <i64 225, i64 225>
>From 7221a57a4db5704bf260f02497cbcf64f1a374e2 Mon Sep 17 00:00:00 2001
From: Paul Kirth <paulkirth at google.com>
Date: Fri, 7 Jun 2024 17:56:35 -0700
Subject: [PATCH 18/57] [lld] Discard SHT_LLVM_LTO sections in relocatable
links (#92825)
So long as ld -r links using bitcode always result in an ELF object, and
not a merged bitcode object, the output from a relocatable link using
FatLTO objects should not have a .llvm.lto section. Prior to this, using
the object code sections would cause the bitcode section in the output
of a relocatable link to be corrupted by concatenating all the .llvm.lto
sections together.
This patch discards SHT_LLVM_LTO sections when not using
--fat-lto-objects, so that the relocatable ELF output won't contain
invalid bitcode.
---
lld/ELF/InputFiles.cpp | 10 ++++++++++
lld/test/ELF/fatlto/fatlto.test | 17 ++++-------------
2 files changed, 14 insertions(+), 13 deletions(-)
diff --git a/lld/ELF/InputFiles.cpp b/lld/ELF/InputFiles.cpp
index 9021bbd91b5f7..e6a0a5be821e0 100644
--- a/lld/ELF/InputFiles.cpp
+++ b/lld/ELF/InputFiles.cpp
@@ -844,6 +844,16 @@ void ObjFile<ELFT>::initializeSections(bool ignoreComdats,
this->sections[i] =
createInputSection(i, sec, check(obj.getSectionName(sec, shstrtab)));
break;
+ case SHT_LLVM_LTO:
+ // Discard .llvm.lto in a relocatable link that does not use the bitcode.
+ // The concatenated output does not properly reflect the linking
+ // semantics. In addition, since we do not use the bitcode wrapper format,
+ // the concatenated raw bitcode would be invalid.
+ if (config->relocatable && !config->fatLTOObjects) {
+ sections[i] = &InputSection::discarded;
+ break;
+ }
+ [[fallthrough]];
default:
this->sections[i] =
createInputSection(i, sec, check(obj.getSectionName(sec, shstrtab)));
diff --git a/lld/test/ELF/fatlto/fatlto.test b/lld/test/ELF/fatlto/fatlto.test
index ed137087746e9..7ec094d935cc5 100644
--- a/lld/test/ELF/fatlto/fatlto.test
+++ b/lld/test/ELF/fatlto/fatlto.test
@@ -8,7 +8,6 @@
; RUN: opt < a-LTO.ll --module-summary -o a-fatLTO.bc
; RUN: llvm-objcopy --add-section=.llvm.lto=a-fatLTO.bc --set-section-flags=.llvm.lto=exclude --set-section-type=.llvm.lto=0x6fff4c0c a-fatLTO.o
-
; RUN: llc main-LTO.ll --filetype=obj -o main-fatLTO.o --relocation-model=pic
; RUN: opt < main-LTO.ll --module-summary -o main-fatLTO.bc
; RUN: llvm-objcopy --add-section=.llvm.lto=main-fatLTO.bc --set-section-flags=.llvm.lto=exclude --set-section-type=.llvm.lto=0x6fff4c0c main-fatLTO.o
@@ -17,11 +16,6 @@
; RUN: llvm-readelf -S main-fatLTO.o | FileCheck --check-prefix=HAS_LLVM_LTO %s
;; Make sure that the section flags are set correctly
-; HA_LLVM_LTO: Name: .llvm.lto
-; HA_LLVM_LTO-NEXT: Type: SHT_LLVM_LTO
-; HA_LLVM_LTO-NEXT: Flags
-; HA_LLVM_LTO-NEXT: SHF_EXCLUDE
-
; HAS_LLVM_LTO: Name Type Address Off Size ES Flg Lk Inf Al
; HAS_LLVM_LTO: .llvm.lto LLVM_LTO {{.*}} 00 WE 0 0 1
@@ -64,16 +58,13 @@
; RUN: ld.lld -o foo-fatLTO.archive a.a main-LTO.bc --fat-lto-objects
; RUN: cmp foo-fatLTO.archive foo-LTO
-;; Test FatLTO works with relocatable links using PIC objects
-;; Currently, with PIC relocatable links, FatLTO sections are treated as
-;; orphan sections and incorrectly concatenated together. This test verifies
-;; the current behavior, but should be fixed to either merge those sections
-;; correctly, or to drop them altogether.
+;; Test FatLTO works with relocatable links using PIC objects, and that
+;; SHT_LLVM_LTO sections are discarded.
; RUN: llvm-ar rcs fatLTO-pic.a a-fatLTO.o main-fatLTO.o
; RUN: llvm-readelf -S fatLTO-pic.a | FileCheck --check-prefix=HAS_LLVM_LTO %s
-; RUN: ld.lld --whole-archive fatLTO-pic.a -r -o fatLTO-pic-reolcatable.o
-; RUN: llvm-readelf -S fatLTO-pic-reolcatable.o | FileCheck --check-prefix=HAS_LLVM_LTO %s
+; RUN: ld.lld --whole-archive fatLTO-pic.a -r -o fatLTO-pic-relocatable.o
+; RUN: llvm-readelf -S fatLTO-pic-relocatable.o | FileCheck --check-prefix=CHECK-NON-LTO-TARGET %s
;--- a-LTO.ll
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
>From 07848d5a3f8e3ef2e22d25d47526e0108195cf00 Mon Sep 17 00:00:00 2001
From: Kazu Hirata <kazu at google.com>
Date: Fri, 7 Jun 2024 18:18:31 -0700
Subject: [PATCH 19/57] [ProfileData] Use default member initialization (NFC)
(#94817)
While we are at it, this patch changes the type of ValueCounts to
std::array<double, ...> so that we can use std::array::fill.
Identified with modernize-use-default-member-init.
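A minimal sketch of the idiom being adopted (standard library only; the
struct and array size here are illustrative, not the patch's exact types):

#include <array>

struct Counts {
  std::array<double, 4> ValueCounts = {}; // zero-initialized in-class
  void reset() { ValueCounts.fill(0.0); } // replaces the element-wise loop
};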
---
llvm/include/llvm/ProfileData/InstrProf.h | 23 ++++++++++-------------
1 file changed, 10 insertions(+), 13 deletions(-)
diff --git a/llvm/include/llvm/ProfileData/InstrProf.h b/llvm/include/llvm/ProfileData/InstrProf.h
index 15b9eb688e27e..61bf9492acb8e 100644
--- a/llvm/include/llvm/ProfileData/InstrProf.h
+++ b/llvm/include/llvm/ProfileData/InstrProf.h
@@ -737,15 +737,14 @@ GlobalVariable *InstrProfSymtab::getGlobalVariable(uint64_t MD5Hash) {
// To store the sums of profile count values, or the percentage of
// the sums of the total count values.
struct CountSumOrPercent {
- uint64_t NumEntries;
- double CountSum;
- double ValueCounts[IPVK_Last - IPVK_First + 1];
- CountSumOrPercent() : NumEntries(0), CountSum(0.0f), ValueCounts() {}
+ uint64_t NumEntries = 0;
+ double CountSum = 0.0f;
+ std::array<double, IPVK_Last - IPVK_First + 1> ValueCounts = {};
+ CountSumOrPercent() = default;
void reset() {
NumEntries = 0;
CountSum = 0.0f;
- for (double &VC : ValueCounts)
- VC = 0.0f;
+ ValueCounts.fill(0.0f);
}
};
@@ -761,15 +760,13 @@ struct OverlapStats {
CountSumOrPercent Mismatch;
CountSumOrPercent Unique;
OverlapStatsLevel Level;
- const std::string *BaseFilename;
- const std::string *TestFilename;
+ const std::string *BaseFilename = nullptr;
+ const std::string *TestFilename = nullptr;
StringRef FuncName;
- uint64_t FuncHash;
- bool Valid;
+ uint64_t FuncHash = 0;
+ bool Valid = false;
- OverlapStats(OverlapStatsLevel L = ProgramLevel)
- : Level(L), BaseFilename(nullptr), TestFilename(nullptr), FuncHash(0),
- Valid(false) {}
+ OverlapStats(OverlapStatsLevel L = ProgramLevel) : Level(L) {}
void dump(raw_fd_ostream &OS) const;
>From ff428e73dbc5d8d7e18beecc96ea22c09b4d6a4c Mon Sep 17 00:00:00 2001
From: Kazu Hirata <kazu at google.com>
Date: Fri, 7 Jun 2024 18:25:49 -0700
Subject: [PATCH 20/57] [ProfileData] Use DenseMap::lookup (NFC) (#94818)
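For context, DenseMap::lookup returns the mapped value when the key is
present and a value-initialized one (nullptr for pointer values) otherwise,
which is what makes the simplification below behavior-preserving. A small
hedged sketch with illustrative types:

#include "llvm/ADT/DenseMap.h"
#include <cstdint>

int lookupOrZero(const llvm::DenseMap<uint64_t, int> &M, uint64_t Key) {
  // Returns the value mapped to Key if present, otherwise 0.
  return M.lookup(Key);
}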
---
llvm/include/llvm/ProfileData/InstrProf.h | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/llvm/include/llvm/ProfileData/InstrProf.h b/llvm/include/llvm/ProfileData/InstrProf.h
index 61bf9492acb8e..d6831eeaa794b 100644
--- a/llvm/include/llvm/ProfileData/InstrProf.h
+++ b/llvm/include/llvm/ProfileData/InstrProf.h
@@ -729,9 +729,7 @@ Function* InstrProfSymtab::getFunction(uint64_t FuncMD5Hash) {
}
GlobalVariable *InstrProfSymtab::getGlobalVariable(uint64_t MD5Hash) {
- if (auto Iter = MD5VTableMap.find(MD5Hash); Iter != MD5VTableMap.end())
- return Iter->second;
- return nullptr;
+ return MD5VTableMap.lookup(MD5Hash);
}
// To store the sums of profile count values, or the percentage of
>From 2161567bae6f654429f6559e57604cc3af004b55 Mon Sep 17 00:00:00 2001
From: Nico Weber <thakis at chromium.org>
Date: Fri, 7 Jun 2024 21:24:25 -0400
Subject: [PATCH 21/57] [gn build] Port 37e309f16354 (AArch64 loopvectorize)
---
llvm/utils/gn/secondary/llvm/lib/Target/AArch64/BUILD.gn | 2 +-
llvm/utils/gn/secondary/llvm/lib/Transforms/Vectorize/BUILD.gn | 1 +
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/llvm/utils/gn/secondary/llvm/lib/Target/AArch64/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Target/AArch64/BUILD.gn
index 879b7f0960a90..2ffe83da90eed 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Target/AArch64/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Target/AArch64/BUILD.gn
@@ -103,6 +103,7 @@ static_library("LLVMAArch64CodeGen") {
"//llvm/lib/Transforms/CFGuard",
"//llvm/lib/Transforms/Scalar",
"//llvm/lib/Transforms/Utils",
+ "//llvm/lib/Transforms/Vectorize",
]
include_dirs = [ "." ]
sources = [
@@ -130,7 +131,6 @@ static_library("LLVMAArch64CodeGen") {
"AArch64ISelLowering.cpp",
"AArch64InstrInfo.cpp",
"AArch64LoadStoreOptimizer.cpp",
- "AArch64LoopIdiomTransform.cpp",
"AArch64LowerHomogeneousPrologEpilog.cpp",
"AArch64MCInstLower.cpp",
"AArch64MIPeepholeOpt.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/lib/Transforms/Vectorize/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Transforms/Vectorize/BUILD.gn
index 044b781e7c31f..92337a547eaa3 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Transforms/Vectorize/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Transforms/Vectorize/BUILD.gn
@@ -8,6 +8,7 @@ static_library("Vectorize") {
]
sources = [
"LoadStoreVectorizer.cpp",
+ "LoopIdiomVectorize.cpp",
"LoopVectorizationLegality.cpp",
"LoopVectorize.cpp",
"SLPVectorizer.cpp",
>From 3f2aff009dd9746c8ec58918387d2b31330594ed Mon Sep 17 00:00:00 2001
From: Kazu Hirata <kazu at google.com>
Date: Fri, 7 Jun 2024 18:32:58 -0700
Subject: [PATCH 22/57] [memprof] Remove extraneous memprof:: (NFC) (#94825)
---
llvm/include/llvm/ProfileData/MemProf.h | 20 +++++++++----------
llvm/include/llvm/ProfileData/MemProfReader.h | 4 ++--
llvm/lib/ProfileData/MemProf.cpp | 8 ++++----
3 files changed, 15 insertions(+), 17 deletions(-)
diff --git a/llvm/include/llvm/ProfileData/MemProf.h b/llvm/include/llvm/ProfileData/MemProf.h
index 8f5ba9c333320..a6501493172b2 100644
--- a/llvm/include/llvm/ProfileData/MemProf.h
+++ b/llvm/include/llvm/ProfileData/MemProf.h
@@ -439,7 +439,7 @@ struct IndexedMemProfRecord {
// on the schema provided in \p Schema.
void serialize(const MemProfSchema &Schema, raw_ostream &OS,
IndexedVersion Version,
- llvm::DenseMap<memprof::CallStackId, LinearCallStackId>
+ llvm::DenseMap<CallStackId, LinearCallStackId>
*MemProfCallStackIndexes = nullptr) const;
// Deserializes memprof records from the Buffer.
@@ -579,15 +579,14 @@ class RecordWriterTrait {
IndexedVersion Version;
// Mappings from CallStackId to the indexes into the call stack array.
- llvm::DenseMap<memprof::CallStackId, LinearCallStackId>
- *MemProfCallStackIndexes;
+ llvm::DenseMap<CallStackId, LinearCallStackId> *MemProfCallStackIndexes;
public:
// We do not support the default constructor, which does not set Version.
RecordWriterTrait() = delete;
- RecordWriterTrait(const MemProfSchema *Schema, IndexedVersion V,
- llvm::DenseMap<memprof::CallStackId, LinearCallStackId>
- *MemProfCallStackIndexes)
+ RecordWriterTrait(
+ const MemProfSchema *Schema, IndexedVersion V,
+ llvm::DenseMap<CallStackId, LinearCallStackId> *MemProfCallStackIndexes)
: Schema(Schema), Version(V),
MemProfCallStackIndexes(MemProfCallStackIndexes) {}
@@ -1039,11 +1038,10 @@ class CallStackRadixTreeBuilder {
CallStackRadixTreeBuilder() = default;
// Build a radix tree array.
- void
- build(llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>>
- &&MemProfCallStackData,
- const llvm::DenseMap<FrameId, LinearFrameId> &MemProfFrameIndexes,
- llvm::DenseMap<memprof::FrameId, memprof::FrameStat> &FrameHistogram);
+ void build(llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>>
+ &&MemProfCallStackData,
+ const llvm::DenseMap<FrameId, LinearFrameId> &MemProfFrameIndexes,
+ llvm::DenseMap<FrameId, FrameStat> &FrameHistogram);
const std::vector<LinearFrameId> &getRadixArray() const { return RadixArray; }
diff --git a/llvm/include/llvm/ProfileData/MemProfReader.h b/llvm/include/llvm/ProfileData/MemProfReader.h
index b42e4f5977740..f0286820fa462 100644
--- a/llvm/include/llvm/ProfileData/MemProfReader.h
+++ b/llvm/include/llvm/ProfileData/MemProfReader.h
@@ -76,8 +76,8 @@ class MemProfReader {
Callback =
std::bind(&MemProfReader::idToFrame, this, std::placeholders::_1);
- memprof::CallStackIdConverter<decltype(CSIdToCallStack)> CSIdConv(
- CSIdToCallStack, Callback);
+ CallStackIdConverter<decltype(CSIdToCallStack)> CSIdConv(CSIdToCallStack,
+ Callback);
const IndexedMemProfRecord &IndexedRecord = Iter->second;
GuidRecord = {
diff --git a/llvm/lib/ProfileData/MemProf.cpp b/llvm/lib/ProfileData/MemProf.cpp
index 8e3053748c087..4ca868722ec4e 100644
--- a/llvm/lib/ProfileData/MemProf.cpp
+++ b/llvm/lib/ProfileData/MemProf.cpp
@@ -343,15 +343,15 @@ MemProfRecord IndexedMemProfRecord::toMemProfRecord(
MemProfRecord Record;
Record.AllocSites.reserve(AllocSites.size());
- for (const memprof::IndexedAllocationInfo &IndexedAI : AllocSites) {
- memprof::AllocationInfo AI;
+ for (const IndexedAllocationInfo &IndexedAI : AllocSites) {
+ AllocationInfo AI;
AI.Info = IndexedAI.Info;
AI.CallStack = Callback(IndexedAI.CSId);
Record.AllocSites.push_back(std::move(AI));
}
Record.CallSites.reserve(CallSiteIds.size());
- for (memprof::CallStackId CSId : CallSiteIds)
+ for (CallStackId CSId : CallSiteIds)
Record.CallSites.push_back(Callback(CSId));
return Record;
@@ -487,7 +487,7 @@ void CallStackRadixTreeBuilder::build(
llvm::MapVector<CallStackId, llvm::SmallVector<FrameId>>
&&MemProfCallStackData,
const llvm::DenseMap<FrameId, LinearFrameId> &MemProfFrameIndexes,
- llvm::DenseMap<memprof::FrameId, memprof::FrameStat> &FrameHistogram) {
+ llvm::DenseMap<FrameId, FrameStat> &FrameHistogram) {
// Take the vector portion of MemProfCallStackData. The vector is exactly
// what we need to sort. Also, we no longer need its lookup capability.
llvm::SmallVector<CSIdPair, 0> CallStacks = MemProfCallStackData.takeVector();
>From bf50886cae24c25cf36e37c5055acdf28f2f99b7 Mon Sep 17 00:00:00 2001
From: LLVM GN Syncbot <llvmgnsyncbot at gmail.com>
Date: Sat, 8 Jun 2024 01:33:19 +0000
Subject: [PATCH 23/57] [gn build] Port c4f83a004bf3
---
.../gn/secondary/clang-tools-extra/clang-tidy/misc/BUILD.gn | 1 +
1 file changed, 1 insertion(+)
diff --git a/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/misc/BUILD.gn b/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/misc/BUILD.gn
index 36957f502c323..0dc5efc981c87 100644
--- a/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/misc/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang-tools-extra/clang-tidy/misc/BUILD.gn
@@ -55,5 +55,6 @@ static_library("misc") {
"UnusedParametersCheck.cpp",
"UnusedUsingDeclsCheck.cpp",
"UseAnonymousNamespaceCheck.cpp",
+ "UseInternalLinkageCheck.cpp",
]
}
>From 10b5dbfe0b68554c3438c6a28c684bac97dd5944 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Fri, 7 Jun 2024 17:28:59 -0700
Subject: [PATCH 24/57] [RISCV] Rename VPseudoVWALU_VV_VX_VI to VPseudoVWSLL.
NFC
The scheduler class name is hardcoded in the class, so it's not a
general class.
---
llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
index d091077f729b8..957d295d5eca0 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
@@ -452,16 +452,16 @@ multiclass VPseudoVCPOP {
}
}
-multiclass VPseudoVWALU_VV_VX_VI<Operand ImmType> {
+multiclass VPseudoVWSLL {
foreach m = MxListW in {
defvar mx = m.MX;
defm "" : VPseudoBinaryW_VV<m>,
SchedBinary<"WriteVWSLLV", "ReadVWSLLV", "ReadVWSLLV", mx,
forceMergeOpRead=true>;
- defm "" : VPseudoBinaryW_VX<m>,
+ defm "" : VPseudoBinaryW_VX<m>,
SchedBinary<"WriteVWSLLX", "ReadVWSLLV", "ReadVWSLLX", mx,
forceMergeOpRead=true>;
- defm "" : VPseudoBinaryW_VI<ImmType, m>,
+ defm "" : VPseudoBinaryW_VI<uimm5, m>,
SchedUnary<"WriteVWSLLI", "ReadVWSLLV", mx,
forceMergeOpRead=true>;
}
@@ -525,7 +525,7 @@ let Predicates = [HasStdExtZvbb] in {
defm PseudoVCLZ : VPseudoVCLZ;
defm PseudoVCTZ : VPseudoVCTZ;
defm PseudoVCPOP : VPseudoVCPOP;
- defm PseudoVWSLL : VPseudoVWALU_VV_VX_VI<uimm5>;
+ defm PseudoVWSLL : VPseudoVWSLL;
} // Predicates = [HasStdExtZvbb]
let Predicates = [HasStdExtZvbc] in {
>From 242715cb7493a2eab77afb1e1f600aade1e24b26 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Fri, 7 Jun 2024 17:34:11 -0700
Subject: [PATCH 25/57] [RISCV] Refactor VPseudoVROL and VPseudoVROR
multiclasses to use inheritance. NFC
VPseudoVROR can inherit from VPseudoVROL. Adjust the names to
VPseudoVROT_VV_VX and VPseudoVROT_VV_VX_VI.
---
llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td | 22 ++++++++--------------
1 file changed, 8 insertions(+), 14 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
index 957d295d5eca0..fd4823306b029 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
@@ -494,7 +494,7 @@ multiclass VPseudoVREV8 {
}
}
-multiclass VPseudoVROL {
+multiclass VPseudoVROT_VV_VX {
foreach m = MxList in {
defm "" : VPseudoBinaryV_VV<m>,
SchedBinary<"WriteVRotV", "ReadVRotV", "ReadVRotV", m.MX,
@@ -505,18 +505,12 @@ multiclass VPseudoVROL {
}
}
-multiclass VPseudoVROR<Operand ImmType> {
- defvar Constraint = "";
+multiclass VPseudoVROT_VV_VX_VI
+ : VPseudoVROT_VV_VX {
foreach m = MxList in {
- defvar mx = m.MX;
- defm "" : VPseudoBinaryV_VV<m>,
- SchedBinary<"WriteVRotV", "ReadVRotV", "ReadVRotV", mx,
- forceMergeOpRead=true>;
- defm "" : VPseudoBinaryV_VX<m>,
- SchedBinary<"WriteVRotX", "ReadVRotV", "ReadVRotX", mx,
- forceMergeOpRead=true>;
- defm "" : VPseudoBinaryV_VI<ImmType, m>,
- SchedUnary<"WriteVRotI", "ReadVRotV", mx, forceMergeOpRead=true>;
+ defm "" : VPseudoBinaryV_VI<uimm6, m>,
+ SchedUnary<"WriteVRotI", "ReadVRotV", m.MX,
+ forceMergeOpRead=true>;
}
}
@@ -537,8 +531,8 @@ let Predicates = [HasStdExtZvkb] in {
defm PseudoVANDN : VPseudoVANDN;
defm PseudoVBREV8 : VPseudoVBREV8;
defm PseudoVREV8 : VPseudoVREV8;
- defm PseudoVROL : VPseudoVROL;
- defm PseudoVROR : VPseudoVROR<uimm6>;
+ defm PseudoVROL : VPseudoVROT_VV_VX;
+ defm PseudoVROR : VPseudoVROT_VV_VX_VI;
} // Predicates = [HasStdExtZvkb]
let Predicates = [HasStdExtZvkg] in {
>From fcd2828ac0b2a694e80038546a741e3b94ae4a00 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Fri, 7 Jun 2024 17:49:50 -0700
Subject: [PATCH 26/57] [RISCV] Rename
VPseudoBinaryNoMaskTU->VPseudoBinaryNoMaskPolicy. NFC
These pseudoinstructions have a policy operand so calling them
TU is confusing.
---
.../Target/RISCV/RISCVInstrInfoVPseudos.td | 18 ++++++------
llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td | 28 +++++++++----------
2 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 818073d049192..ef52f57328f7b 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1234,11 +1234,11 @@ class VPseudoBinaryNoMask<VReg RetClass,
let HasSEWOp = 1;
}
-class VPseudoBinaryNoMaskTU<VReg RetClass,
- VReg Op1Class,
- DAGOperand Op2Class,
- string Constraint,
- int TargetConstraintType = 1> :
+class VPseudoBinaryNoMaskPolicy<VReg RetClass,
+ VReg Op1Class,
+ DAGOperand Op2Class,
+ string Constraint,
+ int TargetConstraintType = 1> :
Pseudo<(outs RetClass:$rd),
(ins RetClass:$merge, Op1Class:$rs2, Op2Class:$rs1, AVL:$vl,
ixlenimm:$sew, ixlenimm:$policy), []>,
@@ -2138,8 +2138,8 @@ multiclass VPseudoBinary<VReg RetClass,
bit Commutable = 0> {
let VLMul = MInfo.value, SEW=sew, isCommutable = Commutable in {
defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
- def suffix : VPseudoBinaryNoMaskTU<RetClass, Op1Class, Op2Class,
- Constraint, TargetConstraintType>;
+ def suffix : VPseudoBinaryNoMaskPolicy<RetClass, Op1Class, Op2Class,
+ Constraint, TargetConstraintType>;
def suffix # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
Constraint, TargetConstraintType>,
RISCVMaskedPseudo<MaskIdx=3>;
@@ -2197,8 +2197,8 @@ multiclass VPseudoBinaryEmul<VReg RetClass,
int sew = 0> {
let VLMul = lmul.value, SEW=sew in {
defvar suffix = !if(sew, "_" # lmul.MX # "_E" # sew, "_" # lmul.MX);
- def suffix # "_" # emul.MX : VPseudoBinaryNoMaskTU<RetClass, Op1Class, Op2Class,
- Constraint>;
+ def suffix # "_" # emul.MX : VPseudoBinaryNoMaskPolicy<RetClass, Op1Class, Op2Class,
+ Constraint>;
def suffix # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
Constraint>,
RISCVMaskedPseudo<MaskIdx=3>;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
index fd4823306b029..82b3b6165e968 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
@@ -247,23 +247,23 @@ class VPseudoTernaryNoMask_Zvk<VReg RetClass,
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
-multiclass VPseudoBinaryNoMaskTU_Zvk<VReg RetClass,
- VReg Op1Class,
- DAGOperand Op2Class,
- LMULInfo MInfo,
- string Constraint = "",
- int sew = 0> {
+multiclass VPseudoBinaryNoMaskPolicy_Zvk<VReg RetClass,
+ VReg Op1Class,
+ DAGOperand Op2Class,
+ LMULInfo MInfo,
+ string Constraint = "",
+ int sew = 0> {
let VLMul = MInfo.value, SEW=sew in {
defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
- def suffix : VPseudoBinaryNoMaskTU<RetClass, Op1Class, Op2Class,
- Constraint>;
+ def suffix : VPseudoBinaryNoMaskPolicy<RetClass, Op1Class, Op2Class,
+ Constraint>;
}
}
multiclass VPseudoTernaryNoMask_Zvk<VReg RetClass,
- VReg Op1Class,
- DAGOperand Op2Class,
- LMULInfo MInfo> {
+ VReg Op1Class,
+ DAGOperand Op2Class,
+ LMULInfo MInfo> {
let VLMul = MInfo.value in
def "_" # MInfo.MX : VPseudoTernaryNoMask_Zvk<RetClass, Op1Class, Op2Class>;
}
@@ -349,7 +349,7 @@ multiclass VPseudoVSHA2MS {
multiclass VPseudoVAESKF1 {
foreach m = MxListVF4 in {
defvar mx = m.MX;
- defm _VI : VPseudoBinaryNoMaskTU_Zvk<m.vrclass, m.vrclass, uimm5, m>,
+ defm _VI : VPseudoBinaryNoMaskPolicy_Zvk<m.vrclass, m.vrclass, uimm5, m>,
SchedBinary<"WriteVAESKF1V", "ReadVAESKF1V", "ReadVAESKF1V", mx,
forceMergeOpRead=true>;
}
@@ -384,7 +384,7 @@ multiclass VPseudoVSM3C {
multiclass VPseudoVSM4K {
foreach m = MxListVF4 in {
defvar mx = m.MX;
- defm _VI : VPseudoBinaryNoMaskTU_Zvk<m.vrclass, m.vrclass, uimm5, m>,
+ defm _VI : VPseudoBinaryNoMaskPolicy_Zvk<m.vrclass, m.vrclass, uimm5, m>,
SchedBinary<"WriteVSM4KV", "ReadVSM4KV", "ReadVSM4KV", mx,
forceMergeOpRead=true>;
}
@@ -393,7 +393,7 @@ multiclass VPseudoVSM4K {
multiclass VPseudoVSM3ME {
foreach m = MxListVF4 in {
defvar mx = m.MX;
- defm _VV : VPseudoBinaryNoMaskTU_Zvk<m.vrclass, m.vrclass, m.vrclass, m>,
+ defm _VV : VPseudoBinaryNoMaskPolicy_Zvk<m.vrclass, m.vrclass, m.vrclass, m>,
SchedBinary<"WriteVSM3MEV", "ReadVSM3MEV", "ReadVSM3MEV", mx,
forceMergeOpRead=true>;
}
>From 263a4a0fa74b08f016b3f6fe625adb40dad532c9 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Fri, 7 Jun 2024 18:04:04 -0700
Subject: [PATCH 27/57] [RISCV] Rename VPatBinarySwapped to VPatBinaryMSwapped.
NFC
This class is most closely related to VPatBinaryM.
---
.../Target/RISCV/RISCVInstrInfoVPseudos.td | 52 +++++++++----------
1 file changed, 26 insertions(+), 26 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index ef52f57328f7b..1a514812c7361 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -4161,15 +4161,15 @@ class VPatBinaryNoMaskTURoundingMode<string intrinsic_name,
GPR:$vl, sew, TU_MU)>;
-// Same as above but source operands are swapped.
-class VPatBinaryNoMaskSwapped<string intrinsic_name,
- string inst,
- ValueType result_type,
- ValueType op1_type,
- ValueType op2_type,
- int sew,
- VReg op1_reg_class,
- DAGOperand op2_kind> :
+// Same as VPatBinaryM but source operands are swapped.
+class VPatBinaryMSwapped<string intrinsic_name,
+ string inst,
+ ValueType result_type,
+ ValueType op1_type,
+ ValueType op2_type,
+ int sew,
+ VReg op1_reg_class,
+ DAGOperand op2_kind> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
(op2_type op2_kind:$rs2),
(op1_type op1_reg_class:$rs1),
@@ -4248,7 +4248,7 @@ class VPatBinaryMaskTARoundingMode<string intrinsic_name,
(XLenVT timm:$round),
GPR:$vl, sew, (XLenVT timm:$policy))>;
-// Same as above but source operands are swapped.
+// Same as VPatBinaryMask but source operands are swapped.
class VPatBinaryMaskSwapped<string intrinsic_name,
string inst,
ValueType result_type,
@@ -4798,18 +4798,18 @@ multiclass VPatBinaryRoundingMode<string intrinsic,
op2_kind>;
}
-multiclass VPatBinarySwapped<string intrinsic,
- string inst,
- ValueType result_type,
- ValueType op1_type,
- ValueType op2_type,
- ValueType mask_type,
- int sew,
- VReg result_reg_class,
- VReg op1_reg_class,
- DAGOperand op2_kind> {
- def : VPatBinaryNoMaskSwapped<intrinsic, inst, result_type, op1_type, op2_type,
- sew, op1_reg_class, op2_kind>;
+multiclass VPatBinaryMSwapped<string intrinsic,
+ string inst,
+ ValueType result_type,
+ ValueType op1_type,
+ ValueType op2_type,
+ ValueType mask_type,
+ int sew,
+ VReg result_reg_class,
+ VReg op1_reg_class,
+ DAGOperand op2_kind> {
+ def : VPatBinaryMSwapped<intrinsic, inst, result_type, op1_type, op2_type,
+ sew, op1_reg_class, op2_kind>;
def : VPatBinaryMaskSwapped<intrinsic, inst, result_type, op1_type, op2_type,
mask_type, sew, result_reg_class, op1_reg_class,
op2_kind>;
@@ -5406,10 +5406,10 @@ multiclass VPatBinarySwappedM_VV<string intrinsic, string instruction,
list<VTypeInfo> vtilist> {
foreach vti = vtilist in
let Predicates = GetVTypePredicates<vti>.Predicates in
- defm : VPatBinarySwapped<intrinsic, instruction # "_VV_" # vti.LMul.MX,
- vti.Mask, vti.Vector, vti.Vector, vti.Mask,
- vti.Log2SEW, VR,
- vti.RegClass, vti.RegClass>;
+ defm : VPatBinaryMSwapped<intrinsic, instruction # "_VV_" # vti.LMul.MX,
+ vti.Mask, vti.Vector, vti.Vector, vti.Mask,
+ vti.Log2SEW, VR,
+ vti.RegClass, vti.RegClass>;
}
multiclass VPatBinaryM_VX<string intrinsic, string instruction,
>From 3a8a0c852a23067204810f264d72135a21b30a16 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Fri, 7 Jun 2024 18:42:21 -0700
Subject: [PATCH 28/57] [RISCV] Flatten VPatBinaryW_VI_VWSLL and
VPatBinaryW_VX_VWSLL into VPatBinaryW_VV_VX_VI_VWSLL. NFC
---
llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td | 38 ++++++++--------------
1 file changed, 13 insertions(+), 25 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
index 82b3b6165e968..98b5aeef9fe2d 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
@@ -1031,39 +1031,27 @@ multiclass VPatBinaryV_VV_VX_VI_VROR<string intrinsic, string instruction,
VPatBinaryV_VX_VROTATE<intrinsic, instruction, vtilist>,
VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;
-multiclass VPatBinaryW_VI_VWSLL<string intrinsic, string instruction,
- list<VTypeInfoToWide> vtilist> {
- foreach VtiToWti = vtilist in {
- defvar Vti = VtiToWti.Vti;
- defvar Wti = VtiToWti.Wti;
- defm : VPatBinary<intrinsic, instruction # "_VI_" # Vti.LMul.MX,
- Wti.Vector, Vti.Vector, XLenVT, Vti.Mask,
- Vti.Log2SEW, Wti.RegClass,
- Vti.RegClass, uimm5>;
- }
-}
-
-multiclass VPatBinaryW_VX_VWSLL<string intrinsic, string instruction,
- list<VTypeInfoToWide> vtilist> {
+multiclass VPatBinaryW_VV_VX_VI_VWSLL<string intrinsic, string instruction,
+ list<VTypeInfoToWide> vtilist>
+ : VPatBinaryW_VV<intrinsic, instruction, vtilist> {
foreach VtiToWti = vtilist in {
defvar Vti = VtiToWti.Vti;
defvar Wti = VtiToWti.Wti;
defvar kind = "V"#Vti.ScalarSuffix;
let Predicates = !listconcat(GetVTypePredicates<Vti>.Predicates,
- GetVTypePredicates<Wti>.Predicates) in
- defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
- Wti.Vector, Vti.Vector, XLenVT, Vti.Mask,
- Vti.Log2SEW, Wti.RegClass,
- Vti.RegClass, Vti.ScalarRegClass>;
+ GetVTypePredicates<Wti>.Predicates) in {
+ defm : VPatBinary<intrinsic, instruction#"_"#kind#"_"#Vti.LMul.MX,
+ Wti.Vector, Vti.Vector, XLenVT, Vti.Mask,
+ Vti.Log2SEW, Wti.RegClass,
+ Vti.RegClass, Vti.ScalarRegClass>;
+ defm : VPatBinary<intrinsic, instruction # "_VI_" # Vti.LMul.MX,
+ Wti.Vector, Vti.Vector, XLenVT, Vti.Mask,
+ Vti.Log2SEW, Wti.RegClass,
+ Vti.RegClass, uimm5>;
+ }
}
}
-multiclass VPatBinaryW_VV_VX_VI_VWSLL<string intrinsic, string instruction,
- list<VTypeInfoToWide> vtilist>
- : VPatBinaryW_VV<intrinsic, instruction, vtilist>,
- VPatBinaryW_VX_VWSLL<intrinsic, instruction, vtilist>,
- VPatBinaryW_VI_VWSLL<intrinsic, instruction, vtilist>;
-
let Predicates = [HasStdExtZvbb] in {
defm : VPatUnaryV_V<"int_riscv_vbrev", "PseudoVBREV", AllIntegerVectors>;
defm : VPatUnaryV_V<"int_riscv_vclz", "PseudoVCLZ", AllIntegerVectors>;
>From c09d84da78f8bfe37b35831cb7c9f500183b6f4c Mon Sep 17 00:00:00 2001
From: Tom Stellard <tstellar at redhat.com>
Date: Fri, 7 Jun 2024 19:02:55 -0700
Subject: [PATCH 29/57] [workflows] Add post-commit job that periodically runs
the clang static analyzer (#94106)
This job will run once per day on the main branch, and for every commit
on a release branch. It currently only builds llvm, but could add more
sub-projects in the future.
OpenSSF Best Practices recommends running a static analyzer on software
before it is released:
https://www.bestpractices.dev/en/criteria/0#0.static_analysis
---
.../workflows/ci-post-commit-analyzer-run.py | 34 +++++++
.github/workflows/ci-post-commit-analyzer.yml | 95 +++++++++++++++++++
2 files changed, 129 insertions(+)
create mode 100644 .github/workflows/ci-post-commit-analyzer-run.py
create mode 100644 .github/workflows/ci-post-commit-analyzer.yml
diff --git a/.github/workflows/ci-post-commit-analyzer-run.py b/.github/workflows/ci-post-commit-analyzer-run.py
new file mode 100644
index 0000000000000..e5f52d3b2fa67
--- /dev/null
+++ b/.github/workflows/ci-post-commit-analyzer-run.py
@@ -0,0 +1,34 @@
+import json
+import multiprocessing
+import os
+import re
+import subprocess
+import sys
+
+
+def run_analyzer(data):
+ os.chdir(data["directory"])
+ command = (
+ data["command"]
+ + f" --analyze --analyzer-output html -o analyzer-results -Xclang -analyzer-config -Xclang max-nodes=75000"
+ )
+ print(command)
+ subprocess.run(command, shell=True, check=True)
+
+
+def pool_error(e):
+ print("Error analyzing file:", e)
+
+
+def main():
+ db_path = sys.argv[1]
+ database = json.load(open(db_path))
+
+ with multiprocessing.Pool() as pool:
+ pool.map_async(run_analyzer, [k for k in database], error_callback=pool_error)
+ pool.close()
+ pool.join()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/.github/workflows/ci-post-commit-analyzer.yml b/.github/workflows/ci-post-commit-analyzer.yml
new file mode 100644
index 0000000000000..d614dd07b3a49
--- /dev/null
+++ b/.github/workflows/ci-post-commit-analyzer.yml
@@ -0,0 +1,95 @@
+name: Post-Commit Static Analyzer
+
+permissions:
+ contents: read
+
+on:
+ push:
+ branches:
+ - 'release/**'
+ paths:
+ - 'clang/**'
+ - 'llvm/**'
+ - '.github/workflows/ci-post-commit-analyzer.yml'
+ pull_request:
+ types:
+ - opened
+ - synchronize
+ - reopened
+ - closed
+ paths:
+ - '.github/workflows/ci-post-commit-analyzer.yml'
+ - '.github/workflows/ci-post-commit-analyzer-run.py'
+ schedule:
+ - cron: '30 0 * * *'
+
+concurrency:
+ group: >-
+ llvm-project-${{ github.workflow }}-${{ github.event_name == 'pull_request' &&
+ ( github.event.pull_request.number || github.ref) }}
+ cancel-in-progress: ${{ startsWith(github.ref, 'refs/pull/') }}
+
+jobs:
+ post-commit-analyzer:
+ if: >-
+ github.repository_owner == 'llvm' &&
+ github.event.action != 'closed'
+ runs-on: ubuntu-22.04
+ container:
+ image: 'ghcr.io/llvm/ci-ubuntu-22.04:latest'
+ env:
+ LLVM_VERSION: 18
+ steps:
+ - name: Checkout Source
+ uses: actions/checkout at b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
+
+ - name: Setup ccache
+ uses: hendrikmuhs/ccache-action at v1
+ with:
+ # A full build of llvm, clang, lld, and lldb takes about 250MB
+ # of ccache space. There's not much reason to have more than this,
+ # because we usually won't need to save cache entries from older
+ # builds. Also, there is an overall 10GB cache limit, and each
+ # run creates a new cache entry so we want to ensure that we have
+ # enough cache space for all the tests to run at once and still
+ # fit under the 10 GB limit.
+ # Default to 2G to workaround: https://github.com/hendrikmuhs/ccache-action/issues/174
+ max-size: 2G
+ key: post-commit-analyzer
+ variant: sccache
+
+ - name: Configure
+ run: |
+ cmake -B build -S llvm -G Ninja \
+ -DLLVM_ENABLE_ASSERTIONS=ON \
+ -DLLVM_ENABLE_PROJECTS=clang \
+ -DLLVM_BUILD_LLVM_DYLIB=ON \
+ -DLLVM_LINK_LLVM_DYLIB=ON \
+ -DCMAKE_CXX_COMPILER=clang++ \
+ -DCMAKE_C_COMPILER=clang \
+ -DCMAKE_CXX_COMPILER_LAUNCHER=sccache \
+ -DCMAKE_C_COMPILER_LAUNCHER=sccache \
+ -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
+ -DLLVM_INCLUDE_TESTS=OFF \
+ -DCLANG_INCLUDE_TESTS=OFF \
+ -DCMAKE_BUILD_TYPE=Release
+
+ - name: Build
+ run: |
+ # FIXME: We need to build all the generated header files in order to be able to run
+ # the analyzer on every file. Building libLLVM and libclang is probably overkill for
+ # this, but it's better than building every target.
+ ninja -v -C build libLLVM.so libclang.so
+
+ # Run the analyzer.
+ python3 .github/workflows/ci-post-commit-analyzer-run.py build/compile_commands.json
+
+ scan-build --generate-index-only build/analyzer-results
+
+ - name: Upload Results
+ uses: actions/upload-artifact at 26f96dfa697d77e81fd5907df203aa23a56210a8 #v4.3.0
+ if: always()
+ with:
+ name: analyzer-results
+ path: 'build/analyzer-results/*'
+
>From e783bcb268387dcc677be5b0b4633467db24995c Mon Sep 17 00:00:00 2001
From: Jie Fu <jiefu at tencent.com>
Date: Sat, 8 Jun 2024 11:55:40 +0800
Subject: [PATCH 30/57] [mlir] Handle the newly-added "Reserved"
FramePointerKind for 1a5239251ead73ee57f4e2f7fc93433ac7cf18b1 (NFC)
/llvm-project/mlir/lib/Target/LLVMIR/ModuleImport.cpp:48:
tools/mlir/include/mlir/Dialect/LLVMIR/LLVMConversionEnumsFromLLVM.inc:158:11:
error: enumeration value 'Reserved' not handled in switch [-Werror,-Wswitch]
switch (value) {
^~~~~
1 error generated.
---
mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td
index a93964abcb42e..f8e85004d5f93 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td
@@ -716,12 +716,15 @@ def FramePointerKindNonLeaf
: LLVM_EnumAttrCase<"NonLeaf", "non-leaf", "NonLeaf", 1>;
def FramePointerKindAll
: LLVM_EnumAttrCase<"All", "all", "All", 2>;
+def FramePointerKindReserved
+ : LLVM_EnumAttrCase<"Reserved", "reserved", "Reserved", 3>;
def FramePointerKindEnum : LLVM_EnumAttr<
"FramePointerKind",
"::llvm::FramePointerKind",
"LLVM FramePointerKind",
- [FramePointerKindNone, FramePointerKindNonLeaf, FramePointerKindAll]> {
+ [FramePointerKindNone, FramePointerKindNonLeaf,
+ FramePointerKindAll, FramePointerKindReserved]> {
let cppNamespace = "::mlir::LLVM::framePointerKind";
}
>From ecbc6c8aac916b4efe70d77f6407fb5c555200cf Mon Sep 17 00:00:00 2001
From: Thurston Dang <thurston at google.com>
Date: Fri, 7 Jun 2024 21:10:13 -0700
Subject: [PATCH 31/57] [dfsan] Fix release_shadow_space.c (#94770)
DFSan's sscanf is incorrect
(https://github.com/llvm/llvm-project/issues/94769), which results in
erroneous matches when scraping RSS from /proc/maps. This patch works
around the issue by using strstr as a secondary check.
It also adds a loose validity check for the initial RSS measurement, to
guard against regressions in get_rss_kb().
Fixes https://github.com/llvm/llvm-project/issues/91287
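A hedged sketch of the workaround's shape (the test's actual code is in
the diff below; this just isolates the two-step check):

#include <cstdio>
#include <cstring>

// sscanf alone may mis-match under DFSan (issue #94769), so additionally
// require the literal "Rss: " prefix before trusting the parsed value.
static bool parseRssLine(const char *Buf, long &RssKb) {
  return std::sscanf(Buf, "Rss: %ld kB", &RssKb) == 1 &&
         std::strstr(Buf, "Rss: ") != nullptr;
}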
---
compiler-rt/test/dfsan/release_shadow_space.c | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/compiler-rt/test/dfsan/release_shadow_space.c b/compiler-rt/test/dfsan/release_shadow_space.c
index 60dec98ebec49..0f0e1a9c731cb 100644
--- a/compiler-rt/test/dfsan/release_shadow_space.c
+++ b/compiler-rt/test/dfsan/release_shadow_space.c
@@ -3,9 +3,6 @@
// DFSAN_OPTIONS=no_huge_pages_for_shadow=false RUN: %clang_dfsan %s -DORIGIN_TRACKING -mllvm -dfsan-track-origins=1 -o %t && %run %t
// DFSAN_OPTIONS=no_huge_pages_for_shadow=true RUN: %clang_dfsan %s -DORIGIN_TRACKING -mllvm -dfsan-track-origins=1 -o %t && %run %t
-// This test is flaky right now: https://github.com/llvm/llvm-project/issues/91287
-// UNSUPPORTED: target={{.*}}
-
#include <assert.h>
#include <sanitizer/dfsan_interface.h>
#include <stdbool.h>
@@ -26,7 +23,11 @@ size_t get_rss_kb() {
char buf[256];
while (fgets(buf, sizeof(buf), f) != NULL) {
int64_t rss;
- if (sscanf(buf, "Rss: %ld kB", &rss) == 1)
+ // DFSan's sscanf is broken and doesn't check for ordinary characters in
+ // the format string, hence we use strstr as a secondary check
+ // (https://github.com/llvm/llvm-project/issues/94769).
+ if ((sscanf(buf, "Rss: %ld kB", &rss) == 1) &&
+ (strstr(buf, "Rss: ") != NULL))
ret += rss;
}
assert(feof(f));
@@ -73,6 +74,11 @@ int main(int argc, char **argv) {
before, after_mmap, after_mmap_and_set_label, after_fixed_mmap,
after_mmap_and_set_label2, after_munmap);
+ // This is orders of magnitude larger than we expect (typically < 10,000KB).
+ // It is a quick check to ensure that the RSS calculation function isn't
+ // egregiously wrong.
+ assert(before < 1000000);
+
const size_t mmap_cost_kb = map_size >> 10;
// Shadow space (1:1 with application memory)
const size_t mmap_shadow_cost_kb = sizeof(dfsan_label) * mmap_cost_kb;
>From ea0ca85c2541abe373d49eca9c0de9be325a5f65 Mon Sep 17 00:00:00 2001
From: Helena Kotas <hekotas at microsoft.com>
Date: Fri, 7 Jun 2024 21:30:04 -0700
Subject: [PATCH 32/57] [HLSL] Use llvm::Triple::EnvironmentType instead of
HLSLShaderAttr::ShaderType (#93847)
The `HLSLShaderAttr::ShaderType` enum is a subset of
`llvm::Triple::EnvironmentType`. We can use
`llvm::Triple::EnvironmentType` directly and avoid converting one enum
to another.
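One consequence, sketched below with illustrative stand-in values (the real ordering of llvm::Triple::EnvironmentType is checked by static_asserts in Triple.cpp), is that the availability scanner can index its per-stage bitmap straight from the triple enum by shifting so Pixel lands on bit 0; scanning a function in both the pixel and compute stages then yields the 0x21 mask described in the diff.
  #include <cstdio>
  // Illustrative stand-in for the shader-stage slice of
  // llvm::Triple::EnvironmentType; only the relative ordering matters.
  enum EnvironmentType { Pixel = 1, Vertex, Geometry, Hull, Domain, Compute };
  // Shift so the bitmap starts at bit 0 for Pixel.
  static unsigned stageBit(EnvironmentType env) { return 1u << (env - Pixel); }
  int main() {
    unsigned scanned = 0;
    scanned |= stageBit(Pixel);     // bit 0
    scanned |= stageBit(Compute);   // bit 5
    std::printf("0x%x\n", scanned); // prints 0x21 (100001 binary)
    return scanned == 0x21u ? 0 : 1;
  }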
---
clang/include/clang/Basic/Attr.td | 27 ++-----
clang/include/clang/Sema/SemaHLSL.h | 6 +-
clang/lib/CodeGen/CGHLSLRuntime.cpp | 2 +-
clang/lib/Sema/SemaHLSL.cpp | 109 +++++++++++++++-------------
4 files changed, 69 insertions(+), 75 deletions(-)
diff --git a/clang/include/clang/Basic/Attr.td b/clang/include/clang/Basic/Attr.td
index 17d9a710d948b..b70b0c8b836a5 100644
--- a/clang/include/clang/Basic/Attr.td
+++ b/clang/include/clang/Basic/Attr.td
@@ -4470,37 +4470,20 @@ def HLSLShader : InheritableAttr {
let Subjects = SubjectList<[HLSLEntry]>;
let LangOpts = [HLSL];
let Args = [
- EnumArgument<"Type", "ShaderType", /*is_string=*/true,
+ EnumArgument<"Type", "llvm::Triple::EnvironmentType", /*is_string=*/true,
["pixel", "vertex", "geometry", "hull", "domain", "compute",
"raygeneration", "intersection", "anyhit", "closesthit",
"miss", "callable", "mesh", "amplification"],
["Pixel", "Vertex", "Geometry", "Hull", "Domain", "Compute",
"RayGeneration", "Intersection", "AnyHit", "ClosestHit",
- "Miss", "Callable", "Mesh", "Amplification"]>
+ "Miss", "Callable", "Mesh", "Amplification"],
+ /*opt=*/0, /*fake=*/0, /*isExternalType=*/1>
];
let Documentation = [HLSLSV_ShaderTypeAttrDocs];
let AdditionalMembers =
[{
- static const unsigned ShaderTypeMaxValue = (unsigned)HLSLShaderAttr::Amplification;
-
- static llvm::Triple::EnvironmentType getTypeAsEnvironment(HLSLShaderAttr::ShaderType ShaderType) {
- switch (ShaderType) {
- case HLSLShaderAttr::Pixel: return llvm::Triple::Pixel;
- case HLSLShaderAttr::Vertex: return llvm::Triple::Vertex;
- case HLSLShaderAttr::Geometry: return llvm::Triple::Geometry;
- case HLSLShaderAttr::Hull: return llvm::Triple::Hull;
- case HLSLShaderAttr::Domain: return llvm::Triple::Domain;
- case HLSLShaderAttr::Compute: return llvm::Triple::Compute;
- case HLSLShaderAttr::RayGeneration: return llvm::Triple::RayGeneration;
- case HLSLShaderAttr::Intersection: return llvm::Triple::Intersection;
- case HLSLShaderAttr::AnyHit: return llvm::Triple::AnyHit;
- case HLSLShaderAttr::ClosestHit: return llvm::Triple::ClosestHit;
- case HLSLShaderAttr::Miss: return llvm::Triple::Miss;
- case HLSLShaderAttr::Callable: return llvm::Triple::Callable;
- case HLSLShaderAttr::Mesh: return llvm::Triple::Mesh;
- case HLSLShaderAttr::Amplification: return llvm::Triple::Amplification;
- }
- llvm_unreachable("unknown enumeration value");
+ static bool isValidShaderType(llvm::Triple::EnvironmentType ShaderType) {
+ return ShaderType >= llvm::Triple::Pixel && ShaderType <= llvm::Triple::Amplification;
}
}];
}
diff --git a/clang/include/clang/Sema/SemaHLSL.h b/clang/include/clang/Sema/SemaHLSL.h
index e145f5e7f43f8..0e41a72e444ef 100644
--- a/clang/include/clang/Sema/SemaHLSL.h
+++ b/clang/include/clang/Sema/SemaHLSL.h
@@ -39,7 +39,7 @@ class SemaHLSL : public SemaBase {
const AttributeCommonInfo &AL, int X,
int Y, int Z);
HLSLShaderAttr *mergeShaderAttr(Decl *D, const AttributeCommonInfo &AL,
- HLSLShaderAttr::ShaderType ShaderType);
+ llvm::Triple::EnvironmentType ShaderType);
HLSLParamModifierAttr *
mergeParamModifierAttr(Decl *D, const AttributeCommonInfo &AL,
HLSLParamModifierAttr::Spelling Spelling);
@@ -48,8 +48,8 @@ class SemaHLSL : public SemaBase {
void CheckSemanticAnnotation(FunctionDecl *EntryPoint, const Decl *Param,
const HLSLAnnotationAttr *AnnotationAttr);
void DiagnoseAttrStageMismatch(
- const Attr *A, HLSLShaderAttr::ShaderType Stage,
- std::initializer_list<HLSLShaderAttr::ShaderType> AllowedStages);
+ const Attr *A, llvm::Triple::EnvironmentType Stage,
+ std::initializer_list<llvm::Triple::EnvironmentType> AllowedStages);
void DiagnoseAvailabilityViolations(TranslationUnitDecl *TU);
void handleNumThreadsAttr(Decl *D, const ParsedAttr &AL);
diff --git a/clang/lib/CodeGen/CGHLSLRuntime.cpp b/clang/lib/CodeGen/CGHLSLRuntime.cpp
index 5e6a3dd4878f4..55ba21ae2ba69 100644
--- a/clang/lib/CodeGen/CGHLSLRuntime.cpp
+++ b/clang/lib/CodeGen/CGHLSLRuntime.cpp
@@ -313,7 +313,7 @@ void clang::CodeGen::CGHLSLRuntime::setHLSLEntryAttributes(
assert(ShaderAttr && "All entry functions must have a HLSLShaderAttr");
const StringRef ShaderAttrKindStr = "hlsl.shader";
Fn->addFnAttr(ShaderAttrKindStr,
- ShaderAttr->ConvertShaderTypeToStr(ShaderAttr->getType()));
+ llvm::Triple::getEnvironmentTypeName(ShaderAttr->getType()));
if (HLSLNumThreadsAttr *NumThreadsAttr = FD->getAttr<HLSLNumThreadsAttr>()) {
const StringRef NumThreadsKindStr = "hlsl.numthreads";
std::string NumThreadsStr =
diff --git a/clang/lib/Sema/SemaHLSL.cpp b/clang/lib/Sema/SemaHLSL.cpp
index 0a2face7afe65..144cdcc0d98ef 100644
--- a/clang/lib/Sema/SemaHLSL.cpp
+++ b/clang/lib/Sema/SemaHLSL.cpp
@@ -146,7 +146,7 @@ HLSLNumThreadsAttr *SemaHLSL::mergeNumThreadsAttr(Decl *D,
HLSLShaderAttr *
SemaHLSL::mergeShaderAttr(Decl *D, const AttributeCommonInfo &AL,
- HLSLShaderAttr::ShaderType ShaderType) {
+ llvm::Triple::EnvironmentType ShaderType) {
if (HLSLShaderAttr *NT = D->getAttr<HLSLShaderAttr>()) {
if (NT->getType() != ShaderType) {
Diag(NT->getLocation(), diag::err_hlsl_attribute_param_mismatch) << AL;
@@ -184,13 +184,12 @@ void SemaHLSL::ActOnTopLevelFunction(FunctionDecl *FD) {
if (FD->getName() != TargetInfo.getTargetOpts().HLSLEntry)
return;
- StringRef Env = TargetInfo.getTriple().getEnvironmentName();
- HLSLShaderAttr::ShaderType ShaderType;
- if (HLSLShaderAttr::ConvertStrToShaderType(Env, ShaderType)) {
+ llvm::Triple::EnvironmentType Env = TargetInfo.getTriple().getEnvironment();
+ if (HLSLShaderAttr::isValidShaderType(Env) && Env != llvm::Triple::Library) {
if (const auto *Shader = FD->getAttr<HLSLShaderAttr>()) {
// The entry point is already annotated - check that it matches the
// triple.
- if (Shader->getType() != ShaderType) {
+ if (Shader->getType() != Env) {
Diag(Shader->getLocation(), diag::err_hlsl_entry_shader_attr_mismatch)
<< Shader;
FD->setInvalidDecl();
@@ -198,11 +197,11 @@ void SemaHLSL::ActOnTopLevelFunction(FunctionDecl *FD) {
} else {
// Implicitly add the shader attribute if the entry function isn't
// explicitly annotated.
- FD->addAttr(HLSLShaderAttr::CreateImplicit(getASTContext(), ShaderType,
+ FD->addAttr(HLSLShaderAttr::CreateImplicit(getASTContext(), Env,
FD->getBeginLoc()));
}
} else {
- switch (TargetInfo.getTriple().getEnvironment()) {
+ switch (Env) {
case llvm::Triple::UnknownEnvironment:
case llvm::Triple::Library:
break;
@@ -215,38 +214,40 @@ void SemaHLSL::ActOnTopLevelFunction(FunctionDecl *FD) {
void SemaHLSL::CheckEntryPoint(FunctionDecl *FD) {
const auto *ShaderAttr = FD->getAttr<HLSLShaderAttr>();
assert(ShaderAttr && "Entry point has no shader attribute");
- HLSLShaderAttr::ShaderType ST = ShaderAttr->getType();
+ llvm::Triple::EnvironmentType ST = ShaderAttr->getType();
switch (ST) {
- case HLSLShaderAttr::Pixel:
- case HLSLShaderAttr::Vertex:
- case HLSLShaderAttr::Geometry:
- case HLSLShaderAttr::Hull:
- case HLSLShaderAttr::Domain:
- case HLSLShaderAttr::RayGeneration:
- case HLSLShaderAttr::Intersection:
- case HLSLShaderAttr::AnyHit:
- case HLSLShaderAttr::ClosestHit:
- case HLSLShaderAttr::Miss:
- case HLSLShaderAttr::Callable:
+ case llvm::Triple::Pixel:
+ case llvm::Triple::Vertex:
+ case llvm::Triple::Geometry:
+ case llvm::Triple::Hull:
+ case llvm::Triple::Domain:
+ case llvm::Triple::RayGeneration:
+ case llvm::Triple::Intersection:
+ case llvm::Triple::AnyHit:
+ case llvm::Triple::ClosestHit:
+ case llvm::Triple::Miss:
+ case llvm::Triple::Callable:
if (const auto *NT = FD->getAttr<HLSLNumThreadsAttr>()) {
DiagnoseAttrStageMismatch(NT, ST,
- {HLSLShaderAttr::Compute,
- HLSLShaderAttr::Amplification,
- HLSLShaderAttr::Mesh});
+ {llvm::Triple::Compute,
+ llvm::Triple::Amplification,
+ llvm::Triple::Mesh});
FD->setInvalidDecl();
}
break;
- case HLSLShaderAttr::Compute:
- case HLSLShaderAttr::Amplification:
- case HLSLShaderAttr::Mesh:
+ case llvm::Triple::Compute:
+ case llvm::Triple::Amplification:
+ case llvm::Triple::Mesh:
if (!FD->hasAttr<HLSLNumThreadsAttr>()) {
Diag(FD->getLocation(), diag::err_hlsl_missing_numthreads)
- << HLSLShaderAttr::ConvertShaderTypeToStr(ST);
+ << llvm::Triple::getEnvironmentTypeName(ST);
FD->setInvalidDecl();
}
break;
+ default:
+ llvm_unreachable("Unhandled environment in triple");
}
for (ParmVarDecl *Param : FD->parameters()) {
@@ -268,14 +269,14 @@ void SemaHLSL::CheckSemanticAnnotation(
const HLSLAnnotationAttr *AnnotationAttr) {
auto *ShaderAttr = EntryPoint->getAttr<HLSLShaderAttr>();
assert(ShaderAttr && "Entry point has no shader attribute");
- HLSLShaderAttr::ShaderType ST = ShaderAttr->getType();
+ llvm::Triple::EnvironmentType ST = ShaderAttr->getType();
switch (AnnotationAttr->getKind()) {
case attr::HLSLSV_DispatchThreadID:
case attr::HLSLSV_GroupIndex:
- if (ST == HLSLShaderAttr::Compute)
+ if (ST == llvm::Triple::Compute)
return;
- DiagnoseAttrStageMismatch(AnnotationAttr, ST, {HLSLShaderAttr::Compute});
+ DiagnoseAttrStageMismatch(AnnotationAttr, ST, {llvm::Triple::Compute});
break;
default:
llvm_unreachable("Unknown HLSLAnnotationAttr");
@@ -283,16 +284,16 @@ void SemaHLSL::CheckSemanticAnnotation(
}
void SemaHLSL::DiagnoseAttrStageMismatch(
- const Attr *A, HLSLShaderAttr::ShaderType Stage,
- std::initializer_list<HLSLShaderAttr::ShaderType> AllowedStages) {
+ const Attr *A, llvm::Triple::EnvironmentType Stage,
+ std::initializer_list<llvm::Triple::EnvironmentType> AllowedStages) {
SmallVector<StringRef, 8> StageStrings;
llvm::transform(AllowedStages, std::back_inserter(StageStrings),
- [](HLSLShaderAttr::ShaderType ST) {
+ [](llvm::Triple::EnvironmentType ST) {
return StringRef(
- HLSLShaderAttr::ConvertShaderTypeToStr(ST));
+ HLSLShaderAttr::ConvertEnvironmentTypeToStr(ST));
});
Diag(A->getLoc(), diag::err_hlsl_attr_unsupported_in_stage)
- << A << HLSLShaderAttr::ConvertShaderTypeToStr(Stage)
+ << A << llvm::Triple::getEnvironmentTypeName(Stage)
<< (AllowedStages.size() != 1) << join(StageStrings, ", ");
}
@@ -430,8 +431,8 @@ void SemaHLSL::handleShaderAttr(Decl *D, const ParsedAttr &AL) {
if (!SemaRef.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
return;
- HLSLShaderAttr::ShaderType ShaderType;
- if (!HLSLShaderAttr::ConvertStrToShaderType(Str, ShaderType)) {
+ llvm::Triple::EnvironmentType ShaderType;
+ if (!HLSLShaderAttr::ConvertStrToEnvironmentType(Str, ShaderType)) {
Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
<< AL << Str << ArgLoc;
return;
@@ -549,16 +550,22 @@ class DiagnoseHLSLAvailability
//
// Maps FunctionDecl to an unsigned number that represents the set of shader
// environments the function has been scanned for.
- // Since HLSLShaderAttr::ShaderType enum is generated from Attr.td and is
- // defined without any assigned values, it is guaranteed to be numbered
- // sequentially from 0 up and we can use it to 'index' individual bits
- // in the set.
+ // The llvm::Triple::EnvironmentType enum values for shader stages are
+ // guaranteed to be numbered from llvm::Triple::Pixel to
+ // llvm::Triple::Amplification (verified by static_asserts in Triple.cpp),
+ // so we can use them to index individual bits in the set, as long as we
+ // shift the values to start at 0 by subtracting llvm::Triple::Pixel first.
+ //
// The N'th bit in the set will be set if the function has been scanned
+ // in a shader environment whose llvm::Triple::EnvironmentType integer value
+ // in shader environment whose llvm::Triple::EnvironmentType integer value
+ // equals (llvm::Triple::Pixel + N).
+ //
// For example, if a function has been scanned in compute and pixel stage
- // environment, the value will be 0x21 (100001 binary) because
- // (int)HLSLShaderAttr::ShaderType::Pixel == 1 and
- // (int)HLSLShaderAttr::ShaderType::Compute == 5.
+ // environment, the value will be 0x21 (100001 binary) because:
+ //
+ // (int)(llvm::Triple::Pixel - llvm::Triple::Pixel) == 0
+ // (int)(llvm::Triple::Compute - llvm::Triple::Pixel) == 5
+ //
// A FunctionDecl is mapped to 0 (or not included in the map) if it has not
// been scanned in any environment.
llvm::DenseMap<const FunctionDecl *, unsigned> ScannedDecls;
@@ -574,12 +581,16 @@ class DiagnoseHLSLAvailability
bool ReportOnlyShaderStageIssues;
// Helper methods for dealing with current stage context / environment
- void SetShaderStageContext(HLSLShaderAttr::ShaderType ShaderType) {
+ void SetShaderStageContext(llvm::Triple::EnvironmentType ShaderType) {
static_assert(sizeof(unsigned) >= 4);
- assert((unsigned)ShaderType < 31); // 31 is reserved for "unknown"
-
- CurrentShaderEnvironment = HLSLShaderAttr::getTypeAsEnvironment(ShaderType);
- CurrentShaderStageBit = (1 << ShaderType);
+ assert(HLSLShaderAttr::isValidShaderType(ShaderType));
+ assert((unsigned)(ShaderType - llvm::Triple::Pixel) < 31 &&
+ "ShaderType is too big for this bitmap"); // 31 is reserved for
+ // "unknown"
+
+ unsigned bitmapIndex = ShaderType - llvm::Triple::Pixel;
+ CurrentShaderEnvironment = ShaderType;
+ CurrentShaderStageBit = (1 << bitmapIndex);
}
void SetUnknownShaderStageContext() {
>From f5b47f539a84895a1e67394ca4fa4da2ac1edf67 Mon Sep 17 00:00:00 2001
From: Vladimir Vereschaka <vvereschaka at accesssoftek.com>
Date: Fri, 7 Jun 2024 22:05:41 -0700
Subject: [PATCH 33/57] [CMake] Update CMake cache file for the ARM/Aarch64
cross toolchain builds. NFC. (#94835)
* Generate a Clang configuration file with the provided target sysroot
(TOOLCHAIN_TARGET_SYSROOTFS).
* Explicitly pass the provided target sysroot into the compiler-rt tests
configuration.
* Add the ability to choose the type of the built libraries -- shared or
static (TOOLCHAIN_SHARED_LIBS, default OFF).
On behalf of: #94284
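For reference, the file(WRITE ...) step in the diff below produces a one-line clang configuration file next to the just-built binaries; with the placeholder paths from the configure example, <build>/bin/aarch64-unknown-linux-gnu.cfg would contain just:
  --sysroot='<path-to-develop-arm-linux-root-fs>'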
---
clang/cmake/caches/CrossWinToARMLinux.cmake | 45 ++++++++++++++-------
1 file changed, 31 insertions(+), 14 deletions(-)
diff --git a/clang/cmake/caches/CrossWinToARMLinux.cmake b/clang/cmake/caches/CrossWinToARMLinux.cmake
index 6826d01f8b2a7..e4d0a0c2d14cb 100644
--- a/clang/cmake/caches/CrossWinToARMLinux.cmake
+++ b/clang/cmake/caches/CrossWinToARMLinux.cmake
@@ -6,21 +6,23 @@
# on Windows platform.
#
# NOTE: the build requires a development ARM Linux root filesystem to use
-# proper target platform depended library and header files:
-# - create <full-path-to-clang-configs> directory and put the clang configuration
-# file named <TOOLCHAIN_TARGET_TRIPLE>.cfg into it.
-# - add the `--sysroot=<path-to-develop-arm-linux-root-fs>` argument into
-# this configuration file.
-# - add other necessary target depended clang arguments there,
-# such as '-mcpu=cortex-a78' & etc.
+# proper target platform-dependent library and header files.
+#
+# The build generates a clang configuration file that stores the
+# --sysroot argument for the specified target triple. It is also possible
+# to specify the configuration file path via CMake arguments, such as
+# -DCLANG_CONFIG_FILE_USER_DIR=<full-path-to-clang-configs>
+# and/or
+# -DCLANG_CONFIG_FILE_SYSTEM_DIR=<full-path-to-clang-configs>
#
# See more details here: https://clang.llvm.org/docs/UsersManual.html#configuration-files
#
# Configure:
# cmake -G Ninja ^
# -DTOOLCHAIN_TARGET_TRIPLE=aarch64-unknown-linux-gnu ^
+# -DTOOLCHAIN_TARGET_SYSROOTFS=<path-to-develop-arm-linux-root-fs> ^
+# -DTOOLCHAIN_SHARED_LIBS=OFF ^
# -DCMAKE_INSTALL_PREFIX=../install ^
-# -DCLANG_CONFIG_FILE_USER_DIR=<full-path-to-clang-configs> ^
# -DCMAKE_CXX_FLAGS="-D__OPTIMIZE__" ^
# -DREMOTE_TEST_HOST="<hostname>" ^
# -DREMOTE_TEST_USER="<ssh_user_name>" ^
@@ -81,6 +83,20 @@ endif()
message(STATUS "Toolchain target triple: ${TOOLCHAIN_TARGET_TRIPLE}")
+if (DEFINED TOOLCHAIN_TARGET_SYSROOTFS)
+ message(STATUS "Toolchain target sysroot: ${TOOLCHAIN_TARGET_SYSROOTFS}")
+ # Store the --sysroot argument for the compiler-rt test flags.
+ set(sysroot_flags --sysroot='${TOOLCHAIN_TARGET_SYSROOTFS}')
+ # Generate the clang configuration file for the specified target triple
+ # and store --sysroot in this file.
+ file(WRITE "${CMAKE_BINARY_DIR}/bin/${TOOLCHAIN_TARGET_TRIPLE}.cfg" ${sysroot_flags})
+endif()
+
+# Build the shared libraries for libc++/libc++abi/libunwind.
+if (NOT DEFINED TOOLCHAIN_SHARED_LIBS)
+ set(TOOLCHAIN_SHARED_LIBS OFF)
+endif()
+
if (NOT DEFINED LLVM_TARGETS_TO_BUILD)
if ("${TOOLCHAIN_TARGET_TRIPLE}" MATCHES "^(armv|arm32)+")
set(LLVM_TARGETS_TO_BUILD "ARM" CACHE STRING "")
@@ -183,20 +199,21 @@ set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_COMPILER_RT_CAN_EXECUTE_TESTS
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_COMPILER_RT_USE_BUILTINS_LIBRARY ON CACHE BOOL "")
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_COMPILER_RT_CXX_LIBRARY libcxx CACHE STRING "")
-# Tell Clang to seach C++ headers alongside with the just-built binaries for the C++ compiler-rt tests.
-set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_COMPILER_RT_TEST_COMPILER_CFLAGS "--stdlib=libc++" CACHE STRING "")
-
+# The compiler-rt tests disable the clang configuration files during execution by setting CLANG_NO_DEFAULT_CONFIG=1,
+# which drops the --sysroot from them. Provide it explicitly via the test flags here if a target sysroot has been specified.
+set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_COMPILER_RT_TEST_COMPILER_CFLAGS "--stdlib=libc++ ${sysroot_flags}" CACHE STRING "")
+
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBUNWIND_USE_COMPILER_RT ON CACHE BOOL "")
-set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBUNWIND_ENABLE_SHARED OFF CACHE BOOL "")
+set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBUNWIND_ENABLE_SHARED ${TOOLCHAIN_SHARED_LIBS} CACHE BOOL "")
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBCXXABI_USE_LLVM_UNWINDER ON CACHE BOOL "")
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBCXXABI_ENABLE_STATIC_UNWINDER ON CACHE BOOL "")
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBCXXABI_USE_COMPILER_RT ON CACHE BOOL "")
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBCXXABI_ENABLE_NEW_DELETE_DEFINITIONS OFF CACHE BOOL "")
-set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBCXXABI_ENABLE_SHARED OFF CACHE BOOL "")
+set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBCXXABI_ENABLE_SHARED ${TOOLCHAIN_SHARED_LIBS} CACHE BOOL "")
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBCXX_USE_COMPILER_RT ON CACHE BOOL "")
-set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBCXX_ENABLE_SHARED OFF CACHE BOOL "")
+set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBCXX_ENABLE_SHARED ${TOOLCHAIN_SHARED_LIBS} CACHE BOOL "")
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBCXX_ABI_VERSION ${LIBCXX_ABI_VERSION} CACHE STRING "")
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBCXX_CXX_ABI "libcxxabi" CACHE STRING "") #!!!
set(RUNTIMES_${TOOLCHAIN_TARGET_TRIPLE}_LIBCXX_ENABLE_NEW_DELETE_DEFINITIONS ON CACHE BOOL "")
>From ca9b056067b7edfd39c90f90b2094eabba5aed67 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Fri, 7 Jun 2024 22:52:47 -0700
Subject: [PATCH 34/57] [RISCV] Remove many ImmType parameters from tablegen
classes. NFC
These usually have a single value that is always used, so we can just
hardcode it into the class body.
---
.../Target/RISCV/RISCVInstrInfoVPseudos.td | 61 +++++++++----------
llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td | 5 +-
2 files changed, 31 insertions(+), 35 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 1a514812c7361..4a67b1b4c56d3 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -2317,11 +2317,11 @@ multiclass VPseudoVSLD1_VF<string Constraint = ""> {
}
}
-multiclass VPseudoBinaryV_VI<Operand ImmType = simm5, LMULInfo m, string Constraint = ""> {
+multiclass VPseudoBinaryV_VI<Operand ImmType, LMULInfo m, string Constraint = ""> {
defm _VI : VPseudoBinary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
}
-multiclass VPseudoBinaryV_VI_RM<Operand ImmType = simm5, LMULInfo m, string Constraint = ""> {
+multiclass VPseudoBinaryV_VI_RM<Operand ImmType, LMULInfo m, string Constraint = ""> {
defm _VI : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, ImmType, m, Constraint>;
}
@@ -2696,13 +2696,13 @@ multiclass VPseudoBinaryM_VI<LMULInfo m, int TargetConstraintType = 1> {
!if(!ge(m.octuple, 16), "@earlyclobber $rd", ""), TargetConstraintType>;
}
-multiclass VPseudoVGTR_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
+multiclass VPseudoVGTR_VV_VX_VI<string Constraint = ""> {
foreach m = MxList in {
defvar mx = m.MX;
defm "" : VPseudoBinaryV_VX<m, Constraint>,
SchedBinary<"WriteVRGatherVX", "ReadVRGatherVX_data",
"ReadVRGatherVX_index", mx, forceMergeOpRead=true>;
- defm "" : VPseudoBinaryV_VI<ImmType, m, Constraint>,
+ defm "" : VPseudoBinaryV_VI<uimm5, m, Constraint>,
SchedUnary<"WriteVRGatherVI", "ReadVRGatherVI_data", mx,
forceMergeOpRead=true>;
@@ -2715,8 +2715,7 @@ multiclass VPseudoVGTR_VV_VX_VI<Operand ImmType = simm5, string Constraint = "">
}
}
-multiclass VPseudoVSALU_VV_VX_VI<Operand ImmType = simm5, string Constraint = "",
- bit Commutable = 0> {
+multiclass VPseudoVSALU_VV_VX_VI<string Constraint = "", bit Commutable = 0> {
foreach m = MxList in {
defvar mx = m.MX;
defm "" : VPseudoBinaryV_VV<m, Constraint, Commutable=Commutable>,
@@ -2725,13 +2724,13 @@ multiclass VPseudoVSALU_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""
defm "" : VPseudoBinaryV_VX<m, Constraint>,
SchedBinary<"WriteVSALUX", "ReadVSALUV", "ReadVSALUX", mx,
forceMergeOpRead=true>;
- defm "" : VPseudoBinaryV_VI<ImmType, m, Constraint>,
+ defm "" : VPseudoBinaryV_VI<simm5, m, Constraint>,
SchedUnary<"WriteVSALUI", "ReadVSALUV", mx, forceMergeOpRead=true>;
}
}
-multiclass VPseudoVSHT_VV_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
+multiclass VPseudoVSHT_VV_VX_VI<string Constraint = ""> {
foreach m = MxList in {
defvar mx = m.MX;
defm "" : VPseudoBinaryV_VV<m, Constraint>,
@@ -2740,12 +2739,12 @@ multiclass VPseudoVSHT_VV_VX_VI<Operand ImmType = simm5, string Constraint = "">
defm "" : VPseudoBinaryV_VX<m, Constraint>,
SchedBinary<"WriteVShiftX", "ReadVShiftV", "ReadVShiftX", mx,
forceMergeOpRead=true>;
- defm "" : VPseudoBinaryV_VI<ImmType, m, Constraint>,
+ defm "" : VPseudoBinaryV_VI<uimm5, m, Constraint>,
SchedUnary<"WriteVShiftI", "ReadVShiftV", mx, forceMergeOpRead=true>;
}
}
-multiclass VPseudoVSSHT_VV_VX_VI_RM<Operand ImmType = simm5, string Constraint = ""> {
+multiclass VPseudoVSSHT_VV_VX_VI_RM<string Constraint = ""> {
foreach m = MxList in {
defvar mx = m.MX;
defm "" : VPseudoBinaryV_VV_RM<m, Constraint>,
@@ -2754,13 +2753,12 @@ multiclass VPseudoVSSHT_VV_VX_VI_RM<Operand ImmType = simm5, string Constraint =
defm "" : VPseudoBinaryV_VX_RM<m, Constraint>,
SchedBinary<"WriteVSShiftX", "ReadVSShiftV", "ReadVSShiftX", mx,
forceMergeOpRead=true>;
- defm "" : VPseudoBinaryV_VI_RM<ImmType, m, Constraint>,
+ defm "" : VPseudoBinaryV_VI_RM<uimm5, m, Constraint>,
SchedUnary<"WriteVSShiftI", "ReadVSShiftV", mx, forceMergeOpRead=true>;
}
}
-multiclass VPseudoVALU_VV_VX_VI<Operand ImmType = simm5, string Constraint = "",
- bit Commutable = 0> {
+multiclass VPseudoVALU_VV_VX_VI<string Constraint = "", bit Commutable = 0> {
foreach m = MxList in {
defvar mx = m.MX;
defm "" : VPseudoBinaryV_VV<m, Constraint, Commutable=Commutable>,
@@ -2769,7 +2767,7 @@ multiclass VPseudoVALU_VV_VX_VI<Operand ImmType = simm5, string Constraint = "",
defm "" : VPseudoBinaryV_VX<m, Constraint>,
SchedBinary<"WriteVIALUX", "ReadVIALUV", "ReadVIALUX", mx,
forceMergeOpRead=true>;
- defm "" : VPseudoBinaryV_VI<ImmType, m, Constraint>,
+ defm "" : VPseudoBinaryV_VI<simm5, m, Constraint>,
SchedUnary<"WriteVIALUI", "ReadVIALUV", mx, forceMergeOpRead=true>;
}
}
@@ -2962,13 +2960,13 @@ multiclass VPseudoVALU_VF_RM {
}
}
-multiclass VPseudoVALU_VX_VI<Operand ImmType = simm5> {
+multiclass VPseudoVALU_VX_VI {
foreach m = MxList in {
defvar mx = m.MX;
defm "" : VPseudoBinaryV_VX<m>,
SchedBinary<"WriteVIALUX", "ReadVIALUV", "ReadVIALUX", mx,
forceMergeOpRead=true>;
- defm "" : VPseudoBinaryV_VI<ImmType, m>,
+ defm "" : VPseudoBinaryV_VI<simm5, m>,
SchedUnary<"WriteVIALUI", "ReadVIALUV", mx, forceMergeOpRead=true>;
}
}
@@ -3333,8 +3331,8 @@ multiclass VPseudoVSLDV_VX<LMULInfo m, string Constraint = ""> {
defm _VX : VPseudoVSLDVWithPolicy<m.vrclass, m.vrclass, GPR, m, Constraint>;
}
-multiclass VPseudoVSLDV_VI<Operand ImmType = simm5, LMULInfo m, string Constraint = ""> {
- defm _VI : VPseudoVSLDVWithPolicy<m.vrclass, m.vrclass, ImmType, m, Constraint>;
+multiclass VPseudoVSLDV_VI<LMULInfo m, string Constraint = ""> {
+ defm _VI : VPseudoVSLDVWithPolicy<m.vrclass, m.vrclass, uimm5, m, Constraint>;
}
multiclass VPseudoVMAC_VV_VX_AAXA<string Constraint = ""> {
@@ -3366,15 +3364,14 @@ multiclass VPseudoVMAC_VV_VF_AAXA_RM<string Constraint = ""> {
}
}
-multiclass VPseudoVSLD_VX_VI<Operand ImmType = simm5, bit slidesUp = false,
- string Constraint = ""> {
+multiclass VPseudoVSLD_VX_VI<bit slidesUp = false, string Constraint = ""> {
defvar WriteSlideX = !if(slidesUp, "WriteVSlideUpX", "WriteVSlideDownX");
foreach m = MxList in {
defvar mx = m.MX;
defm "" : VPseudoVSLDV_VX<m, Constraint>,
SchedTernary<WriteSlideX, "ReadVISlideV", "ReadVISlideV",
"ReadVISlideX", mx>;
- defm "" : VPseudoVSLDV_VI<ImmType, m, Constraint>,
+ defm "" : VPseudoVSLDV_VI<m, Constraint>,
SchedBinary<"WriteVSlideI", "ReadVISlideV", "ReadVISlideV", mx>;
}
}
@@ -5441,7 +5438,7 @@ multiclass VPatBinaryV_VV_VX_VI<string intrinsic, string instruction,
VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;
multiclass VPatBinaryV_VV_VX_VI_RM<string intrinsic, string instruction,
- list<VTypeInfo> vtilist, Operand ImmType = simm5>
+ list<VTypeInfo> vtilist, Operand ImmType>
: VPatBinaryV_VV_RM<intrinsic, instruction, vtilist>,
VPatBinaryV_VX_RM<intrinsic, instruction, vtilist>,
VPatBinaryV_VI_RM<intrinsic, instruction, vtilist, ImmType>;
@@ -5777,7 +5774,7 @@ multiclass VPatTernaryV_VV_VX_AAXA_RM<string intrinsic, string instruction,
VPatTernaryV_VX_AAXA_RM<intrinsic, instruction, vtilist, isSEWAware>;
multiclass VPatTernaryV_VX_VI<string intrinsic, string instruction,
- list<VTypeInfo> vtilist, Operand Imm_type = simm5>
+ list<VTypeInfo> vtilist, Operand Imm_type>
: VPatTernaryV_VX<intrinsic, instruction, vtilist>,
VPatTernaryV_VI<intrinsic, instruction, vtilist, Imm_type>;
@@ -5809,7 +5806,7 @@ multiclass VPatBinaryM_VX_VI<string intrinsic, string instruction,
VPatBinaryM_VI<intrinsic, instruction, vtilist>;
multiclass VPatBinaryV_VV_VX_VI_INT<string intrinsic, string instruction,
- list<VTypeInfo> vtilist, Operand ImmType = simm5>
+ list<VTypeInfo> vtilist, Operand ImmType>
: VPatBinaryV_VV_INT<intrinsic#"_vv", instruction, vtilist>,
VPatBinaryV_VX_INT<intrinsic#"_vx", instruction, vtilist>,
VPatBinaryV_VI<intrinsic#"_vx", instruction, vtilist, ImmType>;
@@ -6329,9 +6326,9 @@ defm PseudoVXOR : VPseudoVALU_VV_VX_VI<Commutable=1>;
//===----------------------------------------------------------------------===//
// 11.6. Vector Single-Width Bit Shift Instructions
//===----------------------------------------------------------------------===//
-defm PseudoVSLL : VPseudoVSHT_VV_VX_VI<uimm5>;
-defm PseudoVSRL : VPseudoVSHT_VV_VX_VI<uimm5>;
-defm PseudoVSRA : VPseudoVSHT_VV_VX_VI<uimm5>;
+defm PseudoVSLL : VPseudoVSHT_VV_VX_VI;
+defm PseudoVSRL : VPseudoVSHT_VV_VX_VI;
+defm PseudoVSRA : VPseudoVSHT_VV_VX_VI;
//===----------------------------------------------------------------------===//
// 11.7. Vector Narrowing Integer Right Shift Instructions
@@ -6440,8 +6437,8 @@ let Defs = [VXSAT] in {
//===----------------------------------------------------------------------===//
// 12.4. Vector Single-Width Scaling Shift Instructions
//===----------------------------------------------------------------------===//
-defm PseudoVSSRL : VPseudoVSSHT_VV_VX_VI_RM<uimm5>;
-defm PseudoVSSRA : VPseudoVSSHT_VV_VX_VI_RM<uimm5>;
+defm PseudoVSSRL : VPseudoVSSHT_VV_VX_VI_RM;
+defm PseudoVSSRA : VPseudoVSSHT_VV_VX_VI_RM;
//===----------------------------------------------------------------------===//
// 12.5. Vector Narrowing Fixed-Point Clip Instructions
@@ -6826,8 +6823,8 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
// 16.3. Vector Slide Instructions
//===----------------------------------------------------------------------===//
let Predicates = [HasVInstructions] in {
- defm PseudoVSLIDEUP : VPseudoVSLD_VX_VI<uimm5, /*slidesUp=*/true, "@earlyclobber $rd">;
- defm PseudoVSLIDEDOWN : VPseudoVSLD_VX_VI<uimm5, /*slidesUp=*/false>;
+ defm PseudoVSLIDEUP : VPseudoVSLD_VX_VI</*slidesUp=*/true, "@earlyclobber $rd">;
+ defm PseudoVSLIDEDOWN : VPseudoVSLD_VX_VI</*slidesUp=*/false>;
defm PseudoVSLIDE1UP : VPseudoVSLD1_VX<"@earlyclobber $rd">;
defm PseudoVSLIDE1DOWN : VPseudoVSLD1_VX;
} // Predicates = [HasVInstructions]
@@ -6841,7 +6838,7 @@ let Predicates = [HasVInstructionsAnyF] in {
// 16.4. Vector Register Gather Instructions
//===----------------------------------------------------------------------===//
let Predicates = [HasVInstructions] in {
-defm PseudoVRGATHER : VPseudoVGTR_VV_VX_VI<uimm5, "@earlyclobber $rd">;
+defm PseudoVRGATHER : VPseudoVGTR_VV_VX_VI<"@earlyclobber $rd">;
defm PseudoVRGATHEREI16 : VPseudoVGTR_EI16_VV<Constraint = "@earlyclobber $rd">;
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
index 98b5aeef9fe2d..4bae0d0e0be03 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
@@ -1025,11 +1025,10 @@ multiclass VPatBinaryV_VV_VX_VROL<string intrinsic, string instruction,
VPatBinaryV_VI_VROL<intrinsic, instruction2, vtilist>;
multiclass VPatBinaryV_VV_VX_VI_VROR<string intrinsic, string instruction,
- list<VTypeInfo> vtilist,
- Operand ImmType = uimm6>
+ list<VTypeInfo> vtilist>
: VPatBinaryV_VV<intrinsic, instruction, vtilist>,
VPatBinaryV_VX_VROTATE<intrinsic, instruction, vtilist>,
- VPatBinaryV_VI<intrinsic, instruction, vtilist, ImmType>;
+ VPatBinaryV_VI<intrinsic, instruction, vtilist, uimm6>;
multiclass VPatBinaryW_VV_VX_VI_VWSLL<string intrinsic, string instruction,
list<VTypeInfoToWide> vtilist>
>From 8721e2358c33c2d788bfc4493895c8844b87d1d7 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Fri, 7 Jun 2024 23:09:42 -0700
Subject: [PATCH 35/57] [RISCV] Remove unused defaults for sew parameters in
tablegen. NFC
Also remove some unused Constraint parameters that appeared before
the sew parameter.
---
.../Target/RISCV/RISCVInstrInfoVPseudos.td | 55 +++++++++----------
llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td | 8 +--
2 files changed, 30 insertions(+), 33 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 4a67b1b4c56d3..1a5fc1c20865e 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -2194,7 +2194,7 @@ multiclass VPseudoBinaryEmul<VReg RetClass,
LMULInfo lmul,
LMULInfo emul,
string Constraint = "",
- int sew = 0> {
+ int sew> {
let VLMul = lmul.value, SEW=sew in {
defvar suffix = !if(sew, "_" # lmul.MX # "_E" # sew, "_" # lmul.MX);
def suffix # "_" # emul.MX : VPseudoBinaryNoMaskPolicy<RetClass, Op1Class, Op2Class,
@@ -2246,14 +2246,13 @@ multiclass VPseudoBinaryV_VV_RM<LMULInfo m, string Constraint = "", bit Commutab
}
// Similar to VPseudoBinaryV_VV, but uses MxListF.
-multiclass VPseudoBinaryFV_VV<LMULInfo m, string Constraint = "", int sew = 0> {
- defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, Constraint, sew>;
+multiclass VPseudoBinaryFV_VV<LMULInfo m, int sew> {
+ defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, "", sew>;
}
-multiclass VPseudoBinaryFV_VV_RM<LMULInfo m, string Constraint = "", int sew = 0> {
+multiclass VPseudoBinaryFV_VV_RM<LMULInfo m, int sew> {
defm _VV : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, m.vrclass, m,
- Constraint, sew,
- UsesVXRM=0>;
+ "", sew, UsesVXRM=0>;
}
multiclass VPseudoVGTR_EI16_VV<string Constraint = ""> {
@@ -2295,14 +2294,14 @@ multiclass VPseudoVSLD1_VX<string Constraint = ""> {
}
}
-multiclass VPseudoBinaryV_VF<LMULInfo m, FPR_Info f, string Constraint = "", int sew = 0> {
+multiclass VPseudoBinaryV_VF<LMULInfo m, FPR_Info f, int sew> {
defm "_V" # f.FX : VPseudoBinary<m.vrclass, m.vrclass,
- f.fprclass, m, Constraint, sew>;
+ f.fprclass, m, "", sew>;
}
-multiclass VPseudoBinaryV_VF_RM<LMULInfo m, FPR_Info f, string Constraint = "", int sew = 0> {
+multiclass VPseudoBinaryV_VF_RM<LMULInfo m, FPR_Info f, int sew> {
defm "_V" # f.FX : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass,
- f.fprclass, m, Constraint, sew,
+ f.fprclass, m, "", sew,
UsesVXRM=0>;
}
@@ -2348,7 +2347,7 @@ multiclass VPseudoBinaryW_VV<LMULInfo m, bit Commutable = 0> {
Commutable=Commutable>;
}
-multiclass VPseudoBinaryW_VV_RM<LMULInfo m, int sew = 0> {
+multiclass VPseudoBinaryW_VV_RM<LMULInfo m, int sew> {
defm _VV : VPseudoBinaryRoundingMode<m.wvrclass, m.vrclass, m.vrclass, m,
"@earlyclobber $rd", sew, UsesVXRM=0,
TargetConstraintType=3>;
@@ -2364,7 +2363,7 @@ multiclass VPseudoBinaryW_VI<Operand ImmType, LMULInfo m> {
"@earlyclobber $rd", TargetConstraintType=3>;
}
-multiclass VPseudoBinaryW_VF_RM<LMULInfo m, FPR_Info f, int sew = 0> {
+multiclass VPseudoBinaryW_VF_RM<LMULInfo m, FPR_Info f, int sew> {
defm "_V" # f.FX : VPseudoBinaryRoundingMode<m.wvrclass, m.vrclass,
f.fprclass, m,
"@earlyclobber $rd", sew,
@@ -2379,7 +2378,7 @@ multiclass VPseudoBinaryW_WV<LMULInfo m> {
"@earlyclobber $rd", TargetConstraintType=3>;
}
-multiclass VPseudoBinaryW_WV_RM<LMULInfo m, int sew = 0> {
+multiclass VPseudoBinaryW_WV_RM<LMULInfo m, int sew> {
defm _WV : VPseudoBinaryRoundingMode<m.wvrclass, m.wvrclass, m.vrclass, m,
"@earlyclobber $rd", sew, UsesVXRM = 0,
TargetConstraintType = 3>;
@@ -2392,7 +2391,7 @@ multiclass VPseudoBinaryW_WX<LMULInfo m> {
defm "_WX" : VPseudoBinary<m.wvrclass, m.wvrclass, GPR, m, /*Constraint*/ "", TargetConstraintType=3>;
}
-multiclass VPseudoBinaryW_WF_RM<LMULInfo m, FPR_Info f, int sew = 0> {
+multiclass VPseudoBinaryW_WF_RM<LMULInfo m, FPR_Info f, int sew> {
defm "_W" # f.FX : VPseudoBinaryRoundingMode<m.wvrclass, m.wvrclass,
f.fprclass, m,
Constraint="",
@@ -2844,14 +2843,14 @@ multiclass VPseudoVDIV_VV_VX {
multiclass VPseudoVFMUL_VV_VF_RM {
foreach m = MxListF in {
foreach e = SchedSEWSet<m.MX, isF=1>.val in
- defm "" : VPseudoBinaryFV_VV_RM<m, "", sew=e>,
+ defm "" : VPseudoBinaryFV_VV_RM<m, e>,
SchedBinary<"WriteVFMulV", "ReadVFMulV", "ReadVFMulV", m.MX, e,
forceMergeOpRead=true>;
}
foreach f = FPList in {
foreach m = f.MxList in {
- defm "" : VPseudoBinaryV_VF_RM<m, f, "", sew=f.SEW>,
+ defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
SchedBinary<"WriteVFMulF", "ReadVFMulV", "ReadVFMulF", m.MX,
f.SEW, forceMergeOpRead=true>;
}
@@ -2863,7 +2862,7 @@ multiclass VPseudoVFDIV_VV_VF_RM {
defvar mx = m.MX;
defvar sews = SchedSEWSet<mx, isF=1>.val;
foreach e = sews in {
- defm "" : VPseudoBinaryFV_VV_RM<m, "", e>,
+ defm "" : VPseudoBinaryFV_VV_RM<m, e>,
SchedBinary<"WriteVFDivV", "ReadVFDivV", "ReadVFDivV", mx, e,
forceMergeOpRead=true>;
}
@@ -2871,7 +2870,7 @@ multiclass VPseudoVFDIV_VV_VF_RM {
foreach f = FPList in {
foreach m = f.MxList in {
- defm "" : VPseudoBinaryV_VF_RM<m, f, "", f.SEW>,
+ defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
SchedBinary<"WriteVFDivF", "ReadVFDivV", "ReadVFDivF", m.MX, f.SEW,
forceMergeOpRead=true>;
}
@@ -2881,7 +2880,7 @@ multiclass VPseudoVFDIV_VV_VF_RM {
multiclass VPseudoVFRDIV_VF_RM {
foreach f = FPList in {
foreach m = f.MxList in {
- defm "" : VPseudoBinaryV_VF_RM<m, f, "", f.SEW>,
+ defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
SchedBinary<"WriteVFDivF", "ReadVFDivV", "ReadVFDivF", m.MX, f.SEW,
forceMergeOpRead=true>;
}
@@ -2936,14 +2935,14 @@ multiclass VPseudoVMAX_VV_VF {
multiclass VPseudoVALU_VV_VF_RM {
foreach m = MxListF in {
foreach e = SchedSEWSet<m.MX, isF=1>.val in
- defm "" : VPseudoBinaryFV_VV_RM<m, "", sew=e>,
+ defm "" : VPseudoBinaryFV_VV_RM<m, e>,
SchedBinary<"WriteVFALUV", "ReadVFALUV", "ReadVFALUV", m.MX, e,
forceMergeOpRead=true>;
}
foreach f = FPList in {
foreach m = f.MxList in {
- defm "" : VPseudoBinaryV_VF_RM<m, f, "", sew=f.SEW>,
+ defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
SchedBinary<"WriteVFALUF", "ReadVFALUV", "ReadVFALUF", m.MX,
f.SEW, forceMergeOpRead=true>;
}
@@ -2953,7 +2952,7 @@ multiclass VPseudoVALU_VV_VF_RM {
multiclass VPseudoVALU_VF_RM {
foreach f = FPList in {
foreach m = f.MxList in {
- defm "" : VPseudoBinaryV_VF_RM<m, f, "", sew=f.SEW>,
+ defm "" : VPseudoBinaryV_VF_RM<m, f, f.SEW>,
SchedBinary<"WriteVFALUF", "ReadVFALUV", "ReadVFALUF", m.MX,
f.SEW, forceMergeOpRead=true>;
}
@@ -3246,8 +3245,8 @@ multiclass VPseudoTernaryWithPolicyRoundingMode<VReg RetClass,
RegisterClass Op1Class,
DAGOperand Op2Class,
LMULInfo MInfo,
- string Constraint = "",
- int sew = 0,
+ string Constraint,
+ int sew,
bit Commutable = 0,
int TargetConstraintType = 1> {
let VLMul = MInfo.value in {
@@ -3271,7 +3270,7 @@ multiclass VPseudoTernaryV_VV_AAXA<LMULInfo m, string Constraint = ""> {
Constraint, Commutable=1>;
}
-multiclass VPseudoTernaryV_VV_AAXA_RM<LMULInfo m, string Constraint = "", int sew = 0> {
+multiclass VPseudoTernaryV_VV_AAXA_RM<LMULInfo m, string Constraint, int sew> {
defm _VV : VPseudoTernaryWithPolicyRoundingMode<m.vrclass, m.vrclass, m.vrclass, m,
Constraint, sew, Commutable=1>;
}
@@ -3282,7 +3281,7 @@ multiclass VPseudoTernaryV_VX_AAXA<LMULInfo m, string Constraint = ""> {
}
multiclass VPseudoTernaryV_VF_AAXA_RM<LMULInfo m, FPR_Info f,
- string Constraint = "", int sew = 0> {
+ string Constraint, int sew> {
defm "_V" # f.FX : VPseudoTernaryWithPolicyRoundingMode<m.vrclass, f.fprclass,
m.vrclass, m, Constraint,
sew, Commutable=1>;
@@ -3294,7 +3293,7 @@ multiclass VPseudoTernaryW_VV<LMULInfo m, bit Commutable = 0> {
constraint, Commutable=Commutable, TargetConstraintType=3>;
}
-multiclass VPseudoTernaryW_VV_RM<LMULInfo m, int sew = 0> {
+multiclass VPseudoTernaryW_VV_RM<LMULInfo m, int sew> {
defvar constraint = "@earlyclobber $rd";
defm _VV : VPseudoTernaryWithPolicyRoundingMode<m.wvrclass, m.vrclass, m.vrclass, m,
constraint, sew, /* Commutable */ 0,
@@ -3307,7 +3306,7 @@ multiclass VPseudoTernaryW_VX<LMULInfo m> {
constraint, /*Commutable*/ 0, TargetConstraintType=3>;
}
-multiclass VPseudoTernaryW_VF_RM<LMULInfo m, FPR_Info f, int sew = 0> {
+multiclass VPseudoTernaryW_VF_RM<LMULInfo m, FPR_Info f, int sew> {
defvar constraint = "@earlyclobber $rd";
defm "_V" # f.FX : VPseudoTernaryWithPolicyRoundingMode<m.wvrclass, f.fprclass,
m.vrclass, m, constraint,
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
index 4bae0d0e0be03..75fcc1e7cb110 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
@@ -251,11 +251,9 @@ multiclass VPseudoBinaryNoMaskPolicy_Zvk<VReg RetClass,
VReg Op1Class,
DAGOperand Op2Class,
LMULInfo MInfo,
- string Constraint = "",
- int sew = 0> {
- let VLMul = MInfo.value, SEW=sew in {
- defvar suffix = !if(sew, "_" # MInfo.MX # "_E" # sew, "_" # MInfo.MX);
- def suffix : VPseudoBinaryNoMaskPolicy<RetClass, Op1Class, Op2Class,
+ string Constraint = ""> {
+ let VLMul = MInfo.value in {
+ def "_" # MInfo.MX : VPseudoBinaryNoMaskPolicy<RetClass, Op1Class, Op2Class,
Constraint>;
}
}
>From 3ab978622ecd612f3ebb98f00b68782fe8b186de Mon Sep 17 00:00:00 2001
From: Shivam Gupta <shivam98.tkg at gmail.com>
Date: Sat, 8 Jun 2024 12:02:01 +0530
Subject: [PATCH 36/57] [lldb] Remove redundant c_str() calls in stream output
(NFC) (#94839)
Passing the result of c_str() to a stream is slow and redundant. This
change removes unnecessary c_str() calls and uses the string object
directly.
Caught by cppcheck -
lldb/tools/debugserver/source/JSON.cpp:398:19: performance: Passing the
result of c_str() to a stream is slow and redundant. [stlcstrStream]
lldb/tools/debugserver/source/JSON.cpp:408:64: performance: Passing the
result of c_str() to a stream is slow and redundant. [stlcstrStream]
lldb/tools/debugserver/source/JSON.cpp:420:54: performance: Passing the
result of c_str() to a stream is slow and redundant. [stlcstrStream]
lldb/tools/debugserver/source/JSON.cpp:46:13: performance: Passing the
result of c_str() to a stream is slow and redundant. [stlcstrStream]
Fix #91212
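A minimal sketch of the pattern being fixed (standalone, not taken from JSON.cpp): operator<< for std::string writes using the stored length, while passing c_str() selects the const char* overload, which must first re-scan the buffer with strlen.
  #include <iostream>
  #include <string>
  int main() {
    std::string value = "1.5e";
    // Redundant: converts to const char* and forces a strlen at insertion.
    std::cerr << "error: \"" << value.c_str() << "\"\n";
    // Equivalent output without the extra scan, and it also handles
    // strings containing embedded NUL bytes.
    std::cerr << "error: \"" << value << "\"\n";
    return 0;
  }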
---
lldb/tools/debugserver/source/JSON.cpp | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/lldb/tools/debugserver/source/JSON.cpp b/lldb/tools/debugserver/source/JSON.cpp
index 315c52aafc932..5453d857cb214 100644
--- a/lldb/tools/debugserver/source/JSON.cpp
+++ b/lldb/tools/debugserver/source/JSON.cpp
@@ -43,7 +43,7 @@ JSONString::JSONString(const std::string &s)
: JSONValue(JSONValue::Kind::String), m_data(s) {}
void JSONString::Write(std::ostream &s) {
- s << "\"" << json_string_quote_metachars(m_data).c_str() << "\"";
+ s << "\"" << json_string_quote_metachars(m_data) << "\"";
}
uint64_t JSONNumber::GetAsUnsigned() const {
@@ -395,7 +395,7 @@ JSONParser::Token JSONParser::GetToken(std::string &value) {
} else {
error << "error: got exponent character but no exponent digits at "
"offset in float value \""
- << value.c_str() << "\"";
+ << value << "\"";
value = error.str();
return Token::Status;
}
@@ -405,8 +405,7 @@ JSONParser::Token JSONParser::GetToken(std::string &value) {
if (got_frac_digits) {
return Token::Float;
} else {
- error << "error: no digits after decimal point \"" << value.c_str()
- << "\"";
+ error << "error: no digits after decimal point \"" << value << "\"";
value = error.str();
return Token::Status;
}
@@ -417,7 +416,7 @@ JSONParser::Token JSONParser::GetToken(std::string &value) {
// We need at least some integer digits to make an integer
return Token::Integer;
} else {
- error << "error: no digits negate sign \"" << value.c_str() << "\"";
+ error << "error: no digits negate sign \"" << value << "\"";
value = error.str();
return Token::Status;
}
>From e55a6dfcc869e2ad9ce9f582547e0bdf85fc413d Mon Sep 17 00:00:00 2001
From: Daniil Kovalev <dkovalev at accesssoftek.com>
Date: Sat, 8 Jun 2024 09:33:11 +0300
Subject: [PATCH 37/57] Revert "[lld][AArch64][ELF][PAC] Support
`.relr.auth.dyn` section" (#94843)
Reverts llvm/llvm-project#87635
In some corner cases, lld generated an object file with an empty REL
section whose `sh_info` was set to 0. Such a file triggers an lld error
when used as input. See
https://github.com/llvm/llvm-project/pull/87635#issuecomment-2155318065
for details.
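For context on the reverted mechanism: R_AARCH64_AUTH_RELATIVE encodes the signing schema in bits [63:32] of the target word, so only values representable in 32 bits could stay in the compact .relr.auth.dyn form, and wider ones had to migrate to .rela.dyn. A hedged sketch of that fit test (isInt is modeled on llvm::isInt; the constants are illustrative):
  #include <cstdint>
  // Modeled on llvm::isInt<N>: true if x fits in a signed N-bit integer.
  template <unsigned N> static bool isInt(int64_t x) {
    return x >= -(INT64_C(1) << (N - 1)) && x < (INT64_C(1) << (N - 1));
  }
  int main() {
    // Fits in 32 bits: could keep the compact RELR-style implicit addend.
    int64_t small = INT64_C(0x7FFFFFFF);
    // Too wide: needs an explicit RELA addend alongside the signing schema.
    int64_t wide = INT64_C(0x123456789A);
    return (isInt<32>(small) && !isInt<32>(wide)) ? 0 : 1;
  }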
---
lld/ELF/Arch/AArch64.cpp | 13 ---
lld/ELF/Relocations.cpp | 10 +-
lld/ELF/SyntheticSections.cpp | 24 ++---
lld/ELF/SyntheticSections.h | 15 +--
lld/ELF/Writer.cpp | 35 ------
lld/test/ELF/aarch64-reloc-pauth.s | 166 ++++++-----------------------
6 files changed, 47 insertions(+), 216 deletions(-)
diff --git a/lld/ELF/Arch/AArch64.cpp b/lld/ELF/Arch/AArch64.cpp
index cf5c2380690f1..47e6ea1ff7756 100644
--- a/lld/ELF/Arch/AArch64.cpp
+++ b/lld/ELF/Arch/AArch64.cpp
@@ -429,19 +429,6 @@ void AArch64::relocate(uint8_t *loc, const Relocation &rel,
case R_AARCH64_PREL64:
write64(loc, val);
break;
- case R_AARCH64_AUTH_ABS64:
- // If val is wider than 32 bits, the relocation must have been moved from
- // .relr.auth.dyn to .rela.dyn, and the addend write is not needed.
- //
- // If val fits in 32 bits, we have two potential scenarios:
- // * True RELR: Write the 32-bit `val`.
- // * RELA: Even if the value now fits in 32 bits, it might have been
- // converted from RELR during an iteration in
- // finalizeAddressDependentContent(). Writing the value is harmless
- // because dynamic linking ignores it.
- if (isInt<32>(val))
- write32(loc, val);
- break;
case R_AARCH64_ADD_ABS_LO12_NC:
or32AArch64Imm(loc, val);
break;
diff --git a/lld/ELF/Relocations.cpp b/lld/ELF/Relocations.cpp
index 2c02c2e572bfd..04db413a6609f 100644
--- a/lld/ELF/Relocations.cpp
+++ b/lld/ELF/Relocations.cpp
@@ -898,9 +898,9 @@ static void addRelativeReloc(InputSectionBase &isec, uint64_t offsetInSec,
isec.addReloc({expr, type, offsetInSec, addend, &sym});
if (shard)
part.relrDyn->relocsVec[parallel::getThreadIndex()].push_back(
- {&isec, isec.relocs().size() - 1});
+ {&isec, offsetInSec});
else
- part.relrDyn->relocs.push_back({&isec, isec.relocs().size() - 1});
+ part.relrDyn->relocs.push_back({&isec, offsetInSec});
return;
}
part.relaDyn->addRelativeReloc<shard>(target->relativeRel, isec, offsetInSec,
@@ -1154,12 +1154,6 @@ void RelocationScanner::processAux(RelExpr expr, RelType type, uint64_t offset,
// relative relocation. Use a symbolic relocation instead.
if (sym.isPreemptible) {
part.relaDyn->addSymbolReloc(type, *sec, offset, sym, addend, type);
- } else if (part.relrAuthDyn && sec->addralign >= 2 && offset % 2 == 0) {
- // When symbol values are determined in
- // finalizeAddressDependentContent, some .relr.auth.dyn relocations
- // may be moved to .rela.dyn.
- sec->addReloc({expr, type, offset, addend, &sym});
- part.relrAuthDyn->relocs.push_back({sec, sec->relocs().size() - 1});
} else {
part.relaDyn->addReloc({R_AARCH64_AUTH_RELATIVE, sec, offset,
DynamicReloc::AddendOnlyWithTargetVA, sym,
diff --git a/lld/ELF/SyntheticSections.cpp b/lld/ELF/SyntheticSections.cpp
index ad280289cebf9..cc423d152e912 100644
--- a/lld/ELF/SyntheticSections.cpp
+++ b/lld/ELF/SyntheticSections.cpp
@@ -1420,12 +1420,6 @@ DynamicSection<ELFT>::computeContents() {
addInt(config->useAndroidRelrTags ? DT_ANDROID_RELRENT : DT_RELRENT,
sizeof(Elf_Relr));
}
- if (part.relrAuthDyn && part.relrAuthDyn->getParent() &&
- !part.relrAuthDyn->relocs.empty()) {
- addInSec(DT_AARCH64_AUTH_RELR, *part.relrAuthDyn);
- addInt(DT_AARCH64_AUTH_RELRSZ, part.relrAuthDyn->getParent()->size);
- addInt(DT_AARCH64_AUTH_RELRENT, sizeof(Elf_Relr));
- }
if (isMain && in.relaPlt->isNeeded()) {
addInSec(DT_JMPREL, *in.relaPlt);
entries.emplace_back(DT_PLTRELSZ, addPltRelSz());
@@ -1737,13 +1731,10 @@ template <class ELFT> void RelocationSection<ELFT>::writeTo(uint8_t *buf) {
}
}
-RelrBaseSection::RelrBaseSection(unsigned concurrency, bool isAArch64Auth)
- : SyntheticSection(
- SHF_ALLOC,
- isAArch64Auth
- ? SHT_AARCH64_AUTH_RELR
- : (config->useAndroidRelrTags ? SHT_ANDROID_RELR : SHT_RELR),
- config->wordsize, isAArch64Auth ? ".relr.auth.dyn" : ".relr.dyn"),
+RelrBaseSection::RelrBaseSection(unsigned concurrency)
+ : SyntheticSection(SHF_ALLOC,
+ config->useAndroidRelrTags ? SHT_ANDROID_RELR : SHT_RELR,
+ config->wordsize, ".relr.dyn"),
relocsVec(concurrency) {}
void RelrBaseSection::mergeRels() {
@@ -2011,8 +2002,8 @@ bool AndroidPackedRelocationSection<ELFT>::updateAllocSize() {
}
template <class ELFT>
-RelrSection<ELFT>::RelrSection(unsigned concurrency, bool isAArch64Auth)
- : RelrBaseSection(concurrency, isAArch64Auth) {
+RelrSection<ELFT>::RelrSection(unsigned concurrency)
+ : RelrBaseSection(concurrency) {
this->entsize = config->wordsize;
}
@@ -4783,9 +4774,6 @@ template <class ELFT> void elf::createSyntheticSections() {
if (config->relrPackDynRelocs) {
part.relrDyn = std::make_unique<RelrSection<ELFT>>(threadCount);
add(*part.relrDyn);
- part.relrAuthDyn = std::make_unique<RelrSection<ELFT>>(
- threadCount, /*isAArch64Auth=*/true);
- add(*part.relrAuthDyn);
}
if (!config->relocatable) {
diff --git a/lld/ELF/SyntheticSections.h b/lld/ELF/SyntheticSections.h
index eaa09ea7194fb..34949025a45f7 100644
--- a/lld/ELF/SyntheticSections.h
+++ b/lld/ELF/SyntheticSections.h
@@ -548,9 +548,7 @@ class RelocationBaseSection : public SyntheticSection {
static bool classof(const SectionBase *d) {
return SyntheticSection::classof(d) &&
(d->type == llvm::ELF::SHT_RELA || d->type == llvm::ELF::SHT_REL ||
- d->type == llvm::ELF::SHT_RELR ||
- (d->type == llvm::ELF::SHT_AARCH64_AUTH_RELR &&
- config->emachine == llvm::ELF::EM_AARCH64));
+ d->type == llvm::ELF::SHT_RELR);
}
int32_t dynamicTag, sizeDynamicTag;
SmallVector<DynamicReloc, 0> relocs;
@@ -598,17 +596,15 @@ class AndroidPackedRelocationSection final : public RelocationBaseSection {
};
struct RelativeReloc {
- uint64_t getOffset() const {
- return inputSec->getVA(inputSec->relocs()[relocIdx].offset);
- }
+ uint64_t getOffset() const { return inputSec->getVA(offsetInSec); }
const InputSectionBase *inputSec;
- size_t relocIdx;
+ uint64_t offsetInSec;
};
class RelrBaseSection : public SyntheticSection {
public:
- RelrBaseSection(unsigned concurrency, bool isAArch64Auth = false);
+ RelrBaseSection(unsigned concurrency);
void mergeRels();
bool isNeeded() const override {
return !relocs.empty() ||
@@ -626,7 +622,7 @@ template <class ELFT> class RelrSection final : public RelrBaseSection {
using Elf_Relr = typename ELFT::Relr;
public:
- RelrSection(unsigned concurrency, bool isAArch64Auth = false);
+ RelrSection(unsigned concurrency);
bool updateAllocSize() override;
size_t getSize() const override { return relrRelocs.size() * this->entsize; }
@@ -1464,7 +1460,6 @@ struct Partition {
std::unique_ptr<PackageMetadataNote> packageMetadataNote;
std::unique_ptr<RelocationBaseSection> relaDyn;
std::unique_ptr<RelrBaseSection> relrDyn;
- std::unique_ptr<RelrBaseSection> relrAuthDyn;
std::unique_ptr<VersionDefinitionSection> verDef;
std::unique_ptr<SyntheticSection> verNeed;
std::unique_ptr<VersionTableSection> verSym;
diff --git a/lld/ELF/Writer.cpp b/lld/ELF/Writer.cpp
index 0aceb941a1dc9..640cb2a445f7d 100644
--- a/lld/ELF/Writer.cpp
+++ b/lld/ELF/Writer.cpp
@@ -1458,32 +1458,9 @@ template <class ELFT> void Writer<ELFT>::finalizeAddressDependentContent() {
in.mipsGot->updateAllocSize();
for (Partition &part : partitions) {
- // The R_AARCH64_AUTH_RELATIVE has a smaller addend field as bits [63:32]
- // encode the signing schema. We've put relocations in .relr.auth.dyn
- // during RelocationScanner::processAux, but the target VA for some of
- // them might be wider than 32 bits. We can only know the final VA at this
- // point, so move relocations with large values from .relr.auth.dyn to
- // .rela.dyn. See also AArch64::relocate.
- if (part.relrAuthDyn) {
- auto it = llvm::remove_if(
- part.relrAuthDyn->relocs, [&part](const RelativeReloc &elem) {
- const Relocation &reloc = elem.inputSec->relocs()[elem.relocIdx];
- if (isInt<32>(reloc.sym->getVA(reloc.addend)))
- return false;
- part.relaDyn->addReloc({R_AARCH64_AUTH_RELATIVE, elem.inputSec,
- reloc.offset,
- DynamicReloc::AddendOnlyWithTargetVA,
- *reloc.sym, reloc.addend, R_ABS});
- return true;
- });
- changed |= (it != part.relrAuthDyn->relocs.end());
- part.relrAuthDyn->relocs.erase(it, part.relrAuthDyn->relocs.end());
- }
changed |= part.relaDyn->updateAllocSize();
if (part.relrDyn)
changed |= part.relrDyn->updateAllocSize();
- if (part.relrAuthDyn)
- changed |= part.relrAuthDyn->updateAllocSize();
if (part.memtagGlobalDescriptors)
changed |= part.memtagGlobalDescriptors->updateAllocSize();
}
@@ -1647,14 +1624,6 @@ static void removeUnusedSyntheticSections() {
auto *sec = cast<SyntheticSection>(s);
if (sec->getParent() && sec->isNeeded())
return false;
- // .relr.auth.dyn relocations may be moved to .rela.dyn in
- // finalizeAddressDependentContent, making .rela.dyn no longer empty.
- // Conservatively keep .rela.dyn. .relr.auth.dyn can be made empty, but
- // we would fail to remove it here.
- if (config->emachine == EM_AARCH64 && config->relrPackDynRelocs)
- if (auto *relSec = dyn_cast<RelocationBaseSection>(sec))
- if (relSec == mainPart->relaDyn.get())
- return false;
unused.insert(sec);
return true;
});
@@ -1967,10 +1936,6 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
part.relrDyn->mergeRels();
finalizeSynthetic(part.relrDyn.get());
}
- if (part.relrAuthDyn) {
- part.relrAuthDyn->mergeRels();
- finalizeSynthetic(part.relrAuthDyn.get());
- }
finalizeSynthetic(part.dynSymTab.get());
finalizeSynthetic(part.gnuHashTab.get());
diff --git a/lld/test/ELF/aarch64-reloc-pauth.s b/lld/test/ELF/aarch64-reloc-pauth.s
index f1ce29eaae467..b603d8ffdcabd 100644
--- a/lld/test/ELF/aarch64-reloc-pauth.s
+++ b/lld/test/ELF/aarch64-reloc-pauth.s
@@ -1,13 +1,11 @@
# REQUIRES: aarch64
-# RUN: rm -rf %t && split-file %s %t && cd %t
+# RUN: llvm-mc -filetype=obj -triple=aarch64 %p/Inputs/shared2.s -o %t.a.o
+# RUN: ld.lld -shared %t.a.o -soname=so -o %t.a.so
+# RUN: llvm-mc -filetype=obj -triple=aarch64 %s -o %t.o
-# RUN: llvm-mc -filetype=obj -triple=aarch64 %p/Inputs/shared2.s -o a.o
-# RUN: ld.lld -shared a.o -soname=so -o a.so
-# RUN: llvm-mc -filetype=obj -triple=aarch64 main.s -o main.o
-
-# RUN: ld.lld -pie main.o a.so -o main
-# RUN: llvm-readobj -r main | FileCheck --check-prefix=UNPACKED %s
+# RUN: ld.lld -pie %t.o %t.a.so -o %t
+# RUN: llvm-readobj -r %t | FileCheck --check-prefix=UNPACKED %s
# UNPACKED: Section ({{.+}}) .rela.dyn {
# UNPACKED-NEXT: 0x30470 R_AARCH64_AUTH_RELATIVE - 0x1
@@ -23,8 +21,8 @@
# UNPACKED-NEXT: 0x304B0 R_AARCH64_AUTH_ABS64 bar2 0x0
# UNPACKED-NEXT: }
-# RUN: ld.lld main.o a.so -o main.nopie
-# RUN: llvm-readobj -r main.nopie | FileCheck --check-prefix=NOPIE %s
+# RUN: ld.lld %t.o %t.a.so -o %t.nopie
+# RUN: llvm-readobj -r %t.nopie | FileCheck --check-prefix=NOPIE %s
# NOPIE: Section ({{.+}}) .rela.dyn {
# NOPIE: 0x230460 R_AARCH64_AUTH_RELATIVE - 0x200001
@@ -40,95 +38,67 @@
# NOPIE-NEXT: 0x2304A0 R_AARCH64_AUTH_ABS64 bar2 0x0
# NOPIE-NEXT: }
-# RUN: ld.lld -pie -z pack-relative-relocs main.o a.so -o main.pie
-# RUN: llvm-readelf -S -d -r -x .test main.pie | FileCheck --check-prefixes=RELR,HEX %s
-
-# RELR: Section Headers:
-# RELR-NEXT: Name Type Address Off Size ES Flg Lk Inf Al
-# RELR: .rela.dyn RELA {{0*}}[[ADDR1:.+]] {{0*}}[[ADDR1]] 000090 18 A 1 0 8
-# RELR: .relr.auth.dyn AARCH64_AUTH_RELR {{0*}}[[ADDR2:.+]] {{0*}}[[ADDR2]] 000018 08 A 0 0 8
-
-# RELR: Dynamic section at offset {{.+}} contains 16 entries
-# RELR: 0x0000000070000012 (AARCH64_AUTH_RELR) 0x[[ADDR2]]
-# RELR-NEXT: 0x0000000070000011 (AARCH64_AUTH_RELRSZ) 24 (bytes)
-# RELR-NEXT: 0x0000000070000013 (AARCH64_AUTH_RELRENT) 8 (bytes)
-
-## Decoded SHT_RELR section is same as UNPACKED,
-## but contains only the relative relocations.
-## Any relative relocations with odd offset or value wider than 32 bits stay in SHT_RELA.
-
-# RELR: Relocation section '.rela.dyn' at offset 0x[[ADDR1]] contains 6 entries:
-# RELR-NEXT: Offset Info Type Symbol's Value Symbol's Name + Addend
-# RELR-NEXT: 0000000000030460 0000000000000411 R_AARCH64_AUTH_RELATIVE 123456789a
-# RELR-NEXT: 0000000000030468 0000000000000411 R_AARCH64_AUTH_RELATIVE ffffffedcba98766
-# RELR-NEXT: 0000000000030470 0000000000000411 R_AARCH64_AUTH_RELATIVE 8003043f
-# RELR-NEXT: 0000000000030489 0000000000000411 R_AARCH64_AUTH_RELATIVE 4
-# RELR-NEXT: 0000000000030478 0000000100000244 R_AARCH64_AUTH_ABS64 0000000000000000 zed2 + 1111
-# RELR-NEXT: 0000000000030480 0000000200000244 R_AARCH64_AUTH_ABS64 0000000000000000 bar2 + 0
-# RELR-EMPTY:
-# RELR-NEXT: Relocation section '.relr.auth.dyn' at offset 0x[[ADDR2]] contains 5 entries:
-# RELR-NEXT: Index: Entry Address Symbolic Address
-# RELR-NEXT: 0000: 0000000000030440 0000000000030440 $d.0
-# RELR-NEXT: 0001: 000000000000000f 0000000000030448 $d.0 + 0x8
-# RELR-NEXT: 0000000000030450 $d.0 + 0x10
-# RELR-NEXT: 0000000000030458 $d.0 + 0x18
-# RELR-NEXT: 0002: 0000000000030492 0000000000030492 $d.0 + 0x52
+# RUN: ld.lld -pie %t.o %t.a.so -o %t.pie
+# RUN: llvm-readelf -S -d -r -x .test %t.pie | FileCheck --check-prefixes=PIE,HEX %s
+
+# PIE: Section Headers:
+# PIE-NEXT: Name Type Address Off Size ES Flg Lk Inf Al
+# PIE: .rela.dyn RELA {{0*}}[[#%x,ADDR1:]]
+# PIE-SAME: {{0*}}[[#ADDR1]] 000108 18 A 1 0 8
+
+# PIE: Relocation section '.rela.dyn' at offset 0x[[#ADDR1]] contains 11 entries:
+# PIE-NEXT: Offset Info Type Symbol's Value Symbol's Name + Addend
+# PIE-NEXT: 0000000000030470 0000000000000411 R_AARCH64_AUTH_RELATIVE 1
+# PIE-NEXT: 0000000000030478 0000000000000411 R_AARCH64_AUTH_RELATIVE 30472
+# PIE-NEXT: 0000000000030480 0000000000000411 R_AARCH64_AUTH_RELATIVE fffffffffffffffd
+# PIE-NEXT: 0000000000030488 0000000000000411 R_AARCH64_AUTH_RELATIVE 12345678
+# PIE-NEXT: 0000000000030490 0000000000000411 R_AARCH64_AUTH_RELATIVE 123456789a
+# PIE-NEXT: 0000000000030498 0000000000000411 R_AARCH64_AUTH_RELATIVE ffffffedcba98766
+# PIE-NEXT: 00000000000304a0 0000000000000411 R_AARCH64_AUTH_RELATIVE 8003046f
+# PIE-NEXT: 00000000000304b9 0000000000000411 R_AARCH64_AUTH_RELATIVE 4
+# PIE-NEXT: 00000000000304c2 0000000000000411 R_AARCH64_AUTH_RELATIVE 30475
+# PIE-NEXT: 00000000000304a8 0000000100000244 R_AARCH64_AUTH_ABS64 0000000000000000 zed2 + 1111
+# PIE-NEXT: 00000000000304b0 0000000200000244 R_AARCH64_AUTH_ABS64 0000000000000000 bar2 + 0
# HEX: Hex dump of section '.test':
-# HEX-NEXT: 0x00030440 01000000 2a000020 42040300 2b000000
-## ^^^^^^^^ Implicit val = 1 = __ehdr_start + 1
+# HEX-NEXT: 0x00030470 00000000 2a000020 00000000 2b000000
## ^^^^ Discr = 42
## ^^ Key (bits 5..6) = DA
-## ^^^^^^^^ Implicit val = 0x30442 = 0x30440 + 2 = .test + 2
## ^^^^ Discr = 43
## ^^ Key (bits 5..6) = IA
-# HEX-NEXT: 0x00030450 fdffffff 2c000080 78563412 2d000020
-## ^^^^^^^^ Implicit val = -3 = __ehdr_start - 3
+# HEX-NEXT: 0x00030480 00000000 2c000080 00000000 2d000020
## ^^^^ Discr = 44
## ^^ Key (bits 5..6) = IA
## ^^ Addr diversity (bit 7) = true
-## ^^^^^^^^ Implicit val = 0x12345678 = __ehdr_start + 0x12345678
## ^^^^ Discr = 45
## ^^ Key (bits 5..6) = DA
-# HEX-NEXT: 0x00030460 00000000 2e000020 00000000 2f000020
-## ^^^^^^^^ No implicit val (rela reloc due val wider than 32 bits)
+# HEX-NEXT: 0x00030490 00000000 2e000020 00000000 2f000020
## ^^^^ Discr = 46
## ^^ Key (bits 5..6) = DA
-## ^^^^^^^^ No implicit val (rela reloc due to val wider than 32 bits)
## ^^^^ Discr = 47
## ^^ Key (bits 5..6) = DA
-# HEX-NEXT: 0x00030470 00000000 30000020 00000000 31000020
-## ^^^^^^^^ No implicit val (rela reloc due val wider than 32 bits)
+# HEX-NEXT: 0x000304a0 00000000 30000020 00000000 31000020
## ^^^^ Discr = 48
## ^^ Key (bits 5..6) = DA
-## ^^^^^^^^ No implicit val (rela reloc due to a preemptible symbol)
## ^^^^ Discr = 49
## ^^ Key (bits 5..6) = DA
-# HEX-NEXT: 0x00030480 00000000 32000000 77000000 00330000
-## ^^^^^^^^ No implicit val (rela reloc due to a preemptible symbol)
+# HEX-NEXT: 0x000304b0 00000000 32000000 77000000 00330000
## ^^^^ Discr = 50
## ^^ Key (bits 5..6) = IA
-## ^^^^^^ ^^ No implicit val (rela reloc due to odd offset)
## ^^^^ Discr = 51
-# HEX-NEXT: 0x00030490 20774504 03003400 0020{{\ }}
+# HEX-NEXT: 0x000304c0 20770000 00003400 0020{{\ }}
## ^^ Key (bits 5..6) = DA
-## ^^^^ ^^^^ Implicit val = 0x30445 = 0x30440 + 5 = .test + 5
## ^^^^ Discr = 52
## ^^ Key (bits 5..6) = DA
-#--- main.s
-
.section .test, "aw"
.p2align 3
.quad (__ehdr_start + 1)@AUTH(da,42)
.quad (.test + 2)@AUTH(ia,43)
.quad (__ehdr_start - 3)@AUTH(ia,44,addr)
.quad (__ehdr_start + 0x12345678)@AUTH(da,45)
-## Addend wider than 32 bits, not enough room for storing implicitly, would go to rela
.quad (__ehdr_start + 0x123456789A)@AUTH(da,46)
-## Negative addend wider than 32 bits, not enough room for storing implicitly, would go to rela
.quad (__ehdr_start - 0x123456789A)@AUTH(da,47)
-## INT32_MAX plus non-zero .test is wider than 32 bits, not enough room for storing implicitly, would go to rela
.quad (.test + 0x7FFFFFFF)@AUTH(da,48)
.quad (zed2 + 0x1111)@AUTH(da,49)
.quad bar2 at AUTH(ia,50)
@@ -136,71 +106,3 @@
.quad (__ehdr_start + 4)@AUTH(da,51)
.byte 0x77
.quad (.test + 5)@AUTH(da,52)
-
-#--- empty-relr.s
-
-## .relr.auth.dyn relocations that do not fit 32 bits are moved to .rela.dyn.
-## In this case .relr.auth.dyn will be made empty, but
-## removeUnusedSyntheticSections fails to remove the section.
-
-# RUN: llvm-mc -filetype=obj -triple=aarch64 empty-relr.s -o empty-relr.o
-# RUN: ld.lld -pie -z pack-relative-relocs empty-relr.o -o empty-relr
-# RUN: llvm-readelf -S -d -r empty-relr | FileCheck --check-prefixes=EMPTY-RELR %s
-
-# EMPTY-RELR: Section Headers:
-# EMPTY-RELR-NEXT: Name Type Address Off Size ES Flg Lk Inf Al
-# EMPTY-RELR: .rela.dyn RELA {{0*}}[[ADDR1:.+]] {{0*}}[[ADDR1]] 000018 18 A 0 0 8
-# EMPTY-RELR: .relr.auth.dyn AARCH64_AUTH_RELR {{0*}}[[ADDR2:.+]] {{0*}}[[ADDR2]] 000000 08 A 0 0 8
-
-# EMPTY-RELR: Dynamic section at offset {{.+}} contains 12 entries
-# EMPTY-RELR-NOT: (AARCH64_AUTH_RELR)
-# EMPTY-RELR-NOT: (AARCH64_AUTH_RELRSZ)
-# EMPTY-RELR-NOT: (AARCH64_AUTH_RELRENT)
-# EMPTY-RELR: 0x0000000000000007 (RELA) 0x[[ADDR1]]
-# EMPTY-RELR-NEXT: 0x0000000000000008 (RELASZ) 24 (bytes)
-# EMPTY-RELR-NEXT: 0x0000000000000009 (RELAENT) 24 (bytes)
-
-# EMPTY-RELR: Relocation section '.rela.dyn' at offset {{.+}} contains 1 entries:
-# EMPTY-RELR-NEXT: Offset Info Type Symbol's Value Symbol's Name + Addend
-# EMPTY-RELR-NEXT: 0000000000030320 0000000000000411 R_AARCH64_AUTH_RELATIVE 8003031f
-# EMPTY-RELR-EMPTY:
-# EMPTY-RELR-NEXT: Relocation section '.relr.auth.dyn' at offset {{.+}} contains 0 entries:
-# EMPTY-RELR-NEXT: Index: Entry Address Symbolic Address
-
-.section .test, "aw"
-.p2align 3
-.quad (.test + 0x7FFFFFFF)@AUTH(da,42)
-
-#--- empty-rela.s
-
-## .relr.auth.dyn relocations that do not fit 32 bits are moved to .rela.dyn.
-## If this scenario does not happen, .rela.dyn will remain empty,
-## but removeUnusedSyntheticSections fails to remove the section.
-
-# RUN: llvm-mc -filetype=obj -triple=aarch64 empty-rela.s -o empty-rela.o
-# RUN: ld.lld -pie -z pack-relative-relocs empty-rela.o -o empty-rela
-# RUN: llvm-readelf -S -d -r empty-rela | FileCheck --check-prefixes=EMPTY-RELA %s
-
-# EMPTY-RELA: Section Headers:
-# EMPTY-RELA-NEXT: Name Type Address Off Size ES Flg Lk Inf Al
-# EMPTY-RELA: .rela.dyn RELA {{0*}}[[ADDR1:.+]] {{0*}}[[ADDR1]] 000000 18 A 0 0 8
-# EMPTY-RELA: .relr.auth.dyn AARCH64_AUTH_RELR {{0*}}[[ADDR2:.+]] {{0*}}[[ADDR2]] 000008 08 A 0 0 8
-
-# EMPTY-RELA: Dynamic section at offset {{.+}} contains 12 entries
-# EMPTY-RELA-NOT: (RELR)
-# EMPTY-RELA-NOT: (RELRSZ)
-# EMPTY-RELA-NOT: (RELRENT)
-# EMPTY-RELA: 0x0000000070000012 (AARCH64_AUTH_RELR) 0x[[ADDR2]]
-# EMPTY-RELA-NEXT: 0x0000000070000011 (AARCH64_AUTH_RELRSZ) 8 (bytes)
-# EMPTY-RELA-NEXT: 0x0000000070000013 (AARCH64_AUTH_RELRENT) 8 (bytes)
-
-# EMPTY-RELA: Relocation section '.rela.dyn' at offset {{.+}} contains 0 entries:
-# EMPTY-RELA-NEXT: Offset Info Type Symbol's Value Symbol's Name
-# EMPTY-RELA-EMPTY:
-# EMPTY-RELA-NEXT: Relocation section '.relr.auth.dyn' at offset {{.+}} contains 1 entries:
-# EMPTY-RELA-NEXT: Index: Entry Address Symbolic Address
-# EMPTY-RELA-NEXT: 0000: 0000000000030310 0000000000030310 $d.0
-
-.section .test, "aw"
-.p2align 3
-.quad (.test + 0x12345678)@AUTH(da,42)
>From 93be6fe142471102d82501efa7593df0c78a63db Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Fri, 7 Jun 2024 23:34:17 -0700
Subject: [PATCH 38/57] [RISCV] Replace VPseudoBinaryFV_VV with
VPseudoBinaryV_VV. NFC
---
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td | 9 ++-------
1 file changed, 2 insertions(+), 7 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 1a5fc1c20865e..d081433046f72 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -2245,11 +2245,6 @@ multiclass VPseudoBinaryV_VV_RM<LMULInfo m, string Constraint = "", bit Commutab
Commutable=Commutable>;
}
-// Similar to VPseudoBinaryV_VV, but uses MxListF.
-multiclass VPseudoBinaryFV_VV<LMULInfo m, int sew> {
- defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m, "", sew>;
-}
-
multiclass VPseudoBinaryFV_VV_RM<LMULInfo m, int sew> {
defm _VV : VPseudoBinaryRoundingMode<m.vrclass, m.vrclass, m.vrclass, m,
"", sew, UsesVXRM=0>;
@@ -2901,7 +2896,7 @@ multiclass VPseudoVALU_VV_VX {
multiclass VPseudoVSGNJ_VV_VF {
foreach m = MxListF in {
foreach e = SchedSEWSet<m.MX, isF=1>.val in
- defm "" : VPseudoBinaryFV_VV<m, sew=e>,
+ defm "" : VPseudoBinaryV_VV<m, sew=e>,
SchedBinary<"WriteVFSgnjV", "ReadVFSgnjV", "ReadVFSgnjV", m.MX,
e, forceMergeOpRead=true>;
}
@@ -2918,7 +2913,7 @@ multiclass VPseudoVSGNJ_VV_VF {
multiclass VPseudoVMAX_VV_VF {
foreach m = MxListF in {
foreach e = SchedSEWSet<m.MX, isF=1>.val in
- defm "" : VPseudoBinaryFV_VV<m, sew=e>,
+ defm "" : VPseudoBinaryV_VV<m, sew=e>,
SchedBinary<"WriteVFMinMaxV", "ReadVFMinMaxV", "ReadVFMinMaxV",
m.MX, e, forceMergeOpRead=true>;
}
>From 0871fb9c52598110f2aaa002eb827fa9f41c7daf Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper at sifive.com>
Date: Fri, 7 Jun 2024 23:57:43 -0700
Subject: [PATCH 39/57] [RISCV] Remove unnecessary setting of parameter with
same default value. NFC
---
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index d081433046f72..1af37322ac02e 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -3291,21 +3291,21 @@ multiclass VPseudoTernaryW_VV<LMULInfo m, bit Commutable = 0> {
multiclass VPseudoTernaryW_VV_RM<LMULInfo m, int sew> {
defvar constraint = "@earlyclobber $rd";
defm _VV : VPseudoTernaryWithPolicyRoundingMode<m.wvrclass, m.vrclass, m.vrclass, m,
- constraint, sew, /* Commutable */ 0,
+ constraint, sew,
TargetConstraintType=3>;
}
multiclass VPseudoTernaryW_VX<LMULInfo m> {
defvar constraint = "@earlyclobber $rd";
defm "_VX" : VPseudoTernaryWithPolicy<m.wvrclass, GPR, m.vrclass, m,
- constraint, /*Commutable*/ 0, TargetConstraintType=3>;
+ constraint, TargetConstraintType=3>;
}
multiclass VPseudoTernaryW_VF_RM<LMULInfo m, FPR_Info f, int sew> {
defvar constraint = "@earlyclobber $rd";
defm "_V" # f.FX : VPseudoTernaryWithPolicyRoundingMode<m.wvrclass, f.fprclass,
m.vrclass, m, constraint,
- sew, /* Commutable */ 0,
+ sew,
TargetConstraintType=3>;
}
>From 2d8cb0515b089e6fe238e68f99b347d66c7e21d4 Mon Sep 17 00:00:00 2001
From: Ben Barham <ben_barham at apple.com>
Date: Sat, 8 Jun 2024 00:32:35 -0700
Subject: [PATCH 40/57] [Support] Do not use `llvm::size` in `getLoopPreheader`
(#94540)
`BlockT *LoopBase<BlockT, LoopT>::getLoopPreheader()` was changed in
7243607867393a2b8ccd477e95e6f62d00f3206f to use `llvm::size` rather than
checking that `child_begin() + 1 == child_end()`. `llvm::size`
requires that `std::distance` be O(1) and hence that clients support
random access. Use `llvm::hasSingleElement` instead.
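As a minimal sketch of the idea (hypothetical helper name; the real
function lives in llvm/ADT/STLExtras.h), checking for a single element
needs only one forward step, so it stays O(1) for any forward range:
```
#include <iterator>

// Advance one step from begin() and check we reached end(). Unlike
// llvm::size, this never computes a full std::distance, so it does not
// require random-access iterators.
template <typename ContainerTy> bool hasSingleElementSketch(ContainerTy &&C) {
  auto B = std::begin(C), E = std::end(C);
  return B != E && std::next(B) == E;
}
```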
---
llvm/include/llvm/Support/GenericLoopInfoImpl.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/include/llvm/Support/GenericLoopInfoImpl.h b/llvm/include/llvm/Support/GenericLoopInfoImpl.h
index 1e0d0ee446fc4..d19022729ace3 100644
--- a/llvm/include/llvm/Support/GenericLoopInfoImpl.h
+++ b/llvm/include/llvm/Support/GenericLoopInfoImpl.h
@@ -208,7 +208,7 @@ BlockT *LoopBase<BlockT, LoopT>::getLoopPreheader() const {
return nullptr;
// Make sure there is only one exit out of the preheader.
- if (llvm::size(llvm::children<BlockT *>(Out)) != 1)
+ if (!llvm::hasSingleElement(llvm::children<BlockT *>(Out)))
return nullptr; // Multiple exits from the block, must not be a preheader.
// The predecessor has exactly one successor, so it is a preheader.
>From 1bb726b7416d4464d1c4d68e7ea61883a815c966 Mon Sep 17 00:00:00 2001
From: Jonas Paulsson <paulson1 at linux.ibm.com>
Date: Sat, 8 Jun 2024 09:37:23 +0200
Subject: [PATCH 41/57] [SystemZ] Fix handling of triples.
Some Ubuntu builds were broken after 20d497c "[Driver] Remove unneeded
*-linux-gnu after D158183".
This patch by Fangrui Song fixes this by special-casing Debian and Ubuntu
in config.guess, so that s390/s390x systems there report a `-linux-gnu`
triple rather than `-ibm-linux`.
---
llvm/cmake/config.guess | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/llvm/cmake/config.guess b/llvm/cmake/config.guess
index f489623677694..2444ed7f5792b 100644
--- a/llvm/cmake/config.guess
+++ b/llvm/cmake/config.guess
@@ -4,7 +4,7 @@
# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011 Free Software Foundation, Inc.
-timestamp='2011-08-20'
+timestamp='2024-06-07'
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
@@ -1028,7 +1028,11 @@ EOF
echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
exit ;;
s390:Linux:*:* | s390x:Linux:*:*)
- echo ${UNAME_MACHINE}-ibm-linux
+ if [ "$(grep -Ei 'debian|ubuntu' /etc/lsb-release)" ]; then
+ echo ${UNAME_MACHINE}-linux-gnu
+ else
+ echo ${UNAME_MACHINE}-ibm-linux
+ fi
exit ;;
sh64*:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
>From 7d71a9585fe8a9ab5e591a9f86d59602928056a7 Mon Sep 17 00:00:00 2001
From: Matthias Springer <me at m-sp.org>
Date: Sat, 8 Jun 2024 10:26:17 +0200
Subject: [PATCH 42/57] [mlir][Transforms][NFC] `GreedyPatternRewriteDriver`:
Use composition instead of inheritance (#92785)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
This commit simplifies the design of the `GreedyPatternRewriteDriver`
class. This class used to inherit from both `PatternRewriter` and
`RewriterBase::Listener` and then attached itself as a listener.
In the new design, the class has a `PatternRewriter` field instead of
inheriting from `PatternRewriter`; favoring composition over inheritance
like this is generally preferred in object-oriented design.
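A schematic sketch of the change (class and member names simplified; see
the diff below for the real code):
```
#include "mlir/IR/PatternMatch.h"

// Before: class Driver : public PatternRewriter,
//                        public RewriterBase::Listener { ... };
// After: the driver owns a rewriter and listens to it instead.
class DriverSketch : public mlir::RewriterBase::Listener {
public:
  explicit DriverSketch(mlir::MLIRContext *ctx) : rewriter(ctx) {
    rewriter.setListener(this); // IR notifications flow back to the driver
  }

private:
  // Passed to patterns; all IR edits go through this member now.
  mlir::PatternRewriter rewriter;
};
```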
---------
Co-authored-by: Markus Böck <markus.boeck02 at gmail.com>
---
mlir/include/mlir/IR/PatternMatch.h | 1 +
.../Utils/GreedyPatternRewriteDriver.cpp | 40 ++++++++++---------
2 files changed, 23 insertions(+), 18 deletions(-)
diff --git a/mlir/include/mlir/IR/PatternMatch.h b/mlir/include/mlir/IR/PatternMatch.h
index 2562301e499dd..ed7b9ece4a464 100644
--- a/mlir/include/mlir/IR/PatternMatch.h
+++ b/mlir/include/mlir/IR/PatternMatch.h
@@ -784,6 +784,7 @@ class IRRewriter : public RewriterBase {
/// place.
class PatternRewriter : public RewriterBase {
public:
+ explicit PatternRewriter(MLIRContext *ctx) : RewriterBase(ctx) {}
using RewriterBase::RewriterBase;
/// A hook used to indicate if the pattern rewriter can recover from failure
diff --git a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
index cfd4f9c03aaff..597cb29ce911b 100644
--- a/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
+++ b/mlir/lib/Transforms/Utils/GreedyPatternRewriteDriver.cpp
@@ -319,8 +319,7 @@ class RandomizedWorklist : public Worklist {
/// This abstract class manages the worklist and contains helper methods for
/// rewriting ops on the worklist. Derived classes specify how ops are added
/// to the worklist in the beginning.
-class GreedyPatternRewriteDriver : public PatternRewriter,
- public RewriterBase::Listener {
+class GreedyPatternRewriteDriver : public RewriterBase::Listener {
protected:
explicit GreedyPatternRewriteDriver(MLIRContext *ctx,
const FrozenRewritePatternSet &patterns,
@@ -339,7 +338,8 @@ class GreedyPatternRewriteDriver : public PatternRewriter,
/// Notify the driver that the specified operation was inserted. Update the
/// worklist as needed: The operation is enqueued depending on scope and
/// strict mode.
- void notifyOperationInserted(Operation *op, InsertPoint previous) override;
+ void notifyOperationInserted(Operation *op,
+ OpBuilder::InsertPoint previous) override;
/// Notify the driver that the specified operation was removed. Update the
/// worklist as needed: The operation and its children are removed from the
@@ -354,6 +354,10 @@ class GreedyPatternRewriteDriver : public PatternRewriter,
/// reached. Return `true` if any IR was changed.
bool processWorklist();
+ /// The pattern rewriter that is used for making IR modifications and is
+ /// passed to rewrite patterns.
+ PatternRewriter rewriter;
+
/// The worklist for this transformation keeps track of the operations that
/// need to be (re)visited.
#ifdef MLIR_GREEDY_REWRITE_RANDOMIZER_SEED
@@ -407,7 +411,7 @@ class GreedyPatternRewriteDriver : public PatternRewriter,
GreedyPatternRewriteDriver::GreedyPatternRewriteDriver(
MLIRContext *ctx, const FrozenRewritePatternSet &patterns,
const GreedyRewriteConfig &config)
- : PatternRewriter(ctx), config(config), matcher(patterns)
+ : rewriter(ctx), config(config), matcher(patterns)
#if MLIR_ENABLE_EXPENSIVE_PATTERN_API_CHECKS
// clang-format off
, expensiveChecks(
@@ -423,9 +427,9 @@ GreedyPatternRewriteDriver::GreedyPatternRewriteDriver(
#if MLIR_ENABLE_EXPENSIVE_PATTERN_API_CHECKS
// Send IR notifications to the debug handler. This handler will then forward
// all notifications to this GreedyPatternRewriteDriver.
- setListener(&expensiveChecks);
+ rewriter.setListener(&expensiveChecks);
#else
- setListener(this);
+ rewriter.setListener(this);
#endif // MLIR_ENABLE_EXPENSIVE_PATTERN_API_CHECKS
}
@@ -473,7 +477,7 @@ bool GreedyPatternRewriteDriver::processWorklist() {
// If the operation is trivially dead - remove it.
if (isOpTriviallyDead(op)) {
- eraseOp(op);
+ rewriter.eraseOp(op);
changed = true;
LLVM_DEBUG(logResultWithLine("success", "operation is trivially dead"));
@@ -505,8 +509,8 @@ bool GreedyPatternRewriteDriver::processWorklist() {
// Op results can be replaced with `foldResults`.
assert(foldResults.size() == op->getNumResults() &&
"folder produced incorrect number of results");
- OpBuilder::InsertionGuard g(*this);
- setInsertionPoint(op);
+ OpBuilder::InsertionGuard g(rewriter);
+ rewriter.setInsertionPoint(op);
SmallVector<Value> replacements;
bool materializationSucceeded = true;
for (auto [ofr, resultType] :
@@ -519,7 +523,7 @@ bool GreedyPatternRewriteDriver::processWorklist() {
}
// Materialize Attributes as SSA values.
Operation *constOp = op->getDialect()->materializeConstant(
- *this, ofr.get<Attribute>(), resultType, op->getLoc());
+ rewriter, ofr.get<Attribute>(), resultType, op->getLoc());
if (!constOp) {
// If materialization fails, cleanup any operations generated for
@@ -532,7 +536,7 @@ bool GreedyPatternRewriteDriver::processWorklist() {
replacementOps.insert(replacement.getDefiningOp());
}
for (Operation *op : replacementOps) {
- eraseOp(op);
+ rewriter.eraseOp(op);
}
materializationSucceeded = false;
@@ -547,7 +551,7 @@ bool GreedyPatternRewriteDriver::processWorklist() {
}
if (materializationSucceeded) {
- replaceOp(op, replacements);
+ rewriter.replaceOp(op, replacements);
changed = true;
LLVM_DEBUG(logSuccessfulFolding(dumpRootOp));
#if MLIR_ENABLE_EXPENSIVE_PATTERN_API_CHECKS
@@ -608,7 +612,7 @@ bool GreedyPatternRewriteDriver::processWorklist() {
#endif // MLIR_ENABLE_EXPENSIVE_PATTERN_API_CHECKS
LogicalResult matchResult =
- matcher.matchAndRewrite(op, *this, canApply, onFailure, onSuccess);
+ matcher.matchAndRewrite(op, rewriter, canApply, onFailure, onSuccess);
if (succeeded(matchResult)) {
LLVM_DEBUG(logResultWithLine("success", "pattern matched"));
@@ -664,8 +668,8 @@ void GreedyPatternRewriteDriver::notifyBlockErased(Block *block) {
config.listener->notifyBlockErased(block);
}
-void GreedyPatternRewriteDriver::notifyOperationInserted(Operation *op,
- InsertPoint previous) {
+void GreedyPatternRewriteDriver::notifyOperationInserted(
+ Operation *op, OpBuilder::InsertPoint previous) {
LLVM_DEBUG({
logger.startLine() << "** Insert : '" << op->getName() << "'(" << op
<< ")\n";
@@ -822,7 +826,7 @@ class GreedyPatternRewriteIteration
LogicalResult RegionPatternRewriteDriver::simplify(bool *changed) && {
bool continueRewrites = false;
int64_t iteration = 0;
- MLIRContext *ctx = getContext();
+ MLIRContext *ctx = rewriter.getContext();
do {
// Check if the iteration limit was reached.
if (++iteration > config.maxIterations &&
@@ -834,7 +838,7 @@ LogicalResult RegionPatternRewriteDriver::simplify(bool *changed) && {
// `OperationFolder` CSE's constant ops (and may move them into parents
// regions to enable more aggressive CSE'ing).
- OperationFolder folder(getContext(), this);
+ OperationFolder folder(ctx, this);
auto insertKnownConstant = [&](Operation *op) {
// Check for existing constants when populating the worklist. This avoids
// accidentally reversing the constant order during processing.
@@ -872,7 +876,7 @@ LogicalResult RegionPatternRewriteDriver::simplify(bool *changed) && {
// After applying patterns, make sure that the CFG of each of the
// regions is kept up to date.
if (config.enableRegionSimplification)
- continueRewrites |= succeeded(simplifyRegions(*this, region));
+ continueRewrites |= succeeded(simplifyRegions(rewriter, region));
},
{&region}, iteration);
} while (continueRewrites);
>From b4149b101bce027c86c04b5bd404cffa009721f5 Mon Sep 17 00:00:00 2001
From: Timm Baeder <tbaeder at redhat.com>
Date: Sat, 8 Jun 2024 11:20:31 +0200
Subject: [PATCH 43/57] [clang] Report erroneous floating point results in
_Complex math (#90588)
Use handleFloatFloatBinOp to properly diagnose NaN results and divisions
by zero.
Fixes #84871
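For illustration, this is the kind of expression that now fails constant
evaluation instead of folding silently (mirroring the updated tests below):
```
// Now rejected with a "division by zero" note during constant evaluation:
static_assert(__real__((1.0 + 1.0j) / 0.0) == __builtin_inf());
```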
---
clang/lib/AST/ExprConstant.cpp | 27 +++++++++---
clang/test/SemaCXX/complex-folding.cpp | 61 ++++++++++++++------------
2 files changed, 55 insertions(+), 33 deletions(-)
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index f1aa19e4409e1..86fb396fabe2d 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -15209,11 +15209,21 @@ bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
APFloat &ResI = Result.getComplexFloatImag();
if (LHSReal) {
assert(!RHSReal && "Cannot have two real operands for a complex op!");
- ResR = A * C;
- ResI = A * D;
+ ResR = A;
+ ResI = A;
+ // ResR = A * C;
+ // ResI = A * D;
+ if (!handleFloatFloatBinOp(Info, E, ResR, BO_Mul, C) ||
+ !handleFloatFloatBinOp(Info, E, ResI, BO_Mul, D))
+ return false;
} else if (RHSReal) {
- ResR = C * A;
- ResI = C * B;
+ // ResR = C * A;
+ // ResI = C * B;
+ ResR = C;
+ ResI = C;
+ if (!handleFloatFloatBinOp(Info, E, ResR, BO_Mul, A) ||
+ !handleFloatFloatBinOp(Info, E, ResI, BO_Mul, B))
+ return false;
} else {
// In the fully general case, we need to handle NaNs and infinities
// robustly.
@@ -15289,8 +15299,13 @@ bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
APFloat &ResR = Result.getComplexFloatReal();
APFloat &ResI = Result.getComplexFloatImag();
if (RHSReal) {
- ResR = A / C;
- ResI = B / C;
+ ResR = A;
+ ResI = B;
+ // ResR = A / C;
+ // ResI = B / C;
+ if (!handleFloatFloatBinOp(Info, E, ResR, BO_Div, C) ||
+ !handleFloatFloatBinOp(Info, E, ResI, BO_Div, C))
+ return false;
} else {
if (LHSReal) {
// No real optimizations we can do here, stub out with zero.
diff --git a/clang/test/SemaCXX/complex-folding.cpp b/clang/test/SemaCXX/complex-folding.cpp
index 054f159e9ce0d..7bfd36f156ea6 100644
--- a/clang/test/SemaCXX/complex-folding.cpp
+++ b/clang/test/SemaCXX/complex-folding.cpp
@@ -59,41 +59,48 @@ static_assert((1.25 / (0.25 - 0.75j)) == (0.5 + 1.5j));
// Test that infinities are preserved, don't turn into NaNs, and do form zeros
// when the divisor.
+constexpr _Complex float InfC = {1.0, __builtin_inf()};
+constexpr _Complex float InfInf = __builtin_inf() + InfC;
+static_assert(__real__(InfInf) == __builtin_inf());
+static_assert(__imag__(InfInf) == __builtin_inf());
+static_assert(__builtin_isnan(__real__(InfInf * InfInf)));
+static_assert(__builtin_isinf_sign(__imag__(InfInf * InfInf)) == 1);
+
static_assert(__builtin_isinf_sign(__real__((__builtin_inf() + 1.0j) * 1.0)) == 1);
-static_assert(__builtin_isinf_sign(__imag__((1.0 + __builtin_inf() * 1.0j) * 1.0)) == 1);
+static_assert(__builtin_isinf_sign(__imag__((1.0 + InfC) * 1.0)) == 1);
static_assert(__builtin_isinf_sign(__real__(1.0 * (__builtin_inf() + 1.0j))) == 1);
-static_assert(__builtin_isinf_sign(__imag__(1.0 * (1.0 + __builtin_inf() * 1.0j))) == 1);
-
+static_assert(__builtin_isinf_sign(__imag__(1.0 * (1.0 + InfC))) == 1);
static_assert(__builtin_isinf_sign(__real__((__builtin_inf() + 1.0j) * (1.0 + 1.0j))) == 1);
static_assert(__builtin_isinf_sign(__real__((1.0 + 1.0j) * (__builtin_inf() + 1.0j))) == 1);
static_assert(__builtin_isinf_sign(__real__((__builtin_inf() + 1.0j) * (__builtin_inf() + 1.0j))) == 1);
-
-static_assert(__builtin_isinf_sign(__real__((1.0 + __builtin_inf() * 1.0j) * (1.0 + 1.0j))) == -1);
-static_assert(__builtin_isinf_sign(__imag__((1.0 + __builtin_inf() * 1.0j) * (1.0 + 1.0j))) == 1);
-static_assert(__builtin_isinf_sign(__real__((1.0 + 1.0j) * (1.0 + __builtin_inf() * 1.0j))) == -1);
-static_assert(__builtin_isinf_sign(__imag__((1.0 + 1.0j) * (1.0 + __builtin_inf() * 1.0j))) == 1);
-
-static_assert(__builtin_isinf_sign(__real__((1.0 + __builtin_inf() * 1.0j) * (1.0 + __builtin_inf() * 1.0j))) == -1);
-static_assert(__builtin_isinf_sign(__real__((__builtin_inf() + __builtin_inf() * 1.0j) * (__builtin_inf() + __builtin_inf() * 1.0j))) == -1);
-
+static_assert(__builtin_isinf_sign(__real__((1.0 + InfC) * (1.0 + 1.0j))) == -1);
+static_assert(__builtin_isinf_sign(__imag__((1.0 + InfC) * (1.0 + 1.0j))) == 1);
+static_assert(__builtin_isinf_sign(__real__((1.0 + 1.0j) * (1.0 + InfC))) == -1);
+static_assert(__builtin_isinf_sign(__imag__((1.0 + 1.0j) * (1.0 + InfC))) == 1);
+static_assert(__builtin_isinf_sign(__real__((1.0 + InfC) * (1.0 + InfC))) == -1);
+static_assert(__builtin_isinf_sign(__real__(InfInf * InfInf)) == 0);
static_assert(__builtin_isinf_sign(__real__((__builtin_inf() + 1.0j) / (1.0 + 1.0j))) == 1);
-static_assert(__builtin_isinf_sign(__imag__(1.0 + (__builtin_inf() * 1.0j) / (1.0 + 1.0j))) == 1);
-static_assert(__builtin_isinf_sign(__imag__((__builtin_inf() + __builtin_inf() * 1.0j) / (1.0 + 1.0j))) == 1);
+static_assert(__builtin_isinf_sign(__imag__(1.0 + (InfC) / (1.0 + 1.0j))) == 1);
+static_assert(__builtin_isinf_sign(__imag__((InfInf) / (1.0 + 1.0j))) == 0);
static_assert(__builtin_isinf_sign(__real__((__builtin_inf() + 1.0j) / 1.0)) == 1);
-static_assert(__builtin_isinf_sign(__imag__(1.0 + (__builtin_inf() * 1.0j) / 1.0)) == 1);
-static_assert(__builtin_isinf_sign(__imag__((__builtin_inf() + __builtin_inf() * 1.0j) / 1.0)) == 1);
-
+static_assert(__builtin_isinf_sign(__imag__(1.0 + (InfC) / 1.0)) == 1);
+static_assert(__builtin_isinf_sign(__imag__((InfInf) / 1.0)) == 1);
static_assert(((1.0 + 1.0j) / (__builtin_inf() + 1.0j)) == (0.0 + 0.0j));
-static_assert(((1.0 + 1.0j) / (1.0 + __builtin_inf() * 1.0j)) == (0.0 + 0.0j));
-static_assert(((1.0 + 1.0j) / (__builtin_inf() + __builtin_inf() * 1.0j)) == (0.0 + 0.0j));
+static_assert(((1.0 + 1.0j) / (1.0 + InfC)) == (0.0 + 0.0j));
+static_assert(((1.0 + 1.0j) / (InfInf)) == (0.0 + 0.0j));
static_assert(((1.0 + 1.0j) / __builtin_inf()) == (0.0 + 0.0j));
-
+static_assert(1.0j / 0.0 == 1); // expected-error {{static assertion}} \
+ // expected-note {{division by zero}}
static_assert(__builtin_isinf_sign(__real__((1.0 + 1.0j) / (0.0 + 0.0j))) == 1);
-static_assert(__builtin_isinf_sign(__real__((1.0 + 1.0j) / 0.0)) == 1);
-
+static_assert(__builtin_isinf_sign(__real__((1.0 + 1.0j) / 0.0)) == 1); // expected-error {{static assertion}} \
+ // expected-note {{division by zero}}
static_assert(__builtin_isinf_sign(__real__((__builtin_inf() + 1.0j) / (0.0 + 0.0j))) == 1);
-static_assert(__builtin_isinf_sign(__imag__((1.0 + __builtin_inf() * 1.0j) / (0.0 + 0.0j))) == 1);
-static_assert(__builtin_isinf_sign(__imag__((__builtin_inf() + __builtin_inf() * 1.0j) / (0.0 + 0.0j))) == 1);
-static_assert(__builtin_isinf_sign(__real__((__builtin_inf() + 1.0j) / 0.0)) == 1);
-static_assert(__builtin_isinf_sign(__imag__((1.0 + __builtin_inf() * 1.0j) / 0.0)) == 1);
-static_assert(__builtin_isinf_sign(__imag__((__builtin_inf() + __builtin_inf() * 1.0j) / 0.0)) == 1);
+static_assert(__builtin_isinf_sign(__imag__((1.0 + InfC) / (0.0 + 0.0j))) == 1);
+static_assert(__builtin_isinf_sign(__imag__((InfInf) / (0.0 + 0.0j))) == 1);
+static_assert(__builtin_isinf_sign(__real__((__builtin_inf() + 1.0j) / 0.0)) == 1); // expected-error {{static assertion}} \
+ // expected-note {{division by zero}}
+static_assert(__builtin_isinf_sign(__imag__((1.0 + InfC) / 0.0)) == 1); // expected-error {{static assertion}} \
+ // expected-note {{division by zero}}
+static_assert(__builtin_isinf_sign(__imag__((InfInf) / 0.0)) == 1); // expected-error {{static assertion}} \
+ // expected-note {{division by zero}}
+
>From 68e5dd582774791a6ba366d7c0aa1167489e3114 Mon Sep 17 00:00:00 2001
From: Quentin Colombet <quentin.colombet at gmail.com>
Date: Sat, 8 Jun 2024 11:31:13 +0200
Subject: [PATCH 44/57] [SDISel][Combine] Constant fold FP16_TO_FP (#94790)
In some cases, constants can survive early constant folding
because they are hidden behind several layers of type changes.
E.g., consider the following sequence (extracted from the ARM test that
this commit changes):
```
t2: v1f16 = BUILD_VECTOR ConstantFP:f16<APFloat(0)>
t4: v1f16 = insert_vector_elt t2, ConstantFP:f16<APFloat(0)>, Constant:i32<0>
t5: f16 = bitcast t4
t6: f32 = fp_extend t5
```
Because the constant (APFloat(0)) is hidden behind a <1 x ty> type, all
the constant folding that normally happens for scalar nodes when using
`SelectionDAG::getNode` is blocked.
As a result, the constant manages to survive as an actual conversion
instruction all the way down to the select phase:
```
t11: f32 = fp16_to_fp Constant:i32<0>
```
With the change in this patch, we try constant folding one more
time during DAG combine, which in the motivating example results in the
much better sequence:
```
t7: ch = CopyToReg t0, Register:f32 %0, ConstantFP:f32<0.000000e+00>
```
Note: I'm sure we have this problem in a lot of other places. Generally
speaking, I believe SDISel is not that good with <1 x ty> types compared
to pure scalars. However, I only changed what I could easily test.
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 7 ++++++-
llvm/test/CodeGen/AMDGPU/clamp-modifier.ll | 3 +--
llvm/test/CodeGen/AMDGPU/select-phi-s16-fp.ll | 3 +--
llvm/test/CodeGen/ARM/arm-half-promote.ll | 4 +---
4 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 2d5968bf5c2ea..70b3c7d2fad81 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -26586,7 +26586,12 @@ SDValue DAGCombiner::visitFP16_TO_FP(SDNode *N) {
}
}
- return SDValue();
+ // Sometimes constants manage to survive very late in the pipeline, e.g.,
+ // because they are wrapped inside the <1 x f16> type. Try one last time to
+ // get rid of them.
+ SDValue Folded = DAG.FoldConstantArithmetic(N->getOpcode(), SDLoc(N),
+ N->getValueType(0), {N0});
+ return Folded;
}
SDValue DAGCombiner::visitFP_TO_BF16(SDNode *N) {
diff --git a/llvm/test/CodeGen/AMDGPU/clamp-modifier.ll b/llvm/test/CodeGen/AMDGPU/clamp-modifier.ll
index 0a0179e866cd3..84bd9b6f6c5d4 100644
--- a/llvm/test/CodeGen/AMDGPU/clamp-modifier.ll
+++ b/llvm/test/CodeGen/AMDGPU/clamp-modifier.ll
@@ -1489,9 +1489,8 @@ define amdgpu_kernel void @v_no_clamp_add_src_v2f16_f16_src(ptr addrspace(1) %ou
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
; SI-NEXT: buffer_load_ushort v1, v[1:2], s[4:7], 0 addr64
-; SI-NEXT: v_cvt_f32_f16_e64 v3, s6 clamp
+; SI-NEXT: v_cvt_f16_f32_e32 v3, 0
; SI-NEXT: s_mov_b64 s[2:3], s[6:7]
-; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
diff --git a/llvm/test/CodeGen/AMDGPU/select-phi-s16-fp.ll b/llvm/test/CodeGen/AMDGPU/select-phi-s16-fp.ll
index 0d6e987165d87..ba04cdb795ce3 100644
--- a/llvm/test/CodeGen/AMDGPU/select-phi-s16-fp.ll
+++ b/llvm/test/CodeGen/AMDGPU/select-phi-s16-fp.ll
@@ -14,9 +14,8 @@ define void @phi_vec1half_to_f32_with_const_folding(ptr addrspace(1) %dst) #0 {
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_mov_b32 s4, 0
-; CHECK-NEXT: v_cvt_f32_f16_e64 v2, s4
; CHECK-NEXT: ; %bb.1: ; %bb
-; CHECK-NEXT: v_cvt_f16_f32_e64 v2, v2
+; CHECK-NEXT: v_cvt_f16_f32_e64 v2, s4
; CHECK-NEXT: s_mov_b32 s7, 0xf000
; CHECK-NEXT: s_mov_b32 s6, 0
; CHECK-NEXT: s_mov_b32 s4, s6
diff --git a/llvm/test/CodeGen/ARM/arm-half-promote.ll b/llvm/test/CodeGen/ARM/arm-half-promote.ll
index a5fafd4238616..e1ab75b2ac7f1 100644
--- a/llvm/test/CodeGen/ARM/arm-half-promote.ll
+++ b/llvm/test/CodeGen/ARM/arm-half-promote.ll
@@ -116,9 +116,7 @@ define fastcc { <8 x half>, <8 x half> } @f3() {
define void @extract_insert(ptr %dst) optnone noinline {
; CHECK-LABEL: extract_insert:
-; CHECK: movs r1, #0
-; CHECK: vmov s0, r1
-; CHECK: vcvtb.f32.f16 s0, s0
+; CHECK: vmov.i32 d0, #0x0
; CHECK: vcvtb.f16.f32 s0, s0
; CHECK: vmov r1, s0
; CHECK: strh r1, [r0]
>From 8ff2eeda4f66aa1b426257f2d00af555049e1ede Mon Sep 17 00:00:00 2001
From: Marc Auberer <marc.auberer at chillibits.com>
Date: Sat, 8 Jun 2024 12:29:01 +0200
Subject: [PATCH 45/57] [compiler-rt] Replace deprecated aligned_storage with
aligned byte array (#94171)
`std::aligned_storage` is deprecated in C++23, see
[here](https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2021/p1413r3.pdf).
This replaces the usages of `std::aligned_storage` within compiler-rt
with an aligned `std::byte` array.
I will provide patches for other subcomponents as well.
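The replacement pattern, as a standalone sketch (`Widget` is a made-up
type for illustration):
```
#include <cstddef>
#include <new>

struct Widget { int x; };

// Before (deprecated in C++23):
//   std::aligned_storage<sizeof(Widget), alignof(Widget)>::type Storage;
// After: a plain byte array with the same size and alignment guarantees.
alignas(Widget) std::byte Storage[sizeof(Widget)];

int main() {
  auto *W = new (&Storage) Widget{42}; // placement-new into the raw bytes
  W->~Widget();                        // manual destruction, as before
}
```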
---
.../tests/unit/function_call_trie_test.cpp | 8 +++---
.../tests/unit/profile_collector_test.cpp | 4 +--
.../xray/tests/unit/segmented_array_test.cpp | 8 +++---
.../lib/xray/tests/unit/test_helpers.cpp | 3 +--
compiler-rt/lib/xray/xray_fdr_logging.cpp | 19 +++++--------
.../lib/xray/xray_function_call_trie.h | 20 ++++++--------
.../lib/xray/xray_profile_collector.cpp | 27 +++++++++----------
compiler-rt/lib/xray/xray_profiling.cpp | 13 ++++-----
compiler-rt/lib/xray/xray_segmented_array.h | 3 +--
compiler-rt/test/tsan/custom_mutex4.cpp | 8 +++---
compiler-rt/test/tsan/custom_mutex5.cpp | 8 +++---
11 files changed, 50 insertions(+), 71 deletions(-)
diff --git a/compiler-rt/lib/xray/tests/unit/function_call_trie_test.cpp b/compiler-rt/lib/xray/tests/unit/function_call_trie_test.cpp
index c90d6637fff5e..b058e3ced2792 100644
--- a/compiler-rt/lib/xray/tests/unit/function_call_trie_test.cpp
+++ b/compiler-rt/lib/xray/tests/unit/function_call_trie_test.cpp
@@ -310,16 +310,14 @@ TEST(FunctionCallTrieTest, MergeInto) {
TEST(FunctionCallTrieTest, PlacementNewOnAlignedStorage) {
profilingFlags()->setDefaults();
- typename std::aligned_storage<sizeof(FunctionCallTrie::Allocators),
- alignof(FunctionCallTrie::Allocators)>::type
- AllocatorsStorage;
+ alignas(FunctionCallTrie::Allocators)
+ std::byte AllocatorsStorage[sizeof(FunctionCallTrie::Allocators)];
new (&AllocatorsStorage)
FunctionCallTrie::Allocators(FunctionCallTrie::InitAllocators());
auto *A =
reinterpret_cast<FunctionCallTrie::Allocators *>(&AllocatorsStorage);
- typename std::aligned_storage<sizeof(FunctionCallTrie),
- alignof(FunctionCallTrie)>::type FCTStorage;
+ alignas(FunctionCallTrie) std::byte FCTStorage[sizeof(FunctionCallTrie)];
new (&FCTStorage) FunctionCallTrie(*A);
auto *T = reinterpret_cast<FunctionCallTrie *>(&FCTStorage);
diff --git a/compiler-rt/lib/xray/tests/unit/profile_collector_test.cpp b/compiler-rt/lib/xray/tests/unit/profile_collector_test.cpp
index eab5579cb3ee0..da50642284d43 100644
--- a/compiler-rt/lib/xray/tests/unit/profile_collector_test.cpp
+++ b/compiler-rt/lib/xray/tests/unit/profile_collector_test.cpp
@@ -38,8 +38,8 @@ struct ExpectedProfilingFileHeader {
void ValidateFileHeaderBlock(XRayBuffer B) {
ASSERT_NE(static_cast<const void *>(B.Data), nullptr);
ASSERT_EQ(B.Size, sizeof(ExpectedProfilingFileHeader));
- typename std::aligned_storage<sizeof(ExpectedProfilingFileHeader)>::type
- FileHeaderStorage;
+ alignas(ExpectedProfilingFileHeader)
+ std::byte FileHeaderStorage[sizeof(ExpectedProfilingFileHeader)];
ExpectedProfilingFileHeader ExpectedHeader;
std::memcpy(&FileHeaderStorage, B.Data, B.Size);
auto &FileHeader =
diff --git a/compiler-rt/lib/xray/tests/unit/segmented_array_test.cpp b/compiler-rt/lib/xray/tests/unit/segmented_array_test.cpp
index 46aeb88f71b4c..26c80ded1d7ec 100644
--- a/compiler-rt/lib/xray/tests/unit/segmented_array_test.cpp
+++ b/compiler-rt/lib/xray/tests/unit/segmented_array_test.cpp
@@ -226,13 +226,11 @@ TEST(SegmentedArrayTest, SimulateStackBehaviour) {
TEST(SegmentedArrayTest, PlacementNewOnAlignedStorage) {
using AllocatorType = typename Array<ShadowStackEntry>::AllocatorType;
- typename std::aligned_storage<sizeof(AllocatorType),
- alignof(AllocatorType)>::type AllocatorStorage;
+ alignas(AllocatorType) std::byte AllocatorStorage[sizeof(AllocatorType)];
new (&AllocatorStorage) AllocatorType(1 << 10);
auto *A = reinterpret_cast<AllocatorType *>(&AllocatorStorage);
- typename std::aligned_storage<sizeof(Array<ShadowStackEntry>),
- alignof(Array<ShadowStackEntry>)>::type
- ArrayStorage;
+ alignas(Array<ShadowStackEntry>)
+ std::byte ArrayStorage[sizeof(Array<ShadowStackEntry>)];
new (&ArrayStorage) Array<ShadowStackEntry>(*A);
auto *Data = reinterpret_cast<Array<ShadowStackEntry> *>(&ArrayStorage);
diff --git a/compiler-rt/lib/xray/tests/unit/test_helpers.cpp b/compiler-rt/lib/xray/tests/unit/test_helpers.cpp
index 6075f36a4a78a..81a93d8720a6c 100644
--- a/compiler-rt/lib/xray/tests/unit/test_helpers.cpp
+++ b/compiler-rt/lib/xray/tests/unit/test_helpers.cpp
@@ -69,8 +69,7 @@ namespace __xray {
std::string serialize(BufferQueue &Buffers, int32_t Version) {
std::string Serialized;
- std::aligned_storage<sizeof(XRayFileHeader), alignof(XRayFileHeader)>::type
- HeaderStorage;
+ alignas(XRayFileHeader) std::byte HeaderStorage[sizeof(XRayFileHeader)];
auto *Header = reinterpret_cast<XRayFileHeader *>(&HeaderStorage);
new (Header) XRayFileHeader();
Header->Version = Version;
diff --git a/compiler-rt/lib/xray/xray_fdr_logging.cpp b/compiler-rt/lib/xray/xray_fdr_logging.cpp
index 378a8c0f4a70a..7def3565d56ab 100644
--- a/compiler-rt/lib/xray/xray_fdr_logging.cpp
+++ b/compiler-rt/lib/xray/xray_fdr_logging.cpp
@@ -55,17 +55,12 @@ struct XRAY_TLS_ALIGNAS(64) ThreadLocalData {
BufferQueue::Buffer Buffer{};
BufferQueue *BQ = nullptr;
- using LogWriterStorage =
- typename std::aligned_storage<sizeof(FDRLogWriter),
- alignof(FDRLogWriter)>::type;
-
- LogWriterStorage LWStorage;
+ using LogWriterStorage = std::byte[sizeof(FDRLogWriter)];
+ alignas(FDRLogWriter) LogWriterStorage LWStorage;
FDRLogWriter *Writer = nullptr;
- using ControllerStorage =
- typename std::aligned_storage<sizeof(FDRController<>),
- alignof(FDRController<>)>::type;
- ControllerStorage CStorage;
+ using ControllerStorage = std::byte[sizeof(FDRController<>)];
+ alignas(FDRController<>) ControllerStorage CStorage;
FDRController<> *Controller = nullptr;
};
@@ -78,7 +73,7 @@ static_assert(std::is_trivially_destructible<ThreadLocalData>::value,
static pthread_key_t Key;
// Global BufferQueue.
-static std::aligned_storage<sizeof(BufferQueue)>::type BufferQueueStorage;
+static std::byte BufferQueueStorage[sizeof(BufferQueue)];
static BufferQueue *BQ = nullptr;
// Global thresholds for function durations.
@@ -129,8 +124,8 @@ static_assert(alignof(ThreadLocalData) >= 64,
"ThreadLocalData must be cache line aligned.");
#endif
static ThreadLocalData &getThreadLocalData() {
- thread_local typename std::aligned_storage<
- sizeof(ThreadLocalData), alignof(ThreadLocalData)>::type TLDStorage{};
+ alignas(ThreadLocalData) thread_local std::byte
+ TLDStorage[sizeof(ThreadLocalData)];
if (pthread_getspecific(Key) == NULL) {
new (reinterpret_cast<ThreadLocalData *>(&TLDStorage)) ThreadLocalData{};
diff --git a/compiler-rt/lib/xray/xray_function_call_trie.h b/compiler-rt/lib/xray/xray_function_call_trie.h
index b8c60583761b5..7536f39b8081a 100644
--- a/compiler-rt/lib/xray/xray_function_call_trie.h
+++ b/compiler-rt/lib/xray/xray_function_call_trie.h
@@ -139,18 +139,14 @@ class FunctionCallTrie {
// Use hosted aligned storage members to allow for trivial move and init.
// This also allows us to sidestep the potential-failing allocation issue.
- typename std::aligned_storage<sizeof(NodeAllocatorType),
- alignof(NodeAllocatorType)>::type
- NodeAllocatorStorage;
- typename std::aligned_storage<sizeof(RootAllocatorType),
- alignof(RootAllocatorType)>::type
- RootAllocatorStorage;
- typename std::aligned_storage<sizeof(ShadowStackAllocatorType),
- alignof(ShadowStackAllocatorType)>::type
- ShadowStackAllocatorStorage;
- typename std::aligned_storage<sizeof(NodeIdPairAllocatorType),
- alignof(NodeIdPairAllocatorType)>::type
- NodeIdPairAllocatorStorage;
+ alignas(NodeAllocatorType) std::byte
+ NodeAllocatorStorage[sizeof(NodeAllocatorType)];
+ alignas(RootAllocatorType) std::byte
+ RootAllocatorStorage[sizeof(RootAllocatorType)];
+ alignas(ShadowStackAllocatorType) std::byte
+ ShadowStackAllocatorStorage[sizeof(ShadowStackAllocatorType)];
+ alignas(NodeIdPairAllocatorType) std::byte
+ NodeIdPairAllocatorStorage[sizeof(NodeIdPairAllocatorType)];
NodeAllocatorType *NodeAllocator = nullptr;
RootAllocatorType *RootAllocator = nullptr;
diff --git a/compiler-rt/lib/xray/xray_profile_collector.cpp b/compiler-rt/lib/xray/xray_profile_collector.cpp
index bef2504f2a16e..3a28240e603c9 100644
--- a/compiler-rt/lib/xray/xray_profile_collector.cpp
+++ b/compiler-rt/lib/xray/xray_profile_collector.cpp
@@ -29,7 +29,7 @@ namespace {
SpinMutex GlobalMutex;
struct ThreadTrie {
tid_t TId;
- typename std::aligned_storage<sizeof(FunctionCallTrie)>::type TrieStorage;
+ alignas(FunctionCallTrie) std::byte TrieStorage[sizeof(FunctionCallTrie)];
};
struct ProfileBuffer {
@@ -71,16 +71,13 @@ using ThreadDataAllocator = ThreadDataArray::AllocatorType;
// by the ThreadData array. This lets us host the buffers, allocators, and tries
// associated with a thread by moving the data into the array instead of
// attempting to copy the data to a separately backed set of tries.
-static typename std::aligned_storage<
- sizeof(BufferQueue), alignof(BufferQueue)>::type BufferQueueStorage;
+alignas(BufferQueue) static std::byte BufferQueueStorage[sizeof(BufferQueue)];
static BufferQueue *BQ = nullptr;
static BufferQueue::Buffer Buffer;
-static typename std::aligned_storage<sizeof(ThreadDataAllocator),
- alignof(ThreadDataAllocator)>::type
- ThreadDataAllocatorStorage;
-static typename std::aligned_storage<sizeof(ThreadDataArray),
- alignof(ThreadDataArray)>::type
- ThreadDataArrayStorage;
+alignas(ThreadDataAllocator) static std::byte
+ ThreadDataAllocatorStorage[sizeof(ThreadDataAllocator)];
+alignas(ThreadDataArray) static std::byte
+ ThreadDataArrayStorage[sizeof(ThreadDataArray)];
static ThreadDataAllocator *TDAllocator = nullptr;
static ThreadDataArray *TDArray = nullptr;
@@ -91,10 +88,10 @@ using ProfileBufferArrayAllocator = typename ProfileBufferArray::AllocatorType;
// These need to be global aligned storage to avoid dynamic initialization. We
// need these to be aligned to allow us to placement new objects into the
// storage, and have pointers to those objects be appropriately aligned.
-static typename std::aligned_storage<sizeof(ProfileBufferArray)>::type
- ProfileBuffersStorage;
-static typename std::aligned_storage<sizeof(ProfileBufferArrayAllocator)>::type
- ProfileBufferArrayAllocatorStorage;
+alignas(ProfileBufferArray) static std::byte
+ ProfileBuffersStorage[sizeof(ProfileBufferArray)];
+alignas(ProfileBufferArrayAllocator) static std::byte
+ ProfileBufferArrayAllocatorStorage[sizeof(ProfileBufferArrayAllocator)];
static ProfileBufferArrayAllocator *ProfileBuffersAllocator = nullptr;
static ProfileBufferArray *ProfileBuffers = nullptr;
@@ -382,8 +379,8 @@ XRayBuffer nextBuffer(XRayBuffer B) XRAY_NEVER_INSTRUMENT {
return {nullptr, 0};
static pthread_once_t Once = PTHREAD_ONCE_INIT;
- static typename std::aligned_storage<sizeof(XRayProfilingFileHeader)>::type
- FileHeaderStorage;
+ alignas(XRayProfilingFileHeader) static std::byte
+ FileHeaderStorage[sizeof(XRayProfilingFileHeader)];
pthread_once(
&Once, +[]() XRAY_NEVER_INSTRUMENT {
new (&FileHeaderStorage) XRayProfilingFileHeader{};
diff --git a/compiler-rt/lib/xray/xray_profiling.cpp b/compiler-rt/lib/xray/xray_profiling.cpp
index 259ec65a76a1f..e9ac2fdd8aadf 100644
--- a/compiler-rt/lib/xray/xray_profiling.cpp
+++ b/compiler-rt/lib/xray/xray_profiling.cpp
@@ -48,17 +48,14 @@ static pthread_key_t ProfilingKey;
// We use a global buffer queue, which gets initialized once at initialisation
// time, and gets reset when profiling is "done".
-static std::aligned_storage<sizeof(BufferQueue), alignof(BufferQueue)>::type
- BufferQueueStorage;
+alignas(BufferQueue) static std::byte BufferQueueStorage[sizeof(BufferQueue)];
static BufferQueue *BQ = nullptr;
thread_local FunctionCallTrie::Allocators::Buffers ThreadBuffers;
-thread_local std::aligned_storage<sizeof(FunctionCallTrie::Allocators),
- alignof(FunctionCallTrie::Allocators)>::type
- AllocatorsStorage;
-thread_local std::aligned_storage<sizeof(FunctionCallTrie),
- alignof(FunctionCallTrie)>::type
- FunctionCallTrieStorage;
+alignas(FunctionCallTrie::Allocators) thread_local std::byte
+ AllocatorsStorage[sizeof(FunctionCallTrie::Allocators)];
+alignas(FunctionCallTrie) thread_local std::byte
+ FunctionCallTrieStorage[sizeof(FunctionCallTrie)];
thread_local ProfilingData TLD{{0}, {0}};
thread_local atomic_uint8_t ReentranceGuard{0};
diff --git a/compiler-rt/lib/xray/xray_segmented_array.h b/compiler-rt/lib/xray/xray_segmented_array.h
index 6eb673edffea4..3ab174bcbe18c 100644
--- a/compiler-rt/lib/xray/xray_segmented_array.h
+++ b/compiler-rt/lib/xray/xray_segmented_array.h
@@ -56,8 +56,7 @@ template <class T> class Array {
// kCacheLineSize-multiple segments, minus the size of two pointers.
//
// - Request cacheline-multiple sized elements from the allocator.
- static constexpr uint64_t AlignedElementStorageSize =
- sizeof(typename std::aligned_storage<sizeof(T), alignof(T)>::type);
+ static constexpr uint64_t AlignedElementStorageSize = sizeof(T);
static constexpr uint64_t SegmentControlBlockSize = sizeof(Segment *) * 2;
diff --git a/compiler-rt/test/tsan/custom_mutex4.cpp b/compiler-rt/test/tsan/custom_mutex4.cpp
index 539a8be803c60..f7dfab0235312 100644
--- a/compiler-rt/test/tsan/custom_mutex4.cpp
+++ b/compiler-rt/test/tsan/custom_mutex4.cpp
@@ -1,7 +1,7 @@
-// RUN: %clangxx_tsan -O1 --std=c++11 %s -o %t && %run %t 2>&1 | FileCheck %s
+// RUN: %clangxx_tsan -O1 --std=c++17 %s -o %t && %run %t 2>&1 | FileCheck %s
#include "custom_mutex.h"
-#include <type_traits>
+#include <cstddef>
// Test that the destruction events of a mutex are ignored when the
// annotations request this.
@@ -12,14 +12,14 @@
// has run.
int main() {
- std::aligned_storage<sizeof(Mutex), alignof(Mutex)>::type mu1_store;
+ alignas(Mutex) std::byte mu1_store[sizeof(Mutex)];
Mutex* mu1 = reinterpret_cast<Mutex*>(&mu1_store);
new(&mu1_store) Mutex(false, __tsan_mutex_linker_init);
mu1->Lock();
mu1->~Mutex();
mu1->Unlock();
- std::aligned_storage<sizeof(Mutex), alignof(Mutex)>::type mu2_store;
+ alignas(Mutex) std::byte mu2_store[sizeof(Mutex)];
Mutex* mu2 = reinterpret_cast<Mutex*>(&mu2_store);
new(&mu2_store) Mutex(false, 0, __tsan_mutex_not_static);
mu2->Lock();
diff --git a/compiler-rt/test/tsan/custom_mutex5.cpp b/compiler-rt/test/tsan/custom_mutex5.cpp
index cb18b235e2628..6d65829e5df3c 100644
--- a/compiler-rt/test/tsan/custom_mutex5.cpp
+++ b/compiler-rt/test/tsan/custom_mutex5.cpp
@@ -1,20 +1,20 @@
-// RUN: %clangxx_tsan -O1 --std=c++11 %s -o %t && %deflake %run %t 2>&1 | FileCheck %s
+// RUN: %clangxx_tsan -O1 --std=c++17 %s -o %t && %deflake %run %t 2>&1 | FileCheck %s
#include "custom_mutex.h"
-#include <type_traits>
+#include <cstddef>
// Test that we detect the destruction of an in-use mutex when the
// thread annotations don't otherwise disable the check.
int main() {
- std::aligned_storage<sizeof(Mutex), alignof(Mutex)>::type mu1_store;
+ alignas(Mutex) std::byte mu1_store[sizeof(Mutex)];
Mutex* mu1 = reinterpret_cast<Mutex*>(&mu1_store);
new(&mu1_store) Mutex(false, 0);
mu1->Lock();
mu1->~Mutex();
mu1->Unlock();
- std::aligned_storage<sizeof(Mutex), alignof(Mutex)>::type mu2_store;
+ alignas(Mutex) std::byte mu2_store[sizeof(Mutex)];
Mutex* mu2 = reinterpret_cast<Mutex*>(&mu2_store);
new(&mu2_store)
Mutex(false, __tsan_mutex_not_static, __tsan_mutex_not_static);
>From 7b669c2a6852b8b0a3d1e489ee58b41f41dbf404 Mon Sep 17 00:00:00 2001
From: NAKAMURA Takumi <geek4civic at gmail.com>
Date: Sat, 8 Jun 2024 20:13:38 +0900
Subject: [PATCH 46/57] lld/test: Make sure to remove %t first
2e1788f8e265 reverted #94843. That change was creating `%t` as a
directory, which causes an error in incremental builds.
---
lld/test/ELF/aarch64-reloc-pauth.s | 1 +
1 file changed, 1 insertion(+)
diff --git a/lld/test/ELF/aarch64-reloc-pauth.s b/lld/test/ELF/aarch64-reloc-pauth.s
index b603d8ffdcabd..0cfcb1665b939 100644
--- a/lld/test/ELF/aarch64-reloc-pauth.s
+++ b/lld/test/ELF/aarch64-reloc-pauth.s
@@ -1,5 +1,6 @@
# REQUIRES: aarch64
+# RUN: rm -rf %t
# RUN: llvm-mc -filetype=obj -triple=aarch64 %p/Inputs/shared2.s -o %t.a.o
# RUN: ld.lld -shared %t.a.o -soname=so -o %t.a.so
# RUN: llvm-mc -filetype=obj -triple=aarch64 %s -o %t.o
>From 76ec394356e3a28085005e6f855c3e6025ef69ae Mon Sep 17 00:00:00 2001
From: Vlad Serebrennikov <serebrennikov.vladislav at gmail.com>
Date: Sat, 8 Jun 2024 16:23:17 +0400
Subject: [PATCH 47/57] Enable LLDB tests in Linux pre-merge CI (#94208)
This patch removes LLDB from the list of projects that are excluded from
building and testing in pre-merge CI on Linux.
The Windows environment needs to be prepared in order to test LLDB
(https://github.com/llvm/llvm-project/pull/94208#issuecomment-2146256857),
but we don't have enough maintenance resources to do that at the moment.
Because LLDB has been in the list of projects that need to be tested on
Clang changes, this PR makes that happen on Linux, which seems to be the
consensus in the discussion of this PR.
---
.ci/generate-buildkite-pipeline-premerge | 5 ++---
.ci/monolithic-linux.sh | 1 +
clang/examples/PrintFunctionNames/PrintFunctionNames.cpp | 2 +-
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/.ci/generate-buildkite-pipeline-premerge b/.ci/generate-buildkite-pipeline-premerge
index 033ab804b165e..fd603de611e56 100755
--- a/.ci/generate-buildkite-pipeline-premerge
+++ b/.ci/generate-buildkite-pipeline-premerge
@@ -153,7 +153,6 @@ function exclude-linux() {
for project in ${projects}; do
case ${project} in
cross-project-tests) ;; # tests failing
- lldb) ;; # tests failing
openmp) ;; # https://github.com/google/llvm-premerge-checks/issues/410
*)
echo "${project}"
@@ -170,7 +169,7 @@ function exclude-windows() {
compiler-rt) ;; # tests taking too long
openmp) ;; # TODO: having trouble with the Perl installation
libc) ;; # no Windows support
- lldb) ;; # tests failing
+ lldb) ;; # custom environment requirements (https://github.com/llvm/llvm-project/pull/94208#issuecomment-2146256857)
bolt) ;; # tests are not supported yet
*)
echo "${project}"
@@ -213,7 +212,7 @@ function check-targets() {
echo "check-unwind"
;;
lldb)
- echo "check-all" # TODO: check-lldb may not include all the LLDB tests?
+ echo "check-lldb"
;;
pstl)
echo "check-all"
diff --git a/.ci/monolithic-linux.sh b/.ci/monolithic-linux.sh
index 38d7128f241b6..b78dc59432b65 100755
--- a/.ci/monolithic-linux.sh
+++ b/.ci/monolithic-linux.sh
@@ -39,6 +39,7 @@ targets="${2}"
echo "--- cmake"
pip install -q -r "${MONOREPO_ROOT}"/mlir/python/requirements.txt
+pip install -q -r "${MONOREPO_ROOT}"/lldb/test/requirements.txt
cmake -S "${MONOREPO_ROOT}"/llvm -B "${BUILD_DIR}" \
-D LLVM_ENABLE_PROJECTS="${projects}" \
-G Ninja \
diff --git a/clang/examples/PrintFunctionNames/PrintFunctionNames.cpp b/clang/examples/PrintFunctionNames/PrintFunctionNames.cpp
index 6509a6440e12d..b2b785b87c25c 100644
--- a/clang/examples/PrintFunctionNames/PrintFunctionNames.cpp
+++ b/clang/examples/PrintFunctionNames/PrintFunctionNames.cpp
@@ -72,7 +72,7 @@ class PrintFunctionsConsumer : public ASTConsumer {
*sema.LateParsedTemplateMap.find(FD)->second;
sema.LateTemplateParser(sema.OpaqueParser, LPT);
llvm::errs() << "late-parsed-decl: \"" << FD->getNameAsString() << "\"\n";
- }
+ }
}
};
>From e7f708cf658e6dc72f4acc584a32e8f10c1ae0d2 Mon Sep 17 00:00:00 2001
From: DaPorkchop_ <daporkchop at daporkchop.net>
Date: Sat, 8 Jun 2024 15:32:34 +0200
Subject: [PATCH 48/57] [SimplifyCFG] Don't use a mask for lookup tables
generated from switches with an unreachable default case (#94468)
When transforming a switch with holes into a lookup table, we currently
use a mask to check if the current index is handled by the switch or if
it is a hole. If it is a hole, we skip loading from the lookup table.
Normally, if the switch's default case is unreachable, this has no
impact, as the mask test gets optimized away by subsequent passes.
However, if the switch is large enough that the number of lookup table
entries exceeds the target's register width, we won't be able to fit all
the cases into a mask and the switch won't get transformed into a lookup
table. If we know that the switch's default case is unreachable, we know
that the mask is unnecessary and can skip constructing it entirely,
which allows us to transform the switch into a lookup table.
[Example](https://godbolt.org/z/7x7qfx8M1)
In the future, it might be interesting to consider allowing lookup table
masks to be more than one register large (e.g. using a constant array of
bit flags, similar to `std::bitset`).
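A hypothetical example of the shape that now qualifies (assuming a case
range wider than the target's registers and an unreachable default):
```
// Sparse switch with holes; the case range can exceed 64 entries. Since
// the default is unreachable, the holes are unreachable too, so no
// validity mask is needed and the table transform can proceed.
int lookup(unsigned x) {
  switch (x) {
  case 0:   return 10;
  case 3:   return 7;
  case 100: return 5;
  default:  __builtin_unreachable();
  }
}
```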
---
llvm/lib/Transforms/Utils/SimplifyCFG.cpp | 30 +-
.../RISCV/switch-of-powers-of-two.ll | 4 -
.../SimplifyCFG/X86/switch_to_lookup_table.ll | 45 +-
.../X86/switch_to_lookup_table_big.ll | 542 ++++++++++++++++++
4 files changed, 607 insertions(+), 14 deletions(-)
create mode 100644 llvm/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table_big.ll
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index fe6ec8819ff99..292739b6c5fda 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -6743,8 +6743,25 @@ static bool SwitchToLookupTable(SwitchInst *SI, IRBuilder<> &Builder,
TableSize =
(MaxCaseVal->getValue() - MinCaseVal->getValue()).getLimitedValue() + 1;
+ // If the default destination is unreachable, or if the lookup table covers
+ // all values of the conditional variable, branch directly to the lookup table
+ // BB. Otherwise, check that the condition is within the case range.
+ bool DefaultIsReachable = !SI->defaultDestUndefined();
+
bool TableHasHoles = (NumResults < TableSize);
- bool NeedMask = (TableHasHoles && !HasDefaultResults);
+
+ // If the table has holes but the default destination doesn't produce any
+ // constant results, the lookup table entries corresponding to the holes will
+ // contain undefined values.
+ bool AllHolesAreUndefined = TableHasHoles && !HasDefaultResults;
+
+ // If the default destination doesn't produce a constant result but is still
+ // reachable, and the lookup table has holes, we need to use a mask to
+ // determine if the current index should load from the lookup table or jump
+ // to the default case.
+ // The mask is unnecessary if the table has holes but the default destination
+ // is unreachable, as in that case the holes must also be unreachable.
+ bool NeedMask = AllHolesAreUndefined && DefaultIsReachable;
if (NeedMask) {
// As an extra penalty for the validity test we require more cases.
if (SI->getNumCases() < 4) // FIXME: Find best threshold value (benchmark).
@@ -6766,12 +6783,6 @@ static bool SwitchToLookupTable(SwitchInst *SI, IRBuilder<> &Builder,
"It is impossible for a switch to have more entries than the max "
"representable value of its input integer type's size.");
- // If the default destination is unreachable, or if the lookup table covers
- // all values of the conditional variable, branch directly to the lookup table
- // BB. Otherwise, check that the condition is within the case range.
- bool DefaultIsReachable =
- !isa<UnreachableInst>(SI->getDefaultDest()->getFirstNonPHIOrDbg());
-
// Create the BB that does the lookups.
Module &Mod = *CommonDest->getParent()->getParent();
BasicBlock *LookupBB = BasicBlock::Create(
@@ -6895,8 +6906,9 @@ static bool SwitchToLookupTable(SwitchInst *SI, IRBuilder<> &Builder,
for (PHINode *PHI : PHIs) {
const ResultListTy &ResultList = ResultLists[PHI];
- // If using a bitmask, use any value to fill the lookup table holes.
- Constant *DV = NeedMask ? ResultLists[PHI][0].second : DefaultResults[PHI];
+ // Use any value to fill the lookup table holes.
+ Constant *DV =
+ AllHolesAreUndefined ? ResultLists[PHI][0].second : DefaultResults[PHI];
StringRef FuncName = Fn->getName();
SwitchLookupTable Table(Mod, TableSize, TableIndexOffset, ResultList, DV,
DL, FuncName);
diff --git a/llvm/test/Transforms/SimplifyCFG/RISCV/switch-of-powers-of-two.ll b/llvm/test/Transforms/SimplifyCFG/RISCV/switch-of-powers-of-two.ll
index 3ded78d85a610..2ac94afd95910 100644
--- a/llvm/test/Transforms/SimplifyCFG/RISCV/switch-of-powers-of-two.ll
+++ b/llvm/test/Transforms/SimplifyCFG/RISCV/switch-of-powers-of-two.ll
@@ -34,10 +34,6 @@ define i32 @switch_of_powers(i32 %x) {
; RV64ZBB-LABEL: @switch_of_powers(
; RV64ZBB-NEXT: entry:
; RV64ZBB-NEXT: [[TMP0:%.*]] = call i32 @llvm.cttz.i32(i32 [[X:%.*]], i1 true)
-; RV64ZBB-NEXT: [[SWITCH_MASKINDEX:%.*]] = trunc i32 [[TMP0]] to i8
-; RV64ZBB-NEXT: [[SWITCH_SHIFTED:%.*]] = lshr i8 121, [[SWITCH_MASKINDEX]]
-; RV64ZBB-NEXT: [[SWITCH_LOBIT:%.*]] = trunc i8 [[SWITCH_SHIFTED]] to i1
-; RV64ZBB-NEXT: call void @llvm.assume(i1 [[SWITCH_LOBIT]])
; RV64ZBB-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [7 x i32], ptr @switch.table.switch_of_powers, i32 0, i32 [[TMP0]]
; RV64ZBB-NEXT: [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
; RV64ZBB-NEXT: ret i32 [[SWITCH_LOAD]]
diff --git a/llvm/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll b/llvm/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll
index d6450a2b4a348..845c5008e3837 100644
--- a/llvm/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll
+++ b/llvm/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll
@@ -38,6 +38,7 @@ target triple = "x86_64-unknown-linux-gnu"
; CHECK: @switch.table.threecases = private unnamed_addr constant [3 x i32] [i32 10, i32 7, i32 5], align 4
; CHECK: @switch.table.covered_switch_with_bit_tests = private unnamed_addr constant [8 x i32] [i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 1, i32 1], align 4
; CHECK: @switch.table.signed_overflow1 = private unnamed_addr constant [4 x i32] [i32 3333, i32 4444, i32 1111, i32 2222], align 4
+; CHECK: @switch.table.signed_overflow2 = private unnamed_addr constant [4 x i32] [i32 3333, i32 4444, i32 2222, i32 2222], align 4
;.
define i32 @f(i32 %c) {
; CHECK-LABEL: @f(
@@ -1738,12 +1739,53 @@ define i32 @signed_overflow2(i8 %n) {
; CHECK-LABEL: @signed_overflow2(
; CHECK-NEXT: start:
; CHECK-NEXT: [[TRUNC:%.*]] = trunc i8 [[N:%.*]] to i2
-; CHECK-NEXT: switch i2 [[TRUNC]], label [[BB1:%.*]] [
+; CHECK-NEXT: [[SWITCH_TABLEIDX:%.*]] = sub i2 [[TRUNC]], -2
+; CHECK-NEXT: [[SWITCH_TABLEIDX_ZEXT:%.*]] = zext i2 [[SWITCH_TABLEIDX]] to i3
+; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [4 x i32], ptr @switch.table.signed_overflow2, i32 0, i3 [[SWITCH_TABLEIDX_ZEXT]]
+; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
+; CHECK-NEXT: ret i32 [[SWITCH_LOAD]]
+;
+start:
+ %trunc = trunc i8 %n to i2
+ switch i2 %trunc, label %bb1 [
+ i2 1, label %bb3
+ i2 -2, label %bb4
+ i2 -1, label %bb5
+ ]
+
+bb1: ; preds = %start
+ unreachable
+
+bb3: ; preds = %start
+ br label %bb6
+
+bb4: ; preds = %start
+ br label %bb6
+
+bb5: ; preds = %start
+ br label %bb6
+
+bb6: ; preds = %start, %bb3, %bb4, %bb5
+ %.sroa.0.0 = phi i32 [ 4444, %bb5 ], [ 3333, %bb4 ], [ 2222, %bb3 ]
+ ret i32 %.sroa.0.0
+}
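
For reference, the CHECK lines above rebase the i2 condition by the minimum
case value before indexing the table. A small arithmetic sketch emulating
the 2-bit wrap-around in C++ (hypothetical helper, not from the patch):

#include <cassert>

// Emulates the index computation in the CHECK lines above: the i2 cases
// {-2,-1,0,1} are rebased by subtracting the minimum case (-2) in 2-bit
// modular arithmetic, then zero-extended to i3 to index the 4-entry table.
static unsigned tableIndex(int x) {   // x in [-2, 1]
  unsigned trunc = (unsigned)x & 0x3; // trunc i8 %n to i2 (two's complement)
  return (trunc + 2) & 0x3;          // sub i2 %trunc, -2  (== +2 mod 4)
}

int main() {
  assert(tableIndex(-2) == 0 && tableIndex(-1) == 1);
  assert(tableIndex(0) == 2 && tableIndex(1) == 3);
}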
+
+; This is the same as @signed_overflow2, except that the default case calls @exit(), so it
+; isn't treated as unreachable.

+define i32 @signed_overflow3(i8 %n) {
+; CHECK-LABEL: @signed_overflow3(
+; CHECK-NEXT: start:
+; CHECK-NEXT: [[TRUNC:%.*]] = trunc i8 [[N:%.*]] to i2
+; CHECK-NEXT: switch i2 [[TRUNC]], label [[START_UNREACHABLEDEFAULT:%.*]] [
; CHECK-NEXT: i2 1, label [[BB6:%.*]]
; CHECK-NEXT: i2 -2, label [[BB4:%.*]]
; CHECK-NEXT: i2 -1, label [[BB5:%.*]]
+; CHECK-NEXT: i2 0, label [[BB1:%.*]]
; CHECK-NEXT: ]
+; CHECK: start.unreachabledefault:
+; CHECK-NEXT: unreachable
; CHECK: bb1:
+; CHECK-NEXT: call void @exit(i32 1)
; CHECK-NEXT: unreachable
; CHECK: bb4:
; CHECK-NEXT: br label [[BB6]]
@@ -1762,6 +1804,7 @@ start:
]
bb1: ; preds = %start
+ call void @exit(i32 1)
unreachable
bb3: ; preds = %start
diff --git a/llvm/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table_big.ll b/llvm/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table_big.ll
new file mode 100644
index 0000000000000..7988e3057a2c2
--- /dev/null
+++ b/llvm/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table_big.ll
@@ -0,0 +1,542 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals
+; RUN: opt < %s -passes=simplifycfg -switch-to-lookup=true -S | FileCheck %s
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32"
+target triple = "i386-pc-linux-gnu"
+
+; A dense switch with a reachable default case should be optimized into a lookup table with a bounds check
+;.
+; CHECK: @switch.table.reachable_default_dense_0to31 = private unnamed_addr constant [32 x i32] [i32 0, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1], align 4
+; CHECK: @switch.table.unreachable_default_dense_0to31 = private unnamed_addr constant [32 x i32] [i32 0, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1], align 4
+; CHECK: @switch.table.reachable_default_holes_0to31 = private unnamed_addr constant [32 x i32] [i32 0, i32 7, i32 6, i32 0, i32 4, i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 0, i32 2, i32 1, i32 0, i32 7, i32 0, i32 5, i32 4, i32 3, i32 2, i32 0, i32 0, i32 7, i32 6, i32 5, i32 0, i32 3, i32 2, i32 1], align 4
+; CHECK: @switch.table.unreachable_default_holes_0to31 = private unnamed_addr constant [32 x i32] [i32 0, i32 7, i32 6, i32 0, i32 4, i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 0, i32 2, i32 1, i32 0, i32 7, i32 0, i32 5, i32 4, i32 3, i32 2, i32 0, i32 0, i32 7, i32 6, i32 5, i32 0, i32 3, i32 2, i32 1], align 4
+; CHECK: @switch.table.reachable_default_dense_0to32 = private unnamed_addr constant [33 x i32] [i32 0, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0], align 4
+; CHECK: @switch.table.unreachable_default_dense_0to32 = private unnamed_addr constant [33 x i32] [i32 0, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0], align 4
+; CHECK: @switch.table.unreachable_default_holes_0to32 = private unnamed_addr constant [33 x i32] [i32 0, i32 7, i32 6, i32 0, i32 4, i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 0, i32 2, i32 1, i32 0, i32 7, i32 0, i32 5, i32 4, i32 3, i32 2, i32 0, i32 0, i32 7, i32 6, i32 5, i32 0, i32 3, i32 2, i32 1, i32 0], align 4
+;.
+define i32 @reachable_default_dense_0to31(i32 %x, i32 %y) {
+; CHECK-LABEL: @reachable_default_dense_0to31(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i32 [[X:%.*]], 32
+; CHECK-NEXT: br i1 [[TMP0]], label [[SWITCH_LOOKUP:%.*]], label [[RETURN:%.*]]
+; CHECK: switch.lookup:
+; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [32 x i32], ptr @switch.table.reachable_default_dense_0to31, i32 0, i32 [[X]]
+; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: return:
+; CHECK-NEXT: [[RES:%.*]] = phi i32 [ [[SWITCH_LOAD]], [[SWITCH_LOOKUP]] ], [ [[Y:%.*]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+entry:
+ switch i32 %x, label %sw.default [
+ i32 0, label %bb0
+ i32 1, label %bb7
+ i32 2, label %bb6
+ i32 3, label %bb5
+ i32 4, label %bb4
+ i32 5, label %bb3
+ i32 6, label %bb2
+ i32 7, label %bb1
+ i32 8, label %bb0
+ i32 9, label %bb7
+ i32 10, label %bb6
+ i32 11, label %bb5
+ i32 12, label %bb4
+ i32 13, label %bb3
+ i32 14, label %bb2
+ i32 15, label %bb1
+ i32 16, label %bb0
+ i32 17, label %bb7
+ i32 18, label %bb6
+ i32 19, label %bb5
+ i32 20, label %bb4
+ i32 21, label %bb3
+ i32 22, label %bb2
+ i32 23, label %bb1
+ i32 24, label %bb0
+ i32 25, label %bb7
+ i32 26, label %bb6
+ i32 27, label %bb5
+ i32 28, label %bb4
+ i32 29, label %bb3
+ i32 30, label %bb2
+ i32 31, label %bb1
+ ]
+
+sw.default: br label %return
+bb0: br label %return
+bb1: br label %return
+bb2: br label %return
+bb3: br label %return
+bb4: br label %return
+bb5: br label %return
+bb6: br label %return
+bb7: br label %return
+
+return:
+ %res = phi i32 [ %y, %sw.default ], [ 0, %bb0 ], [ 1, %bb1 ], [ 2, %bb2 ], [ 3, %bb3 ], [ 4, %bb4 ], [ 5, %bb5 ], [ 6, %bb6 ], [ 7, %bb7 ]
+ ret i32 %res
+
+}
+
+; A dense switch with an unreachable default case should be optimized into a lookup table without bounds checks
+define i32 @unreachable_default_dense_0to31(i32 %x, i32 %y) {
+; CHECK-LABEL: @unreachable_default_dense_0to31(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [32 x i32], ptr @switch.table.unreachable_default_dense_0to31, i32 0, i32 [[X:%.*]]
+; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
+; CHECK-NEXT: ret i32 [[SWITCH_LOAD]]
+;
+entry:
+ switch i32 %x, label %sw.default [
+ i32 0, label %bb0
+ i32 1, label %bb7
+ i32 2, label %bb6
+ i32 3, label %bb5
+ i32 4, label %bb4
+ i32 5, label %bb3
+ i32 6, label %bb2
+ i32 7, label %bb1
+ i32 8, label %bb0
+ i32 9, label %bb7
+ i32 10, label %bb6
+ i32 11, label %bb5
+ i32 12, label %bb4
+ i32 13, label %bb3
+ i32 14, label %bb2
+ i32 15, label %bb1
+ i32 16, label %bb0
+ i32 17, label %bb7
+ i32 18, label %bb6
+ i32 19, label %bb5
+ i32 20, label %bb4
+ i32 21, label %bb3
+ i32 22, label %bb2
+ i32 23, label %bb1
+ i32 24, label %bb0
+ i32 25, label %bb7
+ i32 26, label %bb6
+ i32 27, label %bb5
+ i32 28, label %bb4
+ i32 29, label %bb3
+ i32 30, label %bb2
+ i32 31, label %bb1
+ ]
+
+sw.default: unreachable
+bb0: br label %return
+bb1: br label %return
+bb2: br label %return
+bb3: br label %return
+bb4: br label %return
+bb5: br label %return
+bb6: br label %return
+bb7: br label %return
+
+return:
+ %res = phi i32 [ 0, %bb0 ], [ 1, %bb1 ], [ 2, %bb2 ], [ 3, %bb3 ], [ 4, %bb4 ], [ 5, %bb5 ], [ 6, %bb6 ], [ 7, %bb7 ]
+ ret i32 %res
+
+}
+
+; A sparse switch with a reachable default case should be optimized into a lookup table with a bounds check and a mask
+define i32 @reachable_default_holes_0to31(i32 %x, i32 %y) {
+; CHECK-LABEL: @reachable_default_holes_0to31(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i32 [[X:%.*]], 32
+; CHECK-NEXT: br i1 [[TMP0]], label [[SWITCH_HOLE_CHECK:%.*]], label [[RETURN:%.*]]
+; CHECK: switch.hole_check:
+; CHECK-NEXT: [[SWITCH_SHIFTED:%.*]] = lshr i32 -277094665, [[X]]
+; CHECK-NEXT: [[SWITCH_LOBIT:%.*]] = trunc i32 [[SWITCH_SHIFTED]] to i1
+; CHECK-NEXT: br i1 [[SWITCH_LOBIT]], label [[SWITCH_LOOKUP:%.*]], label [[RETURN]]
+; CHECK: switch.lookup:
+; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [32 x i32], ptr @switch.table.reachable_default_holes_0to31, i32 0, i32 [[X]]
+; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: return:
+; CHECK-NEXT: [[RES:%.*]] = phi i32 [ [[SWITCH_LOAD]], [[SWITCH_LOOKUP]] ], [ [[Y:%.*]], [[SWITCH_HOLE_CHECK]] ], [ [[Y]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+entry:
+ switch i32 %x, label %sw.default [
+ i32 0, label %bb0
+ i32 1, label %bb7
+ i32 2, label %bb6
+ i32 4, label %bb4
+ i32 5, label %bb3
+ i32 6, label %bb2
+ i32 7, label %bb1
+ i32 9, label %bb7
+ i32 10, label %bb6
+ i32 11, label %bb5
+ i32 12, label %bb4
+ i32 14, label %bb2
+ i32 15, label %bb1
+ i32 16, label %bb0
+ i32 17, label %bb7
+ i32 19, label %bb5
+ i32 20, label %bb4
+ i32 21, label %bb3
+ i32 22, label %bb2
+ i32 24, label %bb0
+ i32 25, label %bb7
+ i32 26, label %bb6
+ i32 27, label %bb5
+ i32 29, label %bb3
+ i32 30, label %bb2
+ i32 31, label %bb1
+ ]
+
+sw.default: br label %return
+bb0: br label %return
+bb1: br label %return
+bb2: br label %return
+bb3: br label %return
+bb4: br label %return
+bb5: br label %return
+bb6: br label %return
+bb7: br label %return
+
+return:
+ %res = phi i32 [ %y, %sw.default ], [ 0, %bb0 ], [ 1, %bb1 ], [ 2, %bb2 ], [ 3, %bb3 ], [ 4, %bb4 ], [ 5, %bb5 ], [ 6, %bb6 ], [ 7, %bb7 ]
+ ret i32 %res
+
+}
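
The hole check above encodes table validity in a 32-bit constant: bit i of
the mask is set iff table index i has a real case. A short sketch, where
0xEF7BDEF7 is the unsigned view of the -277094665 used in the lshr:

#include <cassert>
#include <cstdint>

// Bit i of the mask is 1 iff table index i corresponds to a real case;
// the holes are at indices 3, 8, 13, 18, 23 and 28.
static bool indexHasEntry(uint32_t x) {
  const uint32_t Mask = 0xEF7BDEF7u; // == (uint32_t)-277094665
  return (Mask >> x) & 1u;           // lshr + trunc to i1
}

int main() {
  assert(indexHasEntry(0));  // real case: load from the lookup table
  assert(!indexHasEntry(3)); // hole: branch to the default result
}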
+
+; A sparse switch with an unreachable default case should be optimized into a lookup table without bounds checks
+define i32 @unreachable_default_holes_0to31(i32 %x, i32 %y) {
+; CHECK-LABEL: @unreachable_default_holes_0to31(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [32 x i32], ptr @switch.table.unreachable_default_holes_0to31, i32 0, i32 [[X:%.*]]
+; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
+; CHECK-NEXT: ret i32 [[SWITCH_LOAD]]
+;
+entry:
+ switch i32 %x, label %sw.default [
+ i32 0, label %bb0
+ i32 1, label %bb7
+ i32 2, label %bb6
+ i32 4, label %bb4
+ i32 5, label %bb3
+ i32 6, label %bb2
+ i32 7, label %bb1
+ i32 9, label %bb7
+ i32 10, label %bb6
+ i32 11, label %bb5
+ i32 12, label %bb4
+ i32 14, label %bb2
+ i32 15, label %bb1
+ i32 16, label %bb0
+ i32 17, label %bb7
+ i32 19, label %bb5
+ i32 20, label %bb4
+ i32 21, label %bb3
+ i32 22, label %bb2
+ i32 24, label %bb0
+ i32 25, label %bb7
+ i32 26, label %bb6
+ i32 27, label %bb5
+ i32 29, label %bb3
+ i32 30, label %bb2
+ i32 31, label %bb1
+ ]
+
+sw.default: unreachable
+bb0: br label %return
+bb1: br label %return
+bb2: br label %return
+bb3: br label %return
+bb4: br label %return
+bb5: br label %return
+bb6: br label %return
+bb7: br label %return
+
+return:
+ %res = phi i32 [ 0, %bb0 ], [ 1, %bb1 ], [ 2, %bb2 ], [ 3, %bb3 ], [ 4, %bb4 ], [ 5, %bb5 ], [ 6, %bb6 ], [ 7, %bb7 ]
+ ret i32 %res
+
+}
+
+; A dense switch with a reachable default case should be optimized into a lookup table with a bounds check
+define i32 @reachable_default_dense_0to32(i32 %x, i32 %y) {
+; CHECK-LABEL: @reachable_default_dense_0to32(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i32 [[X:%.*]], 33
+; CHECK-NEXT: br i1 [[TMP0]], label [[SWITCH_LOOKUP:%.*]], label [[RETURN:%.*]]
+; CHECK: switch.lookup:
+; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [33 x i32], ptr @switch.table.reachable_default_dense_0to32, i32 0, i32 [[X]]
+; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: return:
+; CHECK-NEXT: [[RES:%.*]] = phi i32 [ [[SWITCH_LOAD]], [[SWITCH_LOOKUP]] ], [ [[Y:%.*]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+entry:
+ switch i32 %x, label %sw.default [
+ i32 0, label %bb0
+ i32 1, label %bb7
+ i32 2, label %bb6
+ i32 3, label %bb5
+ i32 4, label %bb4
+ i32 5, label %bb3
+ i32 6, label %bb2
+ i32 7, label %bb1
+ i32 8, label %bb0
+ i32 9, label %bb7
+ i32 10, label %bb6
+ i32 11, label %bb5
+ i32 12, label %bb4
+ i32 13, label %bb3
+ i32 14, label %bb2
+ i32 15, label %bb1
+ i32 16, label %bb0
+ i32 17, label %bb7
+ i32 18, label %bb6
+ i32 19, label %bb5
+ i32 20, label %bb4
+ i32 21, label %bb3
+ i32 22, label %bb2
+ i32 23, label %bb1
+ i32 24, label %bb0
+ i32 25, label %bb7
+ i32 26, label %bb6
+ i32 27, label %bb5
+ i32 28, label %bb4
+ i32 29, label %bb3
+ i32 30, label %bb2
+ i32 31, label %bb1
+ i32 32, label %bb0
+ ]
+
+sw.default: br label %return
+bb0: br label %return
+bb1: br label %return
+bb2: br label %return
+bb3: br label %return
+bb4: br label %return
+bb5: br label %return
+bb6: br label %return
+bb7: br label %return
+
+return:
+ %res = phi i32 [ %y, %sw.default ], [ 0, %bb0 ], [ 1, %bb1 ], [ 2, %bb2 ], [ 3, %bb3 ], [ 4, %bb4 ], [ 5, %bb5 ], [ 6, %bb6 ], [ 7, %bb7 ]
+ ret i32 %res
+
+}
+
+; A dense switch with an unreachable default case should be optimized into a lookup table without bounds checks
+define i32 @unreachable_default_dense_0to32(i32 %x, i32 %y) {
+; CHECK-LABEL: @unreachable_default_dense_0to32(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [33 x i32], ptr @switch.table.unreachable_default_dense_0to32, i32 0, i32 [[X:%.*]]
+; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
+; CHECK-NEXT: ret i32 [[SWITCH_LOAD]]
+;
+entry:
+ switch i32 %x, label %sw.default [
+ i32 0, label %bb0
+ i32 1, label %bb7
+ i32 2, label %bb6
+ i32 3, label %bb5
+ i32 4, label %bb4
+ i32 5, label %bb3
+ i32 6, label %bb2
+ i32 7, label %bb1
+ i32 8, label %bb0
+ i32 9, label %bb7
+ i32 10, label %bb6
+ i32 11, label %bb5
+ i32 12, label %bb4
+ i32 13, label %bb3
+ i32 14, label %bb2
+ i32 15, label %bb1
+ i32 16, label %bb0
+ i32 17, label %bb7
+ i32 18, label %bb6
+ i32 19, label %bb5
+ i32 20, label %bb4
+ i32 21, label %bb3
+ i32 22, label %bb2
+ i32 23, label %bb1
+ i32 24, label %bb0
+ i32 25, label %bb7
+ i32 26, label %bb6
+ i32 27, label %bb5
+ i32 28, label %bb4
+ i32 29, label %bb3
+ i32 30, label %bb2
+ i32 31, label %bb1
+ i32 32, label %bb0
+ ]
+
+sw.default: unreachable
+bb0: br label %return
+bb1: br label %return
+bb2: br label %return
+bb3: br label %return
+bb4: br label %return
+bb5: br label %return
+bb6: br label %return
+bb7: br label %return
+
+return:
+ %res = phi i32 [ 0, %bb0 ], [ 1, %bb1 ], [ 2, %bb2 ], [ 3, %bb3 ], [ 4, %bb4 ], [ 5, %bb5 ], [ 6, %bb6 ], [ 7, %bb7 ]
+ ret i32 %res
+
+}
+
+; A sparse switch with a reachable default case that would be optimized into a lookup table with a bounds check and a mask, but isn't, because
+; that would require a 33-bit mask
+define i32 @reachable_default_holes_0to32(i32 %x, i32 %y) {
+; CHECK-LABEL: @reachable_default_holes_0to32(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: switch i32 [[X:%.*]], label [[RETURN:%.*]] [
+; CHECK-NEXT: i32 0, label [[BB0:%.*]]
+; CHECK-NEXT: i32 1, label [[BB7:%.*]]
+; CHECK-NEXT: i32 2, label [[BB6:%.*]]
+; CHECK-NEXT: i32 4, label [[BB4:%.*]]
+; CHECK-NEXT: i32 5, label [[BB3:%.*]]
+; CHECK-NEXT: i32 6, label [[BB2:%.*]]
+; CHECK-NEXT: i32 7, label [[BB1:%.*]]
+; CHECK-NEXT: i32 9, label [[BB7]]
+; CHECK-NEXT: i32 10, label [[BB6]]
+; CHECK-NEXT: i32 11, label [[BB5:%.*]]
+; CHECK-NEXT: i32 12, label [[BB4]]
+; CHECK-NEXT: i32 14, label [[BB2]]
+; CHECK-NEXT: i32 15, label [[BB1]]
+; CHECK-NEXT: i32 16, label [[BB0]]
+; CHECK-NEXT: i32 17, label [[BB7]]
+; CHECK-NEXT: i32 19, label [[BB5]]
+; CHECK-NEXT: i32 20, label [[BB4]]
+; CHECK-NEXT: i32 21, label [[BB3]]
+; CHECK-NEXT: i32 22, label [[BB2]]
+; CHECK-NEXT: i32 24, label [[BB0]]
+; CHECK-NEXT: i32 25, label [[BB7]]
+; CHECK-NEXT: i32 26, label [[BB6]]
+; CHECK-NEXT: i32 27, label [[BB5]]
+; CHECK-NEXT: i32 29, label [[BB3]]
+; CHECK-NEXT: i32 30, label [[BB2]]
+; CHECK-NEXT: i32 31, label [[BB1]]
+; CHECK-NEXT: i32 32, label [[BB0]]
+; CHECK-NEXT: ]
+; CHECK: bb0:
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: bb1:
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: bb2:
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: bb3:
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: bb4:
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: bb5:
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: bb6:
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: bb7:
+; CHECK-NEXT: br label [[RETURN]]
+; CHECK: return:
+; CHECK-NEXT: [[RES:%.*]] = phi i32 [ 0, [[BB0]] ], [ 1, [[BB1]] ], [ 2, [[BB2]] ], [ 3, [[BB3]] ], [ 4, [[BB4]] ], [ 5, [[BB5]] ], [ 6, [[BB6]] ], [ 7, [[BB7]] ], [ [[Y:%.*]], [[ENTRY:%.*]] ]
+; CHECK-NEXT: ret i32 [[RES]]
+;
+entry:
+ switch i32 %x, label %sw.default [
+ i32 0, label %bb0
+ i32 1, label %bb7
+ i32 2, label %bb6
+ i32 4, label %bb4
+ i32 5, label %bb3
+ i32 6, label %bb2
+ i32 7, label %bb1
+ i32 9, label %bb7
+ i32 10, label %bb6
+ i32 11, label %bb5
+ i32 12, label %bb4
+ i32 14, label %bb2
+ i32 15, label %bb1
+ i32 16, label %bb0
+ i32 17, label %bb7
+ i32 19, label %bb5
+ i32 20, label %bb4
+ i32 21, label %bb3
+ i32 22, label %bb2
+ i32 24, label %bb0
+ i32 25, label %bb7
+ i32 26, label %bb6
+ i32 27, label %bb5
+ i32 29, label %bb3
+ i32 30, label %bb2
+ i32 31, label %bb1
+ i32 32, label %bb0
+ ]
+
+sw.default: br label %return
+bb0: br label %return
+bb1: br label %return
+bb2: br label %return
+bb3: br label %return
+bb4: br label %return
+bb5: br label %return
+bb6: br label %return
+bb7: br label %return
+
+return:
+ %res = phi i32 [ %y, %sw.default ], [ 0, %bb0 ], [ 1, %bb1 ], [ 2, %bb2 ], [ 3, %bb3 ], [ 4, %bb4 ], [ 5, %bb5 ], [ 6, %bb6 ], [ 7, %bb7 ]
+ ret i32 %res
+
+}
+
+; A sparse switch with an unreachable default case which can be optimized into a lookup table without bounds checks. Because the default case is
+; unreachable, the fact that a 33-bit mask would be required doesn't prevent lookup table optimization.
+define i32 @unreachable_default_holes_0to32(i32 %x, i32 %y) {
+; CHECK-LABEL: @unreachable_default_holes_0to32(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[SWITCH_GEP:%.*]] = getelementptr inbounds [33 x i32], ptr @switch.table.unreachable_default_holes_0to32, i32 0, i32 [[X:%.*]]
+; CHECK-NEXT: [[SWITCH_LOAD:%.*]] = load i32, ptr [[SWITCH_GEP]], align 4
+; CHECK-NEXT: ret i32 [[SWITCH_LOAD]]
+;
+entry:
+ switch i32 %x, label %sw.default [
+ i32 0, label %bb0
+ i32 1, label %bb7
+ i32 2, label %bb6
+ i32 4, label %bb4
+ i32 5, label %bb3
+ i32 6, label %bb2
+ i32 7, label %bb1
+ i32 9, label %bb7
+ i32 10, label %bb6
+ i32 11, label %bb5
+ i32 12, label %bb4
+ i32 14, label %bb2
+ i32 15, label %bb1
+ i32 16, label %bb0
+ i32 17, label %bb7
+ i32 19, label %bb5
+ i32 20, label %bb4
+ i32 21, label %bb3
+ i32 22, label %bb2
+ i32 24, label %bb0
+ i32 25, label %bb7
+ i32 26, label %bb6
+ i32 27, label %bb5
+ i32 29, label %bb3
+ i32 30, label %bb2
+ i32 31, label %bb1
+ i32 32, label %bb0
+ ]
+
+sw.default: unreachable
+bb0: br label %return
+bb1: br label %return
+bb2: br label %return
+bb3: br label %return
+bb4: br label %return
+bb5: br label %return
+bb6: br label %return
+bb7: br label %return
+
+return:
+ %res = phi i32 [ 0, %bb0 ], [ 1, %bb1 ], [ 2, %bb2 ], [ 3, %bb3 ], [ 4, %bb4 ], [ 5, %bb5 ], [ 6, %bb6 ], [ 7, %bb7 ]
+ ret i32 %res
+
+}
>From 2c181cedbb33d47ce1869c65a61321a2b9658486 Mon Sep 17 00:00:00 2001
From: FantasqueX <fantasquex at gmail.com>
Date: Sat, 8 Jun 2024 21:33:43 +0800
Subject: [PATCH 49/57] [llvm] Remove useless headers in example BrainF
(#93701)
---
llvm/examples/BrainF/BrainF.cpp | 1 -
llvm/examples/BrainF/BrainFDriver.cpp | 3 ---
2 files changed, 4 deletions(-)
diff --git a/llvm/examples/BrainF/BrainF.cpp b/llvm/examples/BrainF/BrainF.cpp
index 1c7cacba4fff8..ac01961735e13 100644
--- a/llvm/examples/BrainF/BrainF.cpp
+++ b/llvm/examples/BrainF/BrainF.cpp
@@ -37,7 +37,6 @@
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
-#include "llvm/Support/Casting.h"
#include <cstdlib>
#include <iostream>
diff --git a/llvm/examples/BrainF/BrainFDriver.cpp b/llvm/examples/BrainF/BrainFDriver.cpp
index 6448347969236..98fa735e1491f 100644
--- a/llvm/examples/BrainF/BrainFDriver.cpp
+++ b/llvm/examples/BrainF/BrainFDriver.cpp
@@ -28,7 +28,6 @@
#include "llvm/Bitcode/BitcodeWriter.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/GenericValue.h"
-#include "llvm/ExecutionEngine/MCJIT.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
@@ -38,13 +37,11 @@
#include "llvm/IR/Module.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
-#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/raw_ostream.h"
-#include <algorithm>
#include <cstdlib>
#include <fstream>
#include <iostream>
>From ff922f512d2b4d27fdf3f3d75540045fcc7d6fae Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Sat, 8 Jun 2024 21:40:57 +0800
Subject: [PATCH 50/57] [DAGCombine] Fix miscompilation caused by PR94008
(#94850)
The PR description in #94008 does not match the code.
> + When VT is smaller than ShiftVT, it is safe to use trunc.
> + When VT is larger than ShiftVT, it is safe to use zext iff
`is_zero_poison` is true (i.e., `opcode == ISD::CTTZ_ZERO_UNDEF`). See
also the counterexample `src_shl_cttz2 -> tgt_shl_cttz2` in the alive2
proofs.
Closes #94824.
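A worked instance of the counterexample (mirroring pr94824.ll below): for
%x1 = 0, cttz(i8 0, /*is_zero_poison=*/false) is defined to be 8, so the
original shift and the folded (Y & -Y) * X disagree. A C++ sketch of the
two sides (arithmetic only, not the DAG code):

#include <cassert>
#include <cstdint>

int main() {
  // For y = 0, cttz(i8 0, /*is_zero_poison=*/false) is defined to be 8.
  uint8_t y = 0;
  unsigned cttz = 8;
  // Original: shl i16 1, zext(cttz) -- well defined, yields 256.
  uint16_t shl = (uint16_t)(1u << cttz);
  // Folded:   mul (y & -y), 1 -- yields 0, so the fold miscompiles
  // unless y = 0 is poison (i.e. CTTZ_ZERO_UNDEF).
  uint16_t mul = (uint16_t)((uint8_t)(y & (uint8_t)-y) * 1u);
  assert(shl == 256 && mul == 0);
}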
---
llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 2 +-
llvm/test/CodeGen/X86/pr94824.ll | 19 +++++++++++++++++++
2 files changed, 20 insertions(+), 1 deletion(-)
create mode 100644 llvm/test/CodeGen/X86/pr94824.ll
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 70b3c7d2fad81..e3bd4ea3ffd90 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -10112,7 +10112,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
// fold (shl X, cttz(Y)) -> (mul (Y & -Y), X) if cttz is unsupported on the
// target.
if (((N1.getOpcode() == ISD::CTTZ &&
- VT.getScalarSizeInBits() >= ShiftVT.getScalarSizeInBits()) ||
+ VT.getScalarSizeInBits() <= ShiftVT.getScalarSizeInBits()) ||
N1.getOpcode() == ISD::CTTZ_ZERO_UNDEF) &&
N1.hasOneUse() && !TLI.isOperationLegalOrCustom(ISD::CTTZ, ShiftVT) &&
TLI.isOperationLegalOrCustom(ISD::MUL, VT)) {
diff --git a/llvm/test/CodeGen/X86/pr94824.ll b/llvm/test/CodeGen/X86/pr94824.ll
new file mode 100644
index 0000000000000..7744d00acf3d4
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr94824.ll
@@ -0,0 +1,19 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=x86_64-linux-gnu | FileCheck %s
+
+define i16 @pr94824(i8 %x1) {
+; CHECK-LABEL: pr94824:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: orl $256, %edi # imm = 0x100
+; CHECK-NEXT: rep bsfl %edi, %ecx
+; CHECK-NEXT: movl $1, %eax
+; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
+; CHECK-NEXT: shll %cl, %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
+; CHECK-NEXT: retq
+entry:
+ %cttz = call i8 @llvm.cttz.i8(i8 %x1, i1 false)
+ %ext = zext i8 %cttz to i16
+ %shl = shl i16 1, %ext
+ ret i16 %shl
+}
>From 109ae5e9c988ab0e6eeff97354164a7cc0772722 Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Sat, 8 Jun 2024 22:28:56 +0800
Subject: [PATCH 51/57] [Reassociate] Use uint64_t for repeat count (#94232)
This patch relands #91469 and uses `uint64_t` for the repeat count to avoid
a miscompilation caused by overflow:
https://github.com/llvm/llvm-project/pull/91469#discussion_r1623925158.
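The relanded version guards the merge of leaf weights with an assertion; a
minimal sketch of that guard (hypothetical helper, not the Reassociate code
itself):

#include <cassert>
#include <cstdint>

// Unsigned wrap-around on Total += Weight is detectable after the fact:
// a wrapped sum is strictly smaller than either addend.
static uint64_t addWeight(uint64_t Total, uint64_t Weight) {
  Total += Weight;
  assert(Total >= Weight && "Weight overflows");
  return Total;
}

int main() {
  assert(addWeight(3, 5) == 8); // ordinary accumulation
  // addWeight(UINT64_MAX, 1) would wrap to 0 and trip the assertion.
}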
---
llvm/lib/Transforms/Scalar/Reassociate.cpp | 120 ++------------------
llvm/test/Transforms/Reassociate/repeats.ll | 45 +++++---
2 files changed, 43 insertions(+), 122 deletions(-)
diff --git a/llvm/lib/Transforms/Scalar/Reassociate.cpp b/llvm/lib/Transforms/Scalar/Reassociate.cpp
index c73d7c8d83bec..f36e21b296bd1 100644
--- a/llvm/lib/Transforms/Scalar/Reassociate.cpp
+++ b/llvm/lib/Transforms/Scalar/Reassociate.cpp
@@ -302,98 +302,7 @@ static BinaryOperator *LowerNegateToMultiply(Instruction *Neg) {
return Res;
}
-/// Returns k such that lambda(2^Bitwidth) = 2^k, where lambda is the Carmichael
-/// function. This means that x^(2^k) === 1 mod 2^Bitwidth for
-/// every odd x, i.e. x^(2^k) = 1 for every odd x in Bitwidth-bit arithmetic.
-/// Note that 0 <= k < Bitwidth, and if Bitwidth > 3 then x^(2^k) = 0 for every
-/// even x in Bitwidth-bit arithmetic.
-static unsigned CarmichaelShift(unsigned Bitwidth) {
- if (Bitwidth < 3)
- return Bitwidth - 1;
- return Bitwidth - 2;
-}
-
-/// Add the extra weight 'RHS' to the existing weight 'LHS',
-/// reducing the combined weight using any special properties of the operation.
-/// The existing weight LHS represents the computation X op X op ... op X where
-/// X occurs LHS times. The combined weight represents X op X op ... op X with
-/// X occurring LHS + RHS times. If op is "Xor" for example then the combined
-/// operation is equivalent to X if LHS + RHS is odd, or 0 if LHS + RHS is even;
-/// the routine returns 1 in LHS in the first case, and 0 in LHS in the second.
-static void IncorporateWeight(APInt &LHS, const APInt &RHS, unsigned Opcode) {
- // If we were working with infinite precision arithmetic then the combined
- // weight would be LHS + RHS. But we are using finite precision arithmetic,
- // and the APInt sum LHS + RHS may not be correct if it wraps (it is correct
- // for nilpotent operations and addition, but not for idempotent operations
- // and multiplication), so it is important to correctly reduce the combined
- // weight back into range if wrapping would be wrong.
-
- // If RHS is zero then the weight didn't change.
- if (RHS.isMinValue())
- return;
- // If LHS is zero then the combined weight is RHS.
- if (LHS.isMinValue()) {
- LHS = RHS;
- return;
- }
- // From this point on we know that neither LHS nor RHS is zero.
-
- if (Instruction::isIdempotent(Opcode)) {
- // Idempotent means X op X === X, so any non-zero weight is equivalent to a
- // weight of 1. Keeping weights at zero or one also means that wrapping is
- // not a problem.
- assert(LHS == 1 && RHS == 1 && "Weights not reduced!");
- return; // Return a weight of 1.
- }
- if (Instruction::isNilpotent(Opcode)) {
- // Nilpotent means X op X === 0, so reduce weights modulo 2.
- assert(LHS == 1 && RHS == 1 && "Weights not reduced!");
- LHS = 0; // 1 + 1 === 0 modulo 2.
- return;
- }
- if (Opcode == Instruction::Add || Opcode == Instruction::FAdd) {
- // TODO: Reduce the weight by exploiting nsw/nuw?
- LHS += RHS;
- return;
- }
-
- assert((Opcode == Instruction::Mul || Opcode == Instruction::FMul) &&
- "Unknown associative operation!");
- unsigned Bitwidth = LHS.getBitWidth();
- // If CM is the Carmichael number then a weight W satisfying W >= CM+Bitwidth
- // can be replaced with W-CM. That's because x^W=x^(W-CM) for every Bitwidth
- // bit number x, since either x is odd in which case x^CM = 1, or x is even in
- // which case both x^W and x^(W - CM) are zero. By subtracting off multiples
- // of CM like this weights can always be reduced to the range [0, CM+Bitwidth)
- // which by a happy accident means that they can always be represented using
- // Bitwidth bits.
- // TODO: Reduce the weight by exploiting nsw/nuw? (Could do much better than
- // the Carmichael number).
- if (Bitwidth > 3) {
- /// CM - The value of Carmichael's lambda function.
- APInt CM = APInt::getOneBitSet(Bitwidth, CarmichaelShift(Bitwidth));
- // Any weight W >= Threshold can be replaced with W - CM.
- APInt Threshold = CM + Bitwidth;
- assert(LHS.ult(Threshold) && RHS.ult(Threshold) && "Weights not reduced!");
- // For Bitwidth 4 or more the following sum does not overflow.
- LHS += RHS;
- while (LHS.uge(Threshold))
- LHS -= CM;
- } else {
- // To avoid problems with overflow do everything the same as above but using
- // a larger type.
- unsigned CM = 1U << CarmichaelShift(Bitwidth);
- unsigned Threshold = CM + Bitwidth;
- assert(LHS.getZExtValue() < Threshold && RHS.getZExtValue() < Threshold &&
- "Weights not reduced!");
- unsigned Total = LHS.getZExtValue() + RHS.getZExtValue();
- while (Total >= Threshold)
- Total -= CM;
- LHS = Total;
- }
-}
-
-using RepeatedValue = std::pair<Value*, APInt>;
+using RepeatedValue = std::pair<Value *, uint64_t>;
/// Given an associative binary expression, return the leaf
/// nodes in Ops along with their weights (how many times the leaf occurs). The
@@ -475,7 +384,6 @@ static bool LinearizeExprTree(Instruction *I,
assert((isa<UnaryOperator>(I) || isa<BinaryOperator>(I)) &&
"Expected a UnaryOperator or BinaryOperator!");
LLVM_DEBUG(dbgs() << "LINEARIZE: " << *I << '\n');
- unsigned Bitwidth = I->getType()->getScalarType()->getPrimitiveSizeInBits();
unsigned Opcode = I->getOpcode();
assert(I->isAssociative() && I->isCommutative() &&
"Expected an associative and commutative operation!");
@@ -490,8 +398,8 @@ static bool LinearizeExprTree(Instruction *I,
// with their weights, representing a certain number of paths to the operator.
// If an operator occurs in the worklist multiple times then we found multiple
// ways to get to it.
- SmallVector<std::pair<Instruction*, APInt>, 8> Worklist; // (Op, Weight)
- Worklist.push_back(std::make_pair(I, APInt(Bitwidth, 1)));
+ SmallVector<std::pair<Instruction *, uint64_t>, 8> Worklist; // (Op, Weight)
+ Worklist.push_back(std::make_pair(I, 1));
bool Changed = false;
// Leaves of the expression are values that either aren't the right kind of
@@ -509,7 +417,7 @@ static bool LinearizeExprTree(Instruction *I,
// Leaves - Keeps track of the set of putative leaves as well as the number of
// paths to each leaf seen so far.
- using LeafMap = DenseMap<Value *, APInt>;
+ using LeafMap = DenseMap<Value *, uint64_t>;
LeafMap Leaves; // Leaf -> Total weight so far.
SmallVector<Value *, 8> LeafOrder; // Ensure deterministic leaf output order.
const DataLayout DL = I->getModule()->getDataLayout();
@@ -518,8 +426,8 @@ static bool LinearizeExprTree(Instruction *I,
SmallPtrSet<Value *, 8> Visited; // For checking the iteration scheme.
#endif
while (!Worklist.empty()) {
- std::pair<Instruction*, APInt> P = Worklist.pop_back_val();
- I = P.first; // We examine the operands of this binary operator.
+ // We examine the operands of this binary operator.
+ auto [I, Weight] = Worklist.pop_back_val();
if (isa<OverflowingBinaryOperator>(I)) {
Flags.HasNUW &= I->hasNoUnsignedWrap();
@@ -528,7 +436,6 @@ static bool LinearizeExprTree(Instruction *I,
for (unsigned OpIdx = 0; OpIdx < I->getNumOperands(); ++OpIdx) { // Visit operands.
Value *Op = I->getOperand(OpIdx);
- APInt Weight = P.second; // Number of paths to this operand.
LLVM_DEBUG(dbgs() << "OPERAND: " << *Op << " (" << Weight << ")\n");
assert(!Op->use_empty() && "No uses, so how did we get to it?!");
@@ -562,7 +469,8 @@ static bool LinearizeExprTree(Instruction *I,
"In leaf map but not visited!");
// Update the number of paths to the leaf.
- IncorporateWeight(It->second, Weight, Opcode);
+ It->second += Weight;
+ assert(It->second >= Weight && "Weight overflows");
// If we still have uses that are not accounted for by the expression
// then it is not safe to modify the value.
@@ -625,10 +533,7 @@ static bool LinearizeExprTree(Instruction *I,
// Node initially thought to be a leaf wasn't.
continue;
assert(!isReassociableOp(V, Opcode) && "Shouldn't be a leaf!");
- APInt Weight = It->second;
- if (Weight.isMinValue())
- // Leaf already output or weight reduction eliminated it.
- continue;
+ uint64_t Weight = It->second;
// Ensure the leaf is only output once.
It->second = 0;
Ops.push_back(std::make_pair(V, Weight));
@@ -642,7 +547,7 @@ static bool LinearizeExprTree(Instruction *I,
if (Ops.empty()) {
Constant *Identity = ConstantExpr::getBinOpIdentity(Opcode, I->getType());
assert(Identity && "Associative operation without identity!");
- Ops.emplace_back(Identity, APInt(Bitwidth, 1));
+ Ops.emplace_back(Identity, 1);
}
return Changed;
@@ -1188,8 +1093,7 @@ Value *ReassociatePass::RemoveFactorFromExpression(Value *V, Value *Factor) {
Factors.reserve(Tree.size());
for (unsigned i = 0, e = Tree.size(); i != e; ++i) {
RepeatedValue E = Tree[i];
- Factors.append(E.second.getZExtValue(),
- ValueEntry(getRank(E.first), E.first));
+ Factors.append(E.second, ValueEntry(getRank(E.first), E.first));
}
bool FoundFactor = false;
@@ -2368,7 +2272,7 @@ void ReassociatePass::ReassociateExpression(BinaryOperator *I) {
SmallVector<ValueEntry, 8> Ops;
Ops.reserve(Tree.size());
for (const RepeatedValue &E : Tree)
- Ops.append(E.second.getZExtValue(), ValueEntry(getRank(E.first), E.first));
+ Ops.append(E.second, ValueEntry(getRank(E.first), E.first));
LLVM_DEBUG(dbgs() << "RAIn:\t"; PrintOps(I, Ops); dbgs() << '\n');
diff --git a/llvm/test/Transforms/Reassociate/repeats.ll b/llvm/test/Transforms/Reassociate/repeats.ll
index ba25c4bfc643c..8600777877bb3 100644
--- a/llvm/test/Transforms/Reassociate/repeats.ll
+++ b/llvm/test/Transforms/Reassociate/repeats.ll
@@ -60,7 +60,8 @@ define i3 @foo3x5(i3 %x) {
; CHECK-SAME: i3 [[X:%.*]]) {
; CHECK-NEXT: [[TMP3:%.*]] = mul i3 [[X]], [[X]]
; CHECK-NEXT: [[TMP4:%.*]] = mul i3 [[TMP3]], [[X]]
-; CHECK-NEXT: ret i3 [[TMP4]]
+; CHECK-NEXT: [[TMP5:%.*]] = mul i3 [[TMP4]], [[TMP3]]
+; CHECK-NEXT: ret i3 [[TMP5]]
;
%tmp1 = mul i3 %x, %x
%tmp2 = mul i3 %tmp1, %x
@@ -74,7 +75,8 @@ define i3 @foo3x5_nsw(i3 %x) {
; CHECK-LABEL: define i3 @foo3x5_nsw(
; CHECK-SAME: i3 [[X:%.*]]) {
; CHECK-NEXT: [[TMP3:%.*]] = mul i3 [[X]], [[X]]
-; CHECK-NEXT: [[TMP4:%.*]] = mul nsw i3 [[TMP3]], [[X]]
+; CHECK-NEXT: [[TMP2:%.*]] = mul i3 [[TMP3]], [[X]]
+; CHECK-NEXT: [[TMP4:%.*]] = mul i3 [[TMP2]], [[TMP3]]
; CHECK-NEXT: ret i3 [[TMP4]]
;
%tmp1 = mul i3 %x, %x
@@ -89,7 +91,8 @@ define i3 @foo3x6(i3 %x) {
; CHECK-LABEL: define i3 @foo3x6(
; CHECK-SAME: i3 [[X:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = mul i3 [[X]], [[X]]
-; CHECK-NEXT: [[TMP2:%.*]] = mul i3 [[TMP1]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = mul i3 [[TMP1]], [[X]]
+; CHECK-NEXT: [[TMP2:%.*]] = mul i3 [[TMP3]], [[TMP3]]
; CHECK-NEXT: ret i3 [[TMP2]]
;
%tmp1 = mul i3 %x, %x
@@ -106,7 +109,9 @@ define i3 @foo3x7(i3 %x) {
; CHECK-SAME: i3 [[X:%.*]]) {
; CHECK-NEXT: [[TMP5:%.*]] = mul i3 [[X]], [[X]]
; CHECK-NEXT: [[TMP6:%.*]] = mul i3 [[TMP5]], [[X]]
-; CHECK-NEXT: ret i3 [[TMP6]]
+; CHECK-NEXT: [[TMP3:%.*]] = mul i3 [[TMP6]], [[X]]
+; CHECK-NEXT: [[TMP7:%.*]] = mul i3 [[TMP3]], [[TMP6]]
+; CHECK-NEXT: ret i3 [[TMP7]]
;
%tmp1 = mul i3 %x, %x
%tmp2 = mul i3 %tmp1, %x
@@ -123,7 +128,8 @@ define i4 @foo4x8(i4 %x) {
; CHECK-SAME: i4 [[X:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = mul i4 [[X]], [[X]]
; CHECK-NEXT: [[TMP4:%.*]] = mul i4 [[TMP1]], [[TMP1]]
-; CHECK-NEXT: ret i4 [[TMP4]]
+; CHECK-NEXT: [[TMP3:%.*]] = mul i4 [[TMP4]], [[TMP4]]
+; CHECK-NEXT: ret i4 [[TMP3]]
;
%tmp1 = mul i4 %x, %x
%tmp2 = mul i4 %tmp1, %x
@@ -140,8 +146,9 @@ define i4 @foo4x9(i4 %x) {
; CHECK-LABEL: define i4 @foo4x9(
; CHECK-SAME: i4 [[X:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = mul i4 [[X]], [[X]]
-; CHECK-NEXT: [[TMP2:%.*]] = mul i4 [[TMP1]], [[X]]
-; CHECK-NEXT: [[TMP8:%.*]] = mul i4 [[TMP2]], [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = mul i4 [[TMP1]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = mul i4 [[TMP2]], [[X]]
+; CHECK-NEXT: [[TMP8:%.*]] = mul i4 [[TMP3]], [[TMP2]]
; CHECK-NEXT: ret i4 [[TMP8]]
;
%tmp1 = mul i4 %x, %x
@@ -160,7 +167,8 @@ define i4 @foo4x10(i4 %x) {
; CHECK-LABEL: define i4 @foo4x10(
; CHECK-SAME: i4 [[X:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = mul i4 [[X]], [[X]]
-; CHECK-NEXT: [[TMP2:%.*]] = mul i4 [[TMP1]], [[X]]
+; CHECK-NEXT: [[TMP4:%.*]] = mul i4 [[TMP1]], [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = mul i4 [[TMP4]], [[X]]
; CHECK-NEXT: [[TMP3:%.*]] = mul i4 [[TMP2]], [[TMP2]]
; CHECK-NEXT: ret i4 [[TMP3]]
;
@@ -181,7 +189,8 @@ define i4 @foo4x11(i4 %x) {
; CHECK-LABEL: define i4 @foo4x11(
; CHECK-SAME: i4 [[X:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = mul i4 [[X]], [[X]]
-; CHECK-NEXT: [[TMP2:%.*]] = mul i4 [[TMP1]], [[X]]
+; CHECK-NEXT: [[TMP4:%.*]] = mul i4 [[TMP1]], [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = mul i4 [[TMP4]], [[X]]
; CHECK-NEXT: [[TMP3:%.*]] = mul i4 [[TMP2]], [[X]]
; CHECK-NEXT: [[TMP10:%.*]] = mul i4 [[TMP3]], [[TMP2]]
; CHECK-NEXT: ret i4 [[TMP10]]
@@ -204,7 +213,9 @@ define i4 @foo4x12(i4 %x) {
; CHECK-LABEL: define i4 @foo4x12(
; CHECK-SAME: i4 [[X:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = mul i4 [[X]], [[X]]
-; CHECK-NEXT: [[TMP2:%.*]] = mul i4 [[TMP1]], [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = mul i4 [[TMP1]], [[X]]
+; CHECK-NEXT: [[TMP3:%.*]] = mul i4 [[TMP4]], [[TMP4]]
+; CHECK-NEXT: [[TMP2:%.*]] = mul i4 [[TMP3]], [[TMP3]]
; CHECK-NEXT: ret i4 [[TMP2]]
;
%tmp1 = mul i4 %x, %x
@@ -227,7 +238,9 @@ define i4 @foo4x13(i4 %x) {
; CHECK-SAME: i4 [[X:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = mul i4 [[X]], [[X]]
; CHECK-NEXT: [[TMP2:%.*]] = mul i4 [[TMP1]], [[X]]
-; CHECK-NEXT: [[TMP12:%.*]] = mul i4 [[TMP2]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = mul i4 [[TMP2]], [[TMP2]]
+; CHECK-NEXT: [[TMP4:%.*]] = mul i4 [[TMP3]], [[X]]
+; CHECK-NEXT: [[TMP12:%.*]] = mul i4 [[TMP4]], [[TMP3]]
; CHECK-NEXT: ret i4 [[TMP12]]
;
%tmp1 = mul i4 %x, %x
@@ -252,7 +265,9 @@ define i4 @foo4x14(i4 %x) {
; CHECK-NEXT: [[TMP1:%.*]] = mul i4 [[X]], [[X]]
; CHECK-NEXT: [[TMP6:%.*]] = mul i4 [[TMP1]], [[X]]
; CHECK-NEXT: [[TMP7:%.*]] = mul i4 [[TMP6]], [[TMP6]]
-; CHECK-NEXT: ret i4 [[TMP7]]
+; CHECK-NEXT: [[TMP4:%.*]] = mul i4 [[TMP7]], [[X]]
+; CHECK-NEXT: [[TMP5:%.*]] = mul i4 [[TMP4]], [[TMP4]]
+; CHECK-NEXT: ret i4 [[TMP5]]
;
%tmp1 = mul i4 %x, %x
%tmp2 = mul i4 %tmp1, %x
@@ -276,8 +291,10 @@ define i4 @foo4x15(i4 %x) {
; CHECK-SAME: i4 [[X:%.*]]) {
; CHECK-NEXT: [[TMP1:%.*]] = mul i4 [[X]], [[X]]
; CHECK-NEXT: [[TMP6:%.*]] = mul i4 [[TMP1]], [[X]]
-; CHECK-NEXT: [[TMP5:%.*]] = mul i4 [[TMP6]], [[X]]
-; CHECK-NEXT: [[TMP14:%.*]] = mul i4 [[TMP5]], [[TMP6]]
+; CHECK-NEXT: [[TMP3:%.*]] = mul i4 [[TMP6]], [[TMP6]]
+; CHECK-NEXT: [[TMP4:%.*]] = mul i4 [[TMP3]], [[X]]
+; CHECK-NEXT: [[TMP5:%.*]] = mul i4 [[TMP4]], [[X]]
+; CHECK-NEXT: [[TMP14:%.*]] = mul i4 [[TMP5]], [[TMP4]]
; CHECK-NEXT: ret i4 [[TMP14]]
;
%tmp1 = mul i4 %x, %x
>From c48950d7b9f204fa2d2353688ef849654838a416 Mon Sep 17 00:00:00 2001
From: AtariDreams <gfunni234 at gmail.com>
Date: Sat, 8 Jun 2024 11:01:05 -0400
Subject: [PATCH 52/57] [X86] Support ATOMIC_LOAD_FP_BINOP_MI for other binops
(#87524)
Since we can bitcast and then do the same thing sub does in the table
section above, it was trivial to also add fsub, fmul, and fdiv.
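For orientation, the IR shape these patterns match corresponds to the
following C++ (using C++20 std::bit_cast; a sketch mirroring the fsub_32r
test below, names illustrative):

#include <atomic>
#include <bit>
#include <cstdint>

// The pattern the multiclass now also covers for fsub/fmul/fdiv: atomic
// integer load, bitcast to float, FP binop, bitcast back, atomic store.
void fsub_32r(std::atomic<uint32_t> *loc, float val) {
  float f = std::bit_cast<float>(loc->load(std::memory_order_seq_cst));
  loc->store(std::bit_cast<uint32_t>(f - val), std::memory_order_release);
}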
---
llvm/lib/Target/X86/X86InstrCompiler.td | 4 +-
llvm/test/CodeGen/X86/atomic-fp.ll | 2323 +++++++++++++++++++++++
2 files changed, 2326 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index 270dd32c7235a..cf5a32f09f118 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -1125,7 +1125,9 @@ multiclass ATOMIC_LOAD_FP_BINOP_MI<string Name, SDNode op> {
Requires<[HasAVX512]>;
}
defm : ATOMIC_LOAD_FP_BINOP_MI<"ADD", fadd>;
-// FIXME: Add fsub, fmul, fdiv, ...
+defm : ATOMIC_LOAD_FP_BINOP_MI<"SUB", fsub>;
+defm : ATOMIC_LOAD_FP_BINOP_MI<"MUL", fmul>;
+defm : ATOMIC_LOAD_FP_BINOP_MI<"DIV", fdiv>;
multiclass RELEASE_UNOP<string Name, dag dag8, dag dag16, dag dag32,
dag dag64> {
diff --git a/llvm/test/CodeGen/X86/atomic-fp.ll b/llvm/test/CodeGen/X86/atomic-fp.ll
index 1094edd19af43..fe79dfe39f645 100644
--- a/llvm/test/CodeGen/X86/atomic-fp.ll
+++ b/llvm/test/CodeGen/X86/atomic-fp.ll
@@ -777,3 +777,2326 @@ bb:
store atomic i64 %tmp9, ptr %tmp4 monotonic, align 8
ret void
}
+
+; ----- FSUB -----
+
+define dso_local void @fsub_32r(ptr %loc, float %val) nounwind {
+; X86-NOSSE-LABEL: fsub_32r:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: subl $8, %esp
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl (%eax), %ecx
+; X86-NOSSE-NEXT: movl %ecx, (%esp)
+; X86-NOSSE-NEXT: flds (%esp)
+; X86-NOSSE-NEXT: fsubs {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %ecx, (%eax)
+; X86-NOSSE-NEXT: addl $8, %esp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fsub_32r:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: subl $8, %esp
+; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT: movl (%eax), %ecx
+; X86-SSE1-NEXT: movl %ecx, (%esp)
+; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE1-NEXT: subss {{[0-9]+}}(%esp), %xmm0
+; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT: movl %ecx, (%eax)
+; X86-SSE1-NEXT: addl $8, %esp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fsub_32r:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT: subss {{[0-9]+}}(%esp), %xmm0
+; X86-SSE2-NEXT: movss %xmm0, (%eax)
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fsub_32r:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX-NEXT: vsubss {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X86-AVX-NEXT: vmovss %xmm0, (%eax)
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fsub_32r:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X64-SSE-NEXT: subss %xmm0, %xmm1
+; X64-SSE-NEXT: movss %xmm1, (%rdi)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fsub_32r:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X64-AVX-NEXT: vsubss %xmm0, %xmm1, %xmm0
+; X64-AVX-NEXT: vmovss %xmm0, (%rdi)
+; X64-AVX-NEXT: retq
+ %1 = load atomic i32, ptr %loc seq_cst, align 4
+ %2 = bitcast i32 %1 to float
+ %sub = fsub float %2, %val
+ %3 = bitcast float %sub to i32
+ store atomic i32 %3, ptr %loc release, align 4
+ ret void
+}
+
+define dso_local void @fsub_64r(ptr %loc, double %val) nounwind {
+; X86-NOSSE-LABEL: fsub_64r:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl %ebp
+; X86-NOSSE-NEXT: movl %esp, %ebp
+; X86-NOSSE-NEXT: andl $-8, %esp
+; X86-NOSSE-NEXT: subl $32, %esp
+; X86-NOSSE-NEXT: movl 8(%ebp), %eax
+; X86-NOSSE-NEXT: fildll (%eax)
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOSSE-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fsubl 12(%ebp)
+; X86-NOSSE-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOSSE-NEXT: movl %ecx, (%esp)
+; X86-NOSSE-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fildll (%esp)
+; X86-NOSSE-NEXT: fistpll (%eax)
+; X86-NOSSE-NEXT: movl %ebp, %esp
+; X86-NOSSE-NEXT: popl %ebp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fsub_64r:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl %ebp
+; X86-SSE1-NEXT: movl %esp, %ebp
+; X86-SSE1-NEXT: andl $-8, %esp
+; X86-SSE1-NEXT: subl $16, %esp
+; X86-SSE1-NEXT: movl 8(%ebp), %eax
+; X86-SSE1-NEXT: xorps %xmm0, %xmm0
+; X86-SSE1-NEXT: xorps %xmm1, %xmm1
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
+; X86-SSE1-NEXT: movss %xmm1, (%esp)
+; X86-SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; X86-SSE1-NEXT: movss %xmm1, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: fldl (%esp)
+; X86-SSE1-NEXT: fsubl 12(%ebp)
+; X86-SSE1-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
+; X86-SSE1-NEXT: movlps %xmm0, (%eax)
+; X86-SSE1-NEXT: movl %ebp, %esp
+; X86-SSE1-NEXT: popl %ebp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fsub_64r:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: andl $-8, %esp
+; X86-SSE2-NEXT: subl $8, %esp
+; X86-SSE2-NEXT: movl 8(%ebp), %eax
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: subsd 12(%ebp), %xmm0
+; X86-SSE2-NEXT: movsd %xmm0, (%esp)
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movlps %xmm0, (%eax)
+; X86-SSE2-NEXT: movl %ebp, %esp
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fsub_64r:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: pushl %ebp
+; X86-AVX-NEXT: movl %esp, %ebp
+; X86-AVX-NEXT: andl $-8, %esp
+; X86-AVX-NEXT: subl $8, %esp
+; X86-AVX-NEXT: movl 8(%ebp), %eax
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vsubsd 12(%ebp), %xmm0, %xmm0
+; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmovlps %xmm0, (%eax)
+; X86-AVX-NEXT: movl %ebp, %esp
+; X86-AVX-NEXT: popl %ebp
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fsub_64r:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X64-SSE-NEXT: subsd %xmm0, %xmm1
+; X64-SSE-NEXT: movsd %xmm1, (%rdi)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fsub_64r:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X64-AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0
+; X64-AVX-NEXT: vmovsd %xmm0, (%rdi)
+; X64-AVX-NEXT: retq
+ %1 = load atomic i64, ptr %loc seq_cst, align 8
+ %2 = bitcast i64 %1 to double
+ %sub = fsub double %2, %val
+ %3 = bitcast double %sub to i64
+ store atomic i64 %3, ptr %loc release, align 8
+ ret void
+}
+
+; Floating-point sub to a global using an immediate.
+define dso_local void @fsub_32g() nounwind {
+; X86-NOSSE-LABEL: fsub_32g:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: subl $8, %esp
+; X86-NOSSE-NEXT: movl glob32, %eax
+; X86-NOSSE-NEXT: movl %eax, (%esp)
+; X86-NOSSE-NEXT: fld1
+; X86-NOSSE-NEXT: fchs
+; X86-NOSSE-NEXT: fadds (%esp)
+; X86-NOSSE-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl %eax, glob32
+; X86-NOSSE-NEXT: addl $8, %esp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fsub_32g:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: subl $8, %esp
+; X86-SSE1-NEXT: movl glob32, %eax
+; X86-SSE1-NEXT: movl %eax, (%esp)
+; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE1-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT: movl %eax, glob32
+; X86-SSE1-NEXT: addl $8, %esp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fsub_32g:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = [-1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; X86-SSE2-NEXT: addss glob32, %xmm0
+; X86-SSE2-NEXT: movss %xmm0, glob32
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fsub_32g:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = [-1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; X86-AVX-NEXT: vaddss glob32, %xmm0, %xmm0
+; X86-AVX-NEXT: vmovss %xmm0, glob32
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fsub_32g:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: movss {{.*#+}} xmm0 = [-1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; X64-SSE-NEXT: addss glob32(%rip), %xmm0
+; X64-SSE-NEXT: movss %xmm0, glob32(%rip)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fsub_32g:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovss {{.*#+}} xmm0 = [-1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; X64-AVX-NEXT: vaddss glob32(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: vmovss %xmm0, glob32(%rip)
+; X64-AVX-NEXT: retq
+ %i = load atomic i32, ptr @glob32 monotonic, align 4
+ %f = bitcast i32 %i to float
+ %sub = fsub float %f, 1.000000e+00
+ %s = bitcast float %sub to i32
+ store atomic i32 %s, ptr @glob32 monotonic, align 4
+ ret void
+}
+
+define dso_local void @fsub_64g() nounwind {
+; X86-NOSSE-LABEL: fsub_64g:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl %ebp
+; X86-NOSSE-NEXT: movl %esp, %ebp
+; X86-NOSSE-NEXT: andl $-8, %esp
+; X86-NOSSE-NEXT: subl $32, %esp
+; X86-NOSSE-NEXT: fildll glob64
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fld1
+; X86-NOSSE-NEXT: fchs
+; X86-NOSSE-NEXT: faddl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %eax, (%esp)
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fildll (%esp)
+; X86-NOSSE-NEXT: fistpll glob64
+; X86-NOSSE-NEXT: movl %ebp, %esp
+; X86-NOSSE-NEXT: popl %ebp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fsub_64g:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl %ebp
+; X86-SSE1-NEXT: movl %esp, %ebp
+; X86-SSE1-NEXT: andl $-8, %esp
+; X86-SSE1-NEXT: subl $16, %esp
+; X86-SSE1-NEXT: xorps %xmm0, %xmm0
+; X86-SSE1-NEXT: xorps %xmm1, %xmm1
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
+; X86-SSE1-NEXT: movss %xmm1, (%esp)
+; X86-SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; X86-SSE1-NEXT: movss %xmm1, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: fld1
+; X86-SSE1-NEXT: fchs
+; X86-SSE1-NEXT: faddl (%esp)
+; X86-SSE1-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
+; X86-SSE1-NEXT: movlps %xmm0, glob64
+; X86-SSE1-NEXT: movl %ebp, %esp
+; X86-SSE1-NEXT: popl %ebp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fsub_64g:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: andl $-8, %esp
+; X86-SSE2-NEXT: subl $8, %esp
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: addsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: movsd %xmm0, (%esp)
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movlps %xmm0, glob64
+; X86-SSE2-NEXT: movl %ebp, %esp
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fsub_64g:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: pushl %ebp
+; X86-AVX-NEXT: movl %esp, %ebp
+; X86-AVX-NEXT: andl $-8, %esp
+; X86-AVX-NEXT: subl $8, %esp
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vaddsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmovlps %xmm0, glob64
+; X86-AVX-NEXT: movl %ebp, %esp
+; X86-AVX-NEXT: popl %ebp
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fsub_64g:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: movsd {{.*#+}} xmm0 = [-1.0E+0,0.0E+0]
+; X64-SSE-NEXT: addsd glob64(%rip), %xmm0
+; X64-SSE-NEXT: movsd %xmm0, glob64(%rip)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fsub_64g:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovsd {{.*#+}} xmm0 = [-1.0E+0,0.0E+0]
+; X64-AVX-NEXT: vaddsd glob64(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: vmovsd %xmm0, glob64(%rip)
+; X64-AVX-NEXT: retq
+ %i = load atomic i64, ptr @glob64 monotonic, align 8
+ %f = bitcast i64 %i to double
+ %sub = fsub double %f, 1.000000e+00
+ %s = bitcast double %sub to i64
+ store atomic i64 %s, ptr @glob64 monotonic, align 8
+ ret void
+}
+
+; Floating-point sub to a hard-coded immediate location using an immediate.
+define dso_local void @fsub_32imm() nounwind {
+; X86-NOSSE-LABEL: fsub_32imm:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: subl $8, %esp
+; X86-NOSSE-NEXT: movl -559038737, %eax
+; X86-NOSSE-NEXT: movl %eax, (%esp)
+; X86-NOSSE-NEXT: fld1
+; X86-NOSSE-NEXT: fchs
+; X86-NOSSE-NEXT: fadds (%esp)
+; X86-NOSSE-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl %eax, -559038737
+; X86-NOSSE-NEXT: addl $8, %esp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fsub_32imm:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: subl $8, %esp
+; X86-SSE1-NEXT: movl -559038737, %eax
+; X86-SSE1-NEXT: movl %eax, (%esp)
+; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE1-NEXT: addss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT: movl %eax, -559038737
+; X86-SSE1-NEXT: addl $8, %esp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fsub_32imm:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = [-1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; X86-SSE2-NEXT: addss -559038737, %xmm0
+; X86-SSE2-NEXT: movss %xmm0, -559038737
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fsub_32imm:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = [-1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; X86-AVX-NEXT: vaddss -559038737, %xmm0, %xmm0
+; X86-AVX-NEXT: vmovss %xmm0, -559038737
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fsub_32imm:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: movl $3735928559, %eax # imm = 0xDEADBEEF
+; X64-SSE-NEXT: movss {{.*#+}} xmm0 = [-1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; X64-SSE-NEXT: addss (%rax), %xmm0
+; X64-SSE-NEXT: movss %xmm0, (%rax)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fsub_32imm:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl $3735928559, %eax # imm = 0xDEADBEEF
+; X64-AVX-NEXT: vmovss {{.*#+}} xmm0 = [-1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; X64-AVX-NEXT: vaddss (%rax), %xmm0, %xmm0
+; X64-AVX-NEXT: vmovss %xmm0, (%rax)
+; X64-AVX-NEXT: retq
+ %i = load atomic i32, ptr inttoptr (i32 3735928559 to ptr) monotonic, align 4
+ %f = bitcast i32 %i to float
+ %sub = fsub float %f, 1.000000e+00
+ %s = bitcast float %sub to i32
+ store atomic i32 %s, ptr inttoptr (i32 3735928559 to ptr) monotonic, align 4
+ ret void
+}
+
+define dso_local void @fsub_64imm() nounwind {
+; X86-NOSSE-LABEL: fsub_64imm:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl %ebp
+; X86-NOSSE-NEXT: movl %esp, %ebp
+; X86-NOSSE-NEXT: andl $-8, %esp
+; X86-NOSSE-NEXT: subl $32, %esp
+; X86-NOSSE-NEXT: fildll -559038737
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fld1
+; X86-NOSSE-NEXT: fchs
+; X86-NOSSE-NEXT: faddl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %eax, (%esp)
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fildll (%esp)
+; X86-NOSSE-NEXT: fistpll -559038737
+; X86-NOSSE-NEXT: movl %ebp, %esp
+; X86-NOSSE-NEXT: popl %ebp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fsub_64imm:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl %ebp
+; X86-SSE1-NEXT: movl %esp, %ebp
+; X86-SSE1-NEXT: andl $-8, %esp
+; X86-SSE1-NEXT: subl $16, %esp
+; X86-SSE1-NEXT: xorps %xmm0, %xmm0
+; X86-SSE1-NEXT: xorps %xmm1, %xmm1
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
+; X86-SSE1-NEXT: movss %xmm1, (%esp)
+; X86-SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; X86-SSE1-NEXT: movss %xmm1, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: fld1
+; X86-SSE1-NEXT: fchs
+; X86-SSE1-NEXT: faddl (%esp)
+; X86-SSE1-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
+; X86-SSE1-NEXT: movlps %xmm0, -559038737
+; X86-SSE1-NEXT: movl %ebp, %esp
+; X86-SSE1-NEXT: popl %ebp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fsub_64imm:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: andl $-8, %esp
+; X86-SSE2-NEXT: subl $8, %esp
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: addsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: movsd %xmm0, (%esp)
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movlps %xmm0, -559038737
+; X86-SSE2-NEXT: movl %ebp, %esp
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fsub_64imm:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: pushl %ebp
+; X86-AVX-NEXT: movl %esp, %ebp
+; X86-AVX-NEXT: andl $-8, %esp
+; X86-AVX-NEXT: subl $8, %esp
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vaddsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmovlps %xmm0, -559038737
+; X86-AVX-NEXT: movl %ebp, %esp
+; X86-AVX-NEXT: popl %ebp
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fsub_64imm:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: movl $3735928559, %eax # imm = 0xDEADBEEF
+; X64-SSE-NEXT: movsd {{.*#+}} xmm0 = [-1.0E+0,0.0E+0]
+; X64-SSE-NEXT: addsd (%rax), %xmm0
+; X64-SSE-NEXT: movsd %xmm0, (%rax)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fsub_64imm:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl $3735928559, %eax # imm = 0xDEADBEEF
+; X64-AVX-NEXT: vmovsd {{.*#+}} xmm0 = [-1.0E+0,0.0E+0]
+; X64-AVX-NEXT: vaddsd (%rax), %xmm0, %xmm0
+; X64-AVX-NEXT: vmovsd %xmm0, (%rax)
+; X64-AVX-NEXT: retq
+ %i = load atomic i64, ptr inttoptr (i64 3735928559 to ptr) monotonic, align 8
+ %f = bitcast i64 %i to double
+ %sub = fsub double %f, 1.000000e+00
+ %s = bitcast double %sub to i64
+ store atomic i64 %s, ptr inttoptr (i64 3735928559 to ptr) monotonic, align 8
+ ret void
+}
+
+; Floating-point sub to a stack location.
+define dso_local void @fsub_32stack() nounwind {
+; X86-NOSSE-LABEL: fsub_32stack:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: subl $12, %esp
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl %eax, (%esp)
+; X86-NOSSE-NEXT: fld1
+; X86-NOSSE-NEXT: fsubs (%esp)
+; X86-NOSSE-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: addl $12, %esp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fsub_32stack:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: subl $12, %esp
+; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT: movl %eax, (%esp)
+; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; X86-SSE1-NEXT: subss (%esp), %xmm0
+; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: addl $12, %esp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fsub_32stack:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %eax
+; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; X86-SSE2-NEXT: subss (%esp), %xmm0
+; X86-SSE2-NEXT: movss %xmm0, (%esp)
+; X86-SSE2-NEXT: popl %eax
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fsub_32stack:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: pushl %eax
+; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; X86-AVX-NEXT: vsubss (%esp), %xmm0, %xmm0
+; X86-AVX-NEXT: vmovss %xmm0, (%esp)
+; X86-AVX-NEXT: popl %eax
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fsub_32stack:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; X64-SSE-NEXT: subss -{{[0-9]+}}(%rsp), %xmm0
+; X64-SSE-NEXT: movss %xmm0, -{{[0-9]+}}(%rsp)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fsub_32stack:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; X64-AVX-NEXT: vsubss -{{[0-9]+}}(%rsp), %xmm0, %xmm0
+; X64-AVX-NEXT: vmovss %xmm0, -{{[0-9]+}}(%rsp)
+; X64-AVX-NEXT: retq
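+  ; Unlike the global/immediate-address tests, the stack variants operate on
+  ; an alloca and use acquire/release orderings rather than monotonic.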
+ %ptr = alloca i32, align 4
+ %load = load atomic i32, ptr %ptr acquire, align 4
+ %bc0 = bitcast i32 %load to float
+ %fsub = fsub float 1.000000e+00, %bc0
+ %bc1 = bitcast float %fsub to i32
+ store atomic i32 %bc1, ptr %ptr release, align 4
+ ret void
+}
+
+define dso_local void @fsub_64stack() nounwind {
+; X86-NOSSE-LABEL: fsub_64stack:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl %ebp
+; X86-NOSSE-NEXT: movl %esp, %ebp
+; X86-NOSSE-NEXT: andl $-8, %esp
+; X86-NOSSE-NEXT: subl $40, %esp
+; X86-NOSSE-NEXT: fildll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fld1
+; X86-NOSSE-NEXT: fsubl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %eax, (%esp)
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fildll (%esp)
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl %ebp, %esp
+; X86-NOSSE-NEXT: popl %ebp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fsub_64stack:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl %ebp
+; X86-SSE1-NEXT: movl %esp, %ebp
+; X86-SSE1-NEXT: andl $-8, %esp
+; X86-SSE1-NEXT: subl $24, %esp
+; X86-SSE1-NEXT: xorps %xmm0, %xmm0
+; X86-SSE1-NEXT: xorps %xmm1, %xmm1
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
+; X86-SSE1-NEXT: movss %xmm1, (%esp)
+; X86-SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; X86-SSE1-NEXT: movss %xmm1, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: fld1
+; X86-SSE1-NEXT: fsubl (%esp)
+; X86-SSE1-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
+; X86-SSE1-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movl %ebp, %esp
+; X86-SSE1-NEXT: popl %ebp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fsub_64stack:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: andl $-8, %esp
+; X86-SSE2-NEXT: subl $16, %esp
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm1 = [1.0E+0,0.0E+0]
+; X86-SSE2-NEXT: subsd %xmm0, %xmm1
+; X86-SSE2-NEXT: movsd %xmm1, (%esp)
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %ebp, %esp
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fsub_64stack:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: pushl %ebp
+; X86-AVX-NEXT: movl %esp, %ebp
+; X86-AVX-NEXT: andl $-8, %esp
+; X86-AVX-NEXT: subl $16, %esp
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm1 = [1.0E+0,0.0E+0]
+; X86-AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0
+; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
+; X86-AVX-NEXT: movl %ebp, %esp
+; X86-AVX-NEXT: popl %ebp
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fsub_64stack:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
+; X64-SSE-NEXT: subsd -{{[0-9]+}}(%rsp), %xmm0
+; X64-SSE-NEXT: movsd %xmm0, -{{[0-9]+}}(%rsp)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fsub_64stack:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
+; X64-AVX-NEXT: vsubsd -{{[0-9]+}}(%rsp), %xmm0, %xmm0
+; X64-AVX-NEXT: vmovsd %xmm0, -{{[0-9]+}}(%rsp)
+; X64-AVX-NEXT: retq
+ %ptr = alloca i64, align 8
+ %load = load atomic i64, ptr %ptr acquire, align 8
+ %bc0 = bitcast i64 %load to double
+ %fsub = fsub double 1.000000e+00, %bc0
+ %bc1 = bitcast double %fsub to i64
+ store atomic i64 %bc1, ptr %ptr release, align 8
+ ret void
+}
+
+define dso_local void @fsub_array(ptr %arg, double %arg1, i64 %arg2) nounwind {
+; X86-NOSSE-LABEL: fsub_array:
+; X86-NOSSE: # %bb.0: # %bb
+; X86-NOSSE-NEXT: pushl %ebp
+; X86-NOSSE-NEXT: movl %esp, %ebp
+; X86-NOSSE-NEXT: pushl %esi
+; X86-NOSSE-NEXT: andl $-8, %esp
+; X86-NOSSE-NEXT: subl $40, %esp
+; X86-NOSSE-NEXT: movl 20(%ebp), %eax
+; X86-NOSSE-NEXT: movl 8(%ebp), %ecx
+; X86-NOSSE-NEXT: fildll (%ecx,%eax,8)
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NOSSE-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fsubl 12(%ebp)
+; X86-NOSSE-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NOSSE-NEXT: movl %edx, (%esp)
+; X86-NOSSE-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fildll (%esp)
+; X86-NOSSE-NEXT: fistpll (%ecx,%eax,8)
+; X86-NOSSE-NEXT: leal -4(%ebp), %esp
+; X86-NOSSE-NEXT: popl %esi
+; X86-NOSSE-NEXT: popl %ebp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fsub_array:
+; X86-SSE1: # %bb.0: # %bb
+; X86-SSE1-NEXT: pushl %ebp
+; X86-SSE1-NEXT: movl %esp, %ebp
+; X86-SSE1-NEXT: andl $-8, %esp
+; X86-SSE1-NEXT: subl $16, %esp
+; X86-SSE1-NEXT: movl 20(%ebp), %eax
+; X86-SSE1-NEXT: movl 8(%ebp), %ecx
+; X86-SSE1-NEXT: xorps %xmm0, %xmm0
+; X86-SSE1-NEXT: xorps %xmm1, %xmm1
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
+; X86-SSE1-NEXT: movss %xmm1, (%esp)
+; X86-SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; X86-SSE1-NEXT: movss %xmm1, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: fldl (%esp)
+; X86-SSE1-NEXT: fsubl 12(%ebp)
+; X86-SSE1-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
+; X86-SSE1-NEXT: movlps %xmm0, (%ecx,%eax,8)
+; X86-SSE1-NEXT: movl %ebp, %esp
+; X86-SSE1-NEXT: popl %ebp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fsub_array:
+; X86-SSE2: # %bb.0: # %bb
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: andl $-8, %esp
+; X86-SSE2-NEXT: subl $8, %esp
+; X86-SSE2-NEXT: movl 20(%ebp), %eax
+; X86-SSE2-NEXT: movl 8(%ebp), %ecx
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: subsd 12(%ebp), %xmm0
+; X86-SSE2-NEXT: movsd %xmm0, (%esp)
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movlps %xmm0, (%ecx,%eax,8)
+; X86-SSE2-NEXT: movl %ebp, %esp
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fsub_array:
+; X86-AVX: # %bb.0: # %bb
+; X86-AVX-NEXT: pushl %ebp
+; X86-AVX-NEXT: movl %esp, %ebp
+; X86-AVX-NEXT: andl $-8, %esp
+; X86-AVX-NEXT: subl $8, %esp
+; X86-AVX-NEXT: movl 20(%ebp), %eax
+; X86-AVX-NEXT: movl 8(%ebp), %ecx
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vsubsd 12(%ebp), %xmm0, %xmm0
+; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmovlps %xmm0, (%ecx,%eax,8)
+; X86-AVX-NEXT: movl %ebp, %esp
+; X86-AVX-NEXT: popl %ebp
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fsub_array:
+; X64-SSE: # %bb.0: # %bb
+; X64-SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X64-SSE-NEXT: subsd %xmm0, %xmm1
+; X64-SSE-NEXT: movsd %xmm1, (%rdi,%rsi,8)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fsub_array:
+; X64-AVX: # %bb.0: # %bb
+; X64-AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X64-AVX-NEXT: vsubsd %xmm0, %xmm1, %xmm0
+; X64-AVX-NEXT: vmovsd %xmm0, (%rdi,%rsi,8)
+; X64-AVX-NEXT: retq
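+; The array variant addresses a runtime element through a getelementptr
+; before the monotonic atomic load/store pair.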
+bb:
+ %tmp4 = getelementptr inbounds i64, ptr %arg, i64 %arg2
+ %tmp6 = load atomic i64, ptr %tmp4 monotonic, align 8
+ %tmp7 = bitcast i64 %tmp6 to double
+ %tmp8 = fsub double %tmp7, %arg1
+ %tmp9 = bitcast double %tmp8 to i64
+ store atomic i64 %tmp9, ptr %tmp4 monotonic, align 8
+ ret void
+}
+
+; ----- FMUL -----
+
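+; The FMUL tests mirror the FSUB set above: register, global, hard-coded
+; address, stack, and array destinations, multiplying either by an incoming
+; value or by the constant 0x400921FA00000000 (~3.14159).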
+define dso_local void @fmul_32r(ptr %loc, float %val) nounwind {
+; X86-NOSSE-LABEL: fmul_32r:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: subl $8, %esp
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl (%eax), %ecx
+; X86-NOSSE-NEXT: movl %ecx, (%esp)
+; X86-NOSSE-NEXT: flds (%esp)
+; X86-NOSSE-NEXT: fmuls {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %ecx, (%eax)
+; X86-NOSSE-NEXT: addl $8, %esp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fmul_32r:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: subl $8, %esp
+; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT: movl (%eax), %ecx
+; X86-SSE1-NEXT: movl %ecx, (%esp)
+; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE1-NEXT: mulss {{[0-9]+}}(%esp), %xmm0
+; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT: movl %ecx, (%eax)
+; X86-SSE1-NEXT: addl $8, %esp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fmul_32r:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT: mulss (%eax), %xmm0
+; X86-SSE2-NEXT: movss %xmm0, (%eax)
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fmul_32r:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX-NEXT: vmulss (%eax), %xmm0, %xmm0
+; X86-AVX-NEXT: vmovss %xmm0, (%eax)
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fmul_32r:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: mulss (%rdi), %xmm0
+; X64-SSE-NEXT: movss %xmm0, (%rdi)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fmul_32r:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmulss (%rdi), %xmm0, %xmm0
+; X64-AVX-NEXT: vmovss %xmm0, (%rdi)
+; X64-AVX-NEXT: retq
+ %1 = load atomic i32, ptr %loc seq_cst, align 4
+ %2 = bitcast i32 %1 to float
+ %mul = fmul float %2, %val
+ %3 = bitcast float %mul to i32
+ store atomic i32 %3, ptr %loc release, align 4
+ ret void
+}
+
+define dso_local void @fmul_64r(ptr %loc, double %val) nounwind {
+; X86-NOSSE-LABEL: fmul_64r:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl %ebp
+; X86-NOSSE-NEXT: movl %esp, %ebp
+; X86-NOSSE-NEXT: andl $-8, %esp
+; X86-NOSSE-NEXT: subl $32, %esp
+; X86-NOSSE-NEXT: movl 8(%ebp), %eax
+; X86-NOSSE-NEXT: fildll (%eax)
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOSSE-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fmull 12(%ebp)
+; X86-NOSSE-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOSSE-NEXT: movl %ecx, (%esp)
+; X86-NOSSE-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fildll (%esp)
+; X86-NOSSE-NEXT: fistpll (%eax)
+; X86-NOSSE-NEXT: movl %ebp, %esp
+; X86-NOSSE-NEXT: popl %ebp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fmul_64r:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl %ebp
+; X86-SSE1-NEXT: movl %esp, %ebp
+; X86-SSE1-NEXT: andl $-8, %esp
+; X86-SSE1-NEXT: subl $16, %esp
+; X86-SSE1-NEXT: movl 8(%ebp), %eax
+; X86-SSE1-NEXT: xorps %xmm0, %xmm0
+; X86-SSE1-NEXT: xorps %xmm1, %xmm1
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
+; X86-SSE1-NEXT: movss %xmm1, (%esp)
+; X86-SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; X86-SSE1-NEXT: movss %xmm1, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: fldl (%esp)
+; X86-SSE1-NEXT: fmull 12(%ebp)
+; X86-SSE1-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
+; X86-SSE1-NEXT: movlps %xmm0, (%eax)
+; X86-SSE1-NEXT: movl %ebp, %esp
+; X86-SSE1-NEXT: popl %ebp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fmul_64r:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: andl $-8, %esp
+; X86-SSE2-NEXT: subl $8, %esp
+; X86-SSE2-NEXT: movl 8(%ebp), %eax
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: mulsd 12(%ebp), %xmm0
+; X86-SSE2-NEXT: movsd %xmm0, (%esp)
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movlps %xmm0, (%eax)
+; X86-SSE2-NEXT: movl %ebp, %esp
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fmul_64r:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: pushl %ebp
+; X86-AVX-NEXT: movl %esp, %ebp
+; X86-AVX-NEXT: andl $-8, %esp
+; X86-AVX-NEXT: subl $8, %esp
+; X86-AVX-NEXT: movl 8(%ebp), %eax
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmulsd 12(%ebp), %xmm0, %xmm0
+; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmovlps %xmm0, (%eax)
+; X86-AVX-NEXT: movl %ebp, %esp
+; X86-AVX-NEXT: popl %ebp
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fmul_64r:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: mulsd (%rdi), %xmm0
+; X64-SSE-NEXT: movsd %xmm0, (%rdi)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fmul_64r:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmulsd (%rdi), %xmm0, %xmm0
+; X64-AVX-NEXT: vmovsd %xmm0, (%rdi)
+; X64-AVX-NEXT: retq
+ %1 = load atomic i64, ptr %loc seq_cst, align 8
+ %2 = bitcast i64 %1 to double
+ %mul = fmul double %2, %val
+ %3 = bitcast double %mul to i64
+ store atomic i64 %3, ptr %loc release, align 8
+ ret void
+}
+
+; Floating-point mul to a global using a constant operand.
+define dso_local void @fmul_32g() nounwind {
+; X86-NOSSE-LABEL: fmul_32g:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: subl $8, %esp
+; X86-NOSSE-NEXT: movl glob32, %eax
+; X86-NOSSE-NEXT: movl %eax, (%esp)
+; X86-NOSSE-NEXT: flds (%esp)
+; X86-NOSSE-NEXT: fmuls {{\.?LCPI[0-9]+_[0-9]+}}
+; X86-NOSSE-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl %eax, glob32
+; X86-NOSSE-NEXT: addl $8, %esp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fmul_32g:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: subl $8, %esp
+; X86-SSE1-NEXT: movl glob32, %eax
+; X86-SSE1-NEXT: movl %eax, (%esp)
+; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE1-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT: movl %eax, glob32
+; X86-SSE1-NEXT: addl $8, %esp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fmul_32g:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = [3.14159012E+0,0.0E+0,0.0E+0,0.0E+0]
+; X86-SSE2-NEXT: mulss glob32, %xmm0
+; X86-SSE2-NEXT: movss %xmm0, glob32
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fmul_32g:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = [3.14159012E+0,0.0E+0,0.0E+0,0.0E+0]
+; X86-AVX-NEXT: vmulss glob32, %xmm0, %xmm0
+; X86-AVX-NEXT: vmovss %xmm0, glob32
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fmul_32g:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: movss {{.*#+}} xmm0 = [3.14159012E+0,0.0E+0,0.0E+0,0.0E+0]
+; X64-SSE-NEXT: mulss glob32(%rip), %xmm0
+; X64-SSE-NEXT: movss %xmm0, glob32(%rip)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fmul_32g:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovss {{.*#+}} xmm0 = [3.14159012E+0,0.0E+0,0.0E+0,0.0E+0]
+; X64-AVX-NEXT: vmulss glob32(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: vmovss %xmm0, glob32(%rip)
+; X64-AVX-NEXT: retq
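+  ; 0x400921FA00000000 is the double encoding LLVM IR requires for a
+  ; float-typed constant: its value (~3.14159012) is exactly representable
+  ; as a 32-bit float, matching the movss constants checked above.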
+ %i = load atomic i32, ptr @glob32 monotonic, align 4
+ %f = bitcast i32 %i to float
+ %mul = fmul float %f, 0x400921FA00000000
+ %s = bitcast float %mul to i32
+ store atomic i32 %s, ptr @glob32 monotonic, align 4
+ ret void
+}
+
+define dso_local void @fmul_64g() nounwind {
+; X86-NOSSE-LABEL: fmul_64g:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl %ebp
+; X86-NOSSE-NEXT: movl %esp, %ebp
+; X86-NOSSE-NEXT: andl $-8, %esp
+; X86-NOSSE-NEXT: subl $32, %esp
+; X86-NOSSE-NEXT: fildll glob64
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fmuls {{\.?LCPI[0-9]+_[0-9]+}}
+; X86-NOSSE-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %eax, (%esp)
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fildll (%esp)
+; X86-NOSSE-NEXT: fistpll glob64
+; X86-NOSSE-NEXT: movl %ebp, %esp
+; X86-NOSSE-NEXT: popl %ebp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fmul_64g:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl %ebp
+; X86-SSE1-NEXT: movl %esp, %ebp
+; X86-SSE1-NEXT: andl $-8, %esp
+; X86-SSE1-NEXT: subl $16, %esp
+; X86-SSE1-NEXT: xorps %xmm0, %xmm0
+; X86-SSE1-NEXT: xorps %xmm1, %xmm1
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
+; X86-SSE1-NEXT: movss %xmm1, (%esp)
+; X86-SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; X86-SSE1-NEXT: movss %xmm1, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: fldl (%esp)
+; X86-SSE1-NEXT: fmuls {{\.?LCPI[0-9]+_[0-9]+}}
+; X86-SSE1-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
+; X86-SSE1-NEXT: movlps %xmm0, glob64
+; X86-SSE1-NEXT: movl %ebp, %esp
+; X86-SSE1-NEXT: popl %ebp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fmul_64g:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: andl $-8, %esp
+; X86-SSE2-NEXT: subl $8, %esp
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: mulsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: movsd %xmm0, (%esp)
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movlps %xmm0, glob64
+; X86-SSE2-NEXT: movl %ebp, %esp
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fmul_64g:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: pushl %ebp
+; X86-AVX-NEXT: movl %esp, %ebp
+; X86-AVX-NEXT: andl $-8, %esp
+; X86-AVX-NEXT: subl $8, %esp
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmovlps %xmm0, glob64
+; X86-AVX-NEXT: movl %ebp, %esp
+; X86-AVX-NEXT: popl %ebp
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fmul_64g:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: movsd {{.*#+}} xmm0 = [3.1415901184082031E+0,0.0E+0]
+; X64-SSE-NEXT: mulsd glob64(%rip), %xmm0
+; X64-SSE-NEXT: movsd %xmm0, glob64(%rip)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fmul_64g:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovsd {{.*#+}} xmm0 = [3.1415901184082031E+0,0.0E+0]
+; X64-AVX-NEXT: vmulsd glob64(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: vmovsd %xmm0, glob64(%rip)
+; X64-AVX-NEXT: retq
+ %i = load atomic i64, ptr @glob64 monotonic, align 8
+ %f = bitcast i64 %i to double
+ %mul = fmul double %f, 0x400921FA00000000
+ %s = bitcast double %mul to i64
+ store atomic i64 %s, ptr @glob64 monotonic, align 8
+ ret void
+}
+
+; Floating-point mul to a hard-coded memory address using a constant operand.
+define dso_local void @fmul_32imm() nounwind {
+; X86-NOSSE-LABEL: fmul_32imm:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: subl $8, %esp
+; X86-NOSSE-NEXT: movl -559038737, %eax
+; X86-NOSSE-NEXT: movl %eax, (%esp)
+; X86-NOSSE-NEXT: flds (%esp)
+; X86-NOSSE-NEXT: fmuls {{\.?LCPI[0-9]+_[0-9]+}}
+; X86-NOSSE-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl %eax, -559038737
+; X86-NOSSE-NEXT: addl $8, %esp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fmul_32imm:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: subl $8, %esp
+; X86-SSE1-NEXT: movl -559038737, %eax
+; X86-SSE1-NEXT: movl %eax, (%esp)
+; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE1-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT: movl %eax, -559038737
+; X86-SSE1-NEXT: addl $8, %esp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fmul_32imm:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = [3.14159012E+0,0.0E+0,0.0E+0,0.0E+0]
+; X86-SSE2-NEXT: mulss -559038737, %xmm0
+; X86-SSE2-NEXT: movss %xmm0, -559038737
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fmul_32imm:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = [3.14159012E+0,0.0E+0,0.0E+0,0.0E+0]
+; X86-AVX-NEXT: vmulss -559038737, %xmm0, %xmm0
+; X86-AVX-NEXT: vmovss %xmm0, -559038737
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fmul_32imm:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: movl $3735928559, %eax # imm = 0xDEADBEEF
+; X64-SSE-NEXT: movss {{.*#+}} xmm0 = [3.14159012E+0,0.0E+0,0.0E+0,0.0E+0]
+; X64-SSE-NEXT: mulss (%rax), %xmm0
+; X64-SSE-NEXT: movss %xmm0, (%rax)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fmul_32imm:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl $3735928559, %eax # imm = 0xDEADBEEF
+; X64-AVX-NEXT: vmovss {{.*#+}} xmm0 = [3.14159012E+0,0.0E+0,0.0E+0,0.0E+0]
+; X64-AVX-NEXT: vmulss (%rax), %xmm0, %xmm0
+; X64-AVX-NEXT: vmovss %xmm0, (%rax)
+; X64-AVX-NEXT: retq
+ %i = load atomic i32, ptr inttoptr (i32 3735928559 to ptr) monotonic, align 4
+ %f = bitcast i32 %i to float
+ %mul = fmul float %f, 0x400921FA00000000
+ %s = bitcast float %mul to i32
+ store atomic i32 %s, ptr inttoptr (i32 3735928559 to ptr) monotonic, align 4
+ ret void
+}
+
+define dso_local void @fmul_64imm() nounwind {
+; X86-NOSSE-LABEL: fmul_64imm:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl %ebp
+; X86-NOSSE-NEXT: movl %esp, %ebp
+; X86-NOSSE-NEXT: andl $-8, %esp
+; X86-NOSSE-NEXT: subl $32, %esp
+; X86-NOSSE-NEXT: fildll -559038737
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fmuls {{\.?LCPI[0-9]+_[0-9]+}}
+; X86-NOSSE-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %eax, (%esp)
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fildll (%esp)
+; X86-NOSSE-NEXT: fistpll -559038737
+; X86-NOSSE-NEXT: movl %ebp, %esp
+; X86-NOSSE-NEXT: popl %ebp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fmul_64imm:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl %ebp
+; X86-SSE1-NEXT: movl %esp, %ebp
+; X86-SSE1-NEXT: andl $-8, %esp
+; X86-SSE1-NEXT: subl $16, %esp
+; X86-SSE1-NEXT: xorps %xmm0, %xmm0
+; X86-SSE1-NEXT: xorps %xmm1, %xmm1
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
+; X86-SSE1-NEXT: movss %xmm1, (%esp)
+; X86-SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; X86-SSE1-NEXT: movss %xmm1, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: fldl (%esp)
+; X86-SSE1-NEXT: fmuls {{\.?LCPI[0-9]+_[0-9]+}}
+; X86-SSE1-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
+; X86-SSE1-NEXT: movlps %xmm0, -559038737
+; X86-SSE1-NEXT: movl %ebp, %esp
+; X86-SSE1-NEXT: popl %ebp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fmul_64imm:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: andl $-8, %esp
+; X86-SSE2-NEXT: subl $8, %esp
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: mulsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: movsd %xmm0, (%esp)
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movlps %xmm0, -559038737
+; X86-SSE2-NEXT: movl %ebp, %esp
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fmul_64imm:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: pushl %ebp
+; X86-AVX-NEXT: movl %esp, %ebp
+; X86-AVX-NEXT: andl $-8, %esp
+; X86-AVX-NEXT: subl $8, %esp
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmovlps %xmm0, -559038737
+; X86-AVX-NEXT: movl %ebp, %esp
+; X86-AVX-NEXT: popl %ebp
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fmul_64imm:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: movl $3735928559, %eax # imm = 0xDEADBEEF
+; X64-SSE-NEXT: movsd {{.*#+}} xmm0 = [3.1415901184082031E+0,0.0E+0]
+; X64-SSE-NEXT: mulsd (%rax), %xmm0
+; X64-SSE-NEXT: movsd %xmm0, (%rax)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fmul_64imm:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl $3735928559, %eax # imm = 0xDEADBEEF
+; X64-AVX-NEXT: vmovsd {{.*#+}} xmm0 = [3.1415901184082031E+0,0.0E+0]
+; X64-AVX-NEXT: vmulsd (%rax), %xmm0, %xmm0
+; X64-AVX-NEXT: vmovsd %xmm0, (%rax)
+; X64-AVX-NEXT: retq
+ %i = load atomic i64, ptr inttoptr (i64 3735928559 to ptr) monotonic, align 8
+ %f = bitcast i64 %i to double
+ %mul = fmul double %f, 0x400921FA00000000
+ %s = bitcast double %mul to i64
+ store atomic i64 %s, ptr inttoptr (i64 3735928559 to ptr) monotonic, align 8
+ ret void
+}
+
+; Floating-point mul to a stack location.
+define dso_local void @fmul_32stack() nounwind {
+; X86-NOSSE-LABEL: fmul_32stack:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: subl $12, %esp
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl %eax, (%esp)
+; X86-NOSSE-NEXT: flds (%esp)
+; X86-NOSSE-NEXT: fmuls {{\.?LCPI[0-9]+_[0-9]+}}
+; X86-NOSSE-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: addl $12, %esp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fmul_32stack:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: subl $12, %esp
+; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT: movl %eax, (%esp)
+; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE1-NEXT: mulss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: addl $12, %esp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fmul_32stack:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %eax
+; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = [3.14159012E+0,0.0E+0,0.0E+0,0.0E+0]
+; X86-SSE2-NEXT: mulss (%esp), %xmm0
+; X86-SSE2-NEXT: movss %xmm0, (%esp)
+; X86-SSE2-NEXT: popl %eax
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fmul_32stack:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: pushl %eax
+; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = [3.14159012E+0,0.0E+0,0.0E+0,0.0E+0]
+; X86-AVX-NEXT: vmulss (%esp), %xmm0, %xmm0
+; X86-AVX-NEXT: vmovss %xmm0, (%esp)
+; X86-AVX-NEXT: popl %eax
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fmul_32stack:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: movss {{.*#+}} xmm0 = [3.14159012E+0,0.0E+0,0.0E+0,0.0E+0]
+; X64-SSE-NEXT: mulss -{{[0-9]+}}(%rsp), %xmm0
+; X64-SSE-NEXT: movss %xmm0, -{{[0-9]+}}(%rsp)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fmul_32stack:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovss {{.*#+}} xmm0 = [3.14159012E+0,0.0E+0,0.0E+0,0.0E+0]
+; X64-AVX-NEXT: vmulss -{{[0-9]+}}(%rsp), %xmm0, %xmm0
+; X64-AVX-NEXT: vmovss %xmm0, -{{[0-9]+}}(%rsp)
+; X64-AVX-NEXT: retq
+ %ptr = alloca i32, align 4
+ %load = load atomic i32, ptr %ptr acquire, align 4
+ %bc0 = bitcast i32 %load to float
+ %fmul = fmul float 0x400921FA00000000, %bc0
+ %bc1 = bitcast float %fmul to i32
+ store atomic i32 %bc1, ptr %ptr release, align 4
+ ret void
+}
+
+define dso_local void @fmul_64stack() nounwind {
+; X86-NOSSE-LABEL: fmul_64stack:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl %ebp
+; X86-NOSSE-NEXT: movl %esp, %ebp
+; X86-NOSSE-NEXT: andl $-8, %esp
+; X86-NOSSE-NEXT: subl $40, %esp
+; X86-NOSSE-NEXT: fildll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fmuls {{\.?LCPI[0-9]+_[0-9]+}}
+; X86-NOSSE-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %eax, (%esp)
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fildll (%esp)
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl %ebp, %esp
+; X86-NOSSE-NEXT: popl %ebp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fmul_64stack:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl %ebp
+; X86-SSE1-NEXT: movl %esp, %ebp
+; X86-SSE1-NEXT: andl $-8, %esp
+; X86-SSE1-NEXT: subl $24, %esp
+; X86-SSE1-NEXT: xorps %xmm0, %xmm0
+; X86-SSE1-NEXT: xorps %xmm1, %xmm1
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
+; X86-SSE1-NEXT: movss %xmm1, (%esp)
+; X86-SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; X86-SSE1-NEXT: movss %xmm1, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: fldl (%esp)
+; X86-SSE1-NEXT: fmuls {{\.?LCPI[0-9]+_[0-9]+}}
+; X86-SSE1-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
+; X86-SSE1-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movl %ebp, %esp
+; X86-SSE1-NEXT: popl %ebp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fmul_64stack:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: andl $-8, %esp
+; X86-SSE2-NEXT: subl $16, %esp
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: mulsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: movsd %xmm0, (%esp)
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %ebp, %esp
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fmul_64stack:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: pushl %ebp
+; X86-AVX-NEXT: movl %esp, %ebp
+; X86-AVX-NEXT: andl $-8, %esp
+; X86-AVX-NEXT: subl $16, %esp
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmulsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
+; X86-AVX-NEXT: movl %ebp, %esp
+; X86-AVX-NEXT: popl %ebp
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fmul_64stack:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: movsd {{.*#+}} xmm0 = [3.1415901184082031E+0,0.0E+0]
+; X64-SSE-NEXT: mulsd -{{[0-9]+}}(%rsp), %xmm0
+; X64-SSE-NEXT: movsd %xmm0, -{{[0-9]+}}(%rsp)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fmul_64stack:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovsd {{.*#+}} xmm0 = [3.1415901184082031E+0,0.0E+0]
+; X64-AVX-NEXT: vmulsd -{{[0-9]+}}(%rsp), %xmm0, %xmm0
+; X64-AVX-NEXT: vmovsd %xmm0, -{{[0-9]+}}(%rsp)
+; X64-AVX-NEXT: retq
+ %ptr = alloca i64, align 8
+ %load = load atomic i64, ptr %ptr acquire, align 8
+ %bc0 = bitcast i64 %load to double
+ %fmul = fmul double 0x400921FA00000000, %bc0
+ %bc1 = bitcast double %fmul to i64
+ store atomic i64 %bc1, ptr %ptr release, align 8
+ ret void
+}
+
+define dso_local void @fmul_array(ptr %arg, double %arg1, i64 %arg2) nounwind {
+; X86-NOSSE-LABEL: fmul_array:
+; X86-NOSSE: # %bb.0: # %bb
+; X86-NOSSE-NEXT: pushl %ebp
+; X86-NOSSE-NEXT: movl %esp, %ebp
+; X86-NOSSE-NEXT: pushl %esi
+; X86-NOSSE-NEXT: andl $-8, %esp
+; X86-NOSSE-NEXT: subl $40, %esp
+; X86-NOSSE-NEXT: movl 20(%ebp), %eax
+; X86-NOSSE-NEXT: movl 8(%ebp), %ecx
+; X86-NOSSE-NEXT: fildll (%ecx,%eax,8)
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NOSSE-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fmull 12(%ebp)
+; X86-NOSSE-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NOSSE-NEXT: movl %edx, (%esp)
+; X86-NOSSE-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fildll (%esp)
+; X86-NOSSE-NEXT: fistpll (%ecx,%eax,8)
+; X86-NOSSE-NEXT: leal -4(%ebp), %esp
+; X86-NOSSE-NEXT: popl %esi
+; X86-NOSSE-NEXT: popl %ebp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fmul_array:
+; X86-SSE1: # %bb.0: # %bb
+; X86-SSE1-NEXT: pushl %ebp
+; X86-SSE1-NEXT: movl %esp, %ebp
+; X86-SSE1-NEXT: andl $-8, %esp
+; X86-SSE1-NEXT: subl $16, %esp
+; X86-SSE1-NEXT: movl 20(%ebp), %eax
+; X86-SSE1-NEXT: movl 8(%ebp), %ecx
+; X86-SSE1-NEXT: xorps %xmm0, %xmm0
+; X86-SSE1-NEXT: xorps %xmm1, %xmm1
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
+; X86-SSE1-NEXT: movss %xmm1, (%esp)
+; X86-SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; X86-SSE1-NEXT: movss %xmm1, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: fldl (%esp)
+; X86-SSE1-NEXT: fmull 12(%ebp)
+; X86-SSE1-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
+; X86-SSE1-NEXT: movlps %xmm0, (%ecx,%eax,8)
+; X86-SSE1-NEXT: movl %ebp, %esp
+; X86-SSE1-NEXT: popl %ebp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fmul_array:
+; X86-SSE2: # %bb.0: # %bb
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: andl $-8, %esp
+; X86-SSE2-NEXT: subl $8, %esp
+; X86-SSE2-NEXT: movl 20(%ebp), %eax
+; X86-SSE2-NEXT: movl 8(%ebp), %ecx
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: mulsd 12(%ebp), %xmm0
+; X86-SSE2-NEXT: movsd %xmm0, (%esp)
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movlps %xmm0, (%ecx,%eax,8)
+; X86-SSE2-NEXT: movl %ebp, %esp
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fmul_array:
+; X86-AVX: # %bb.0: # %bb
+; X86-AVX-NEXT: pushl %ebp
+; X86-AVX-NEXT: movl %esp, %ebp
+; X86-AVX-NEXT: andl $-8, %esp
+; X86-AVX-NEXT: subl $8, %esp
+; X86-AVX-NEXT: movl 20(%ebp), %eax
+; X86-AVX-NEXT: movl 8(%ebp), %ecx
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmulsd 12(%ebp), %xmm0, %xmm0
+; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmovlps %xmm0, (%ecx,%eax,8)
+; X86-AVX-NEXT: movl %ebp, %esp
+; X86-AVX-NEXT: popl %ebp
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fmul_array:
+; X64-SSE: # %bb.0: # %bb
+; X64-SSE-NEXT: mulsd (%rdi,%rsi,8), %xmm0
+; X64-SSE-NEXT: movsd %xmm0, (%rdi,%rsi,8)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fmul_array:
+; X64-AVX: # %bb.0: # %bb
+; X64-AVX-NEXT: vmulsd (%rdi,%rsi,8), %xmm0, %xmm0
+; X64-AVX-NEXT: vmovsd %xmm0, (%rdi,%rsi,8)
+; X64-AVX-NEXT: retq
+bb:
+ %tmp4 = getelementptr inbounds i64, ptr %arg, i64 %arg2
+ %tmp6 = load atomic i64, ptr %tmp4 monotonic, align 8
+ %tmp7 = bitcast i64 %tmp6 to double
+ %tmp8 = fmul double %tmp7, %arg1
+ %tmp9 = bitcast double %tmp8 to i64
+ store atomic i64 %tmp9, ptr %tmp4 monotonic, align 8
+ ret void
+}
+
+; ----- FDIV -----
+
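+; The FDIV tests use the same layout. Division is not commutative, so where
+; fmul_32r folds the load (mulss (%rdi), %xmm0), fdiv_32r must first load
+; the dividend into a register and then issue divss %xmm0, %xmm1.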
+define dso_local void @fdiv_32r(ptr %loc, float %val) nounwind {
+; X86-NOSSE-LABEL: fdiv_32r:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: subl $8, %esp
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl (%eax), %ecx
+; X86-NOSSE-NEXT: movl %ecx, (%esp)
+; X86-NOSSE-NEXT: flds (%esp)
+; X86-NOSSE-NEXT: fdivs {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %ecx, (%eax)
+; X86-NOSSE-NEXT: addl $8, %esp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fdiv_32r:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: subl $8, %esp
+; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT: movl (%eax), %ecx
+; X86-SSE1-NEXT: movl %ecx, (%esp)
+; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE1-NEXT: divss {{[0-9]+}}(%esp), %xmm0
+; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT: movl %ecx, (%eax)
+; X86-SSE1-NEXT: addl $8, %esp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fdiv_32r:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT: divss {{[0-9]+}}(%esp), %xmm0
+; X86-SSE2-NEXT: movss %xmm0, (%eax)
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fdiv_32r:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX-NEXT: vdivss {{[0-9]+}}(%esp), %xmm0, %xmm0
+; X86-AVX-NEXT: vmovss %xmm0, (%eax)
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fdiv_32r:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X64-SSE-NEXT: divss %xmm0, %xmm1
+; X64-SSE-NEXT: movss %xmm1, (%rdi)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fdiv_32r:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X64-AVX-NEXT: vdivss %xmm0, %xmm1, %xmm0
+; X64-AVX-NEXT: vmovss %xmm0, (%rdi)
+; X64-AVX-NEXT: retq
+ %1 = load atomic i32, ptr %loc seq_cst, align 4
+ %2 = bitcast i32 %1 to float
+ %div = fdiv float %2, %val
+ %3 = bitcast float %div to i32
+ store atomic i32 %3, ptr %loc release, align 4
+ ret void
+}
+
+define dso_local void @fdiv_64r(ptr %loc, double %val) nounwind {
+; X86-NOSSE-LABEL: fdiv_64r:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl %ebp
+; X86-NOSSE-NEXT: movl %esp, %ebp
+; X86-NOSSE-NEXT: andl $-8, %esp
+; X86-NOSSE-NEXT: subl $32, %esp
+; X86-NOSSE-NEXT: movl 8(%ebp), %eax
+; X86-NOSSE-NEXT: fildll (%eax)
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOSSE-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fdivl 12(%ebp)
+; X86-NOSSE-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOSSE-NEXT: movl %ecx, (%esp)
+; X86-NOSSE-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fildll (%esp)
+; X86-NOSSE-NEXT: fistpll (%eax)
+; X86-NOSSE-NEXT: movl %ebp, %esp
+; X86-NOSSE-NEXT: popl %ebp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fdiv_64r:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl %ebp
+; X86-SSE1-NEXT: movl %esp, %ebp
+; X86-SSE1-NEXT: andl $-8, %esp
+; X86-SSE1-NEXT: subl $16, %esp
+; X86-SSE1-NEXT: movl 8(%ebp), %eax
+; X86-SSE1-NEXT: xorps %xmm0, %xmm0
+; X86-SSE1-NEXT: xorps %xmm1, %xmm1
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
+; X86-SSE1-NEXT: movss %xmm1, (%esp)
+; X86-SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; X86-SSE1-NEXT: movss %xmm1, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: fldl (%esp)
+; X86-SSE1-NEXT: fdivl 12(%ebp)
+; X86-SSE1-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
+; X86-SSE1-NEXT: movlps %xmm0, (%eax)
+; X86-SSE1-NEXT: movl %ebp, %esp
+; X86-SSE1-NEXT: popl %ebp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fdiv_64r:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: andl $-8, %esp
+; X86-SSE2-NEXT: subl $8, %esp
+; X86-SSE2-NEXT: movl 8(%ebp), %eax
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: divsd 12(%ebp), %xmm0
+; X86-SSE2-NEXT: movsd %xmm0, (%esp)
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movlps %xmm0, (%eax)
+; X86-SSE2-NEXT: movl %ebp, %esp
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fdiv_64r:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: pushl %ebp
+; X86-AVX-NEXT: movl %esp, %ebp
+; X86-AVX-NEXT: andl $-8, %esp
+; X86-AVX-NEXT: subl $8, %esp
+; X86-AVX-NEXT: movl 8(%ebp), %eax
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vdivsd 12(%ebp), %xmm0, %xmm0
+; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmovlps %xmm0, (%eax)
+; X86-AVX-NEXT: movl %ebp, %esp
+; X86-AVX-NEXT: popl %ebp
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fdiv_64r:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X64-SSE-NEXT: divsd %xmm0, %xmm1
+; X64-SSE-NEXT: movsd %xmm1, (%rdi)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fdiv_64r:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X64-AVX-NEXT: vdivsd %xmm0, %xmm1, %xmm0
+; X64-AVX-NEXT: vmovsd %xmm0, (%rdi)
+; X64-AVX-NEXT: retq
+ %1 = load atomic i64, ptr %loc seq_cst, align 8
+ %2 = bitcast i64 %1 to double
+ %div = fdiv double %2, %val
+ %3 = bitcast double %div to i64
+ store atomic i64 %3, ptr %loc release, align 8
+ ret void
+}
+
+; Floating-point div to a global using a constant operand.
+define dso_local void @fdiv_32g() nounwind {
+; X86-NOSSE-LABEL: fdiv_32g:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: subl $8, %esp
+; X86-NOSSE-NEXT: movl glob32, %eax
+; X86-NOSSE-NEXT: movl %eax, (%esp)
+; X86-NOSSE-NEXT: flds (%esp)
+; X86-NOSSE-NEXT: fdivs {{\.?LCPI[0-9]+_[0-9]+}}
+; X86-NOSSE-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl %eax, glob32
+; X86-NOSSE-NEXT: addl $8, %esp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fdiv_32g:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: subl $8, %esp
+; X86-SSE1-NEXT: movl glob32, %eax
+; X86-SSE1-NEXT: movl %eax, (%esp)
+; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE1-NEXT: divss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT: movl %eax, glob32
+; X86-SSE1-NEXT: addl $8, %esp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fdiv_32g:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT: divss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: movss %xmm0, glob32
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fdiv_32g:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX-NEXT: vdivss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vmovss %xmm0, glob32
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fdiv_32g:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-SSE-NEXT: divss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT: movss %xmm0, glob32(%rip)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fdiv_32g:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-AVX-NEXT: vdivss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: vmovss %xmm0, glob32(%rip)
+; X64-AVX-NEXT: retq
+ %i = load atomic i32, ptr @glob32 monotonic, align 4
+ %f = bitcast i32 %i to float
+ %div = fdiv float %f, 0x400921FA00000000
+ %s = bitcast float %div to i32
+ store atomic i32 %s, ptr @glob32 monotonic, align 4
+ ret void
+}
+
+define dso_local void @fdiv_64g() nounwind {
+; X86-NOSSE-LABEL: fdiv_64g:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl %ebp
+; X86-NOSSE-NEXT: movl %esp, %ebp
+; X86-NOSSE-NEXT: andl $-8, %esp
+; X86-NOSSE-NEXT: subl $32, %esp
+; X86-NOSSE-NEXT: fildll glob64
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fdivs {{\.?LCPI[0-9]+_[0-9]+}}
+; X86-NOSSE-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %eax, (%esp)
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fildll (%esp)
+; X86-NOSSE-NEXT: fistpll glob64
+; X86-NOSSE-NEXT: movl %ebp, %esp
+; X86-NOSSE-NEXT: popl %ebp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fdiv_64g:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl %ebp
+; X86-SSE1-NEXT: movl %esp, %ebp
+; X86-SSE1-NEXT: andl $-8, %esp
+; X86-SSE1-NEXT: subl $16, %esp
+; X86-SSE1-NEXT: xorps %xmm0, %xmm0
+; X86-SSE1-NEXT: xorps %xmm1, %xmm1
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
+; X86-SSE1-NEXT: movss %xmm1, (%esp)
+; X86-SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; X86-SSE1-NEXT: movss %xmm1, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: fldl (%esp)
+; X86-SSE1-NEXT: fdivs {{\.?LCPI[0-9]+_[0-9]+}}
+; X86-SSE1-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
+; X86-SSE1-NEXT: movlps %xmm0, glob64
+; X86-SSE1-NEXT: movl %ebp, %esp
+; X86-SSE1-NEXT: popl %ebp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fdiv_64g:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: andl $-8, %esp
+; X86-SSE2-NEXT: subl $8, %esp
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: divsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: movsd %xmm0, (%esp)
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movlps %xmm0, glob64
+; X86-SSE2-NEXT: movl %ebp, %esp
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fdiv_64g:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: pushl %ebp
+; X86-AVX-NEXT: movl %esp, %ebp
+; X86-AVX-NEXT: andl $-8, %esp
+; X86-AVX-NEXT: subl $8, %esp
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vdivsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmovlps %xmm0, glob64
+; X86-AVX-NEXT: movl %ebp, %esp
+; X86-AVX-NEXT: popl %ebp
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fdiv_64g:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-SSE-NEXT: divsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT: movsd %xmm0, glob64(%rip)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fdiv_64g:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-AVX-NEXT: vdivsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: vmovsd %xmm0, glob64(%rip)
+; X64-AVX-NEXT: retq
+ %i = load atomic i64, ptr @glob64 monotonic, align 8
+ %f = bitcast i64 %i to double
+ %div = fdiv double %f, 0x400921FA00000000
+ %s = bitcast double %div to i64
+ store atomic i64 %s, ptr @glob64 monotonic, align 8
+ ret void
+}
+
+; Floating-point div to a hard-coded memory address using a constant operand.
+define dso_local void @fdiv_32imm() nounwind {
+; X86-NOSSE-LABEL: fdiv_32imm:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: subl $8, %esp
+; X86-NOSSE-NEXT: movl -559038737, %eax
+; X86-NOSSE-NEXT: movl %eax, (%esp)
+; X86-NOSSE-NEXT: flds (%esp)
+; X86-NOSSE-NEXT: fdivs {{\.?LCPI[0-9]+_[0-9]+}}
+; X86-NOSSE-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl %eax, -559038737
+; X86-NOSSE-NEXT: addl $8, %esp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fdiv_32imm:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: subl $8, %esp
+; X86-SSE1-NEXT: movl -559038737, %eax
+; X86-SSE1-NEXT: movl %eax, (%esp)
+; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE1-NEXT: divss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT: movl %eax, -559038737
+; X86-SSE1-NEXT: addl $8, %esp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fdiv_32imm:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT: divss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: movss %xmm0, -559038737
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fdiv_32imm:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX-NEXT: vdivss {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vmovss %xmm0, -559038737
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fdiv_32imm:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: movl $3735928559, %eax # imm = 0xDEADBEEF
+; X64-SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-SSE-NEXT: divss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT: movss %xmm0, (%rax)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fdiv_32imm:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl $3735928559, %eax # imm = 0xDEADBEEF
+; X64-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-AVX-NEXT: vdivss {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: vmovss %xmm0, (%rax)
+; X64-AVX-NEXT: retq
+ %i = load atomic i32, ptr inttoptr (i32 3735928559 to ptr) monotonic, align 4
+ %f = bitcast i32 %i to float
+ %div = fdiv float %f, 0x400921FA00000000
+ %s = bitcast float %div to i32
+ store atomic i32 %s, ptr inttoptr (i32 3735928559 to ptr) monotonic, align 4
+ ret void
+}
+
+define dso_local void @fdiv_64imm() nounwind {
+; X86-NOSSE-LABEL: fdiv_64imm:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl %ebp
+; X86-NOSSE-NEXT: movl %esp, %ebp
+; X86-NOSSE-NEXT: andl $-8, %esp
+; X86-NOSSE-NEXT: subl $32, %esp
+; X86-NOSSE-NEXT: fildll -559038737
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fdivs {{\.?LCPI[0-9]+_[0-9]+}}
+; X86-NOSSE-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %eax, (%esp)
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fildll (%esp)
+; X86-NOSSE-NEXT: fistpll -559038737
+; X86-NOSSE-NEXT: movl %ebp, %esp
+; X86-NOSSE-NEXT: popl %ebp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fdiv_64imm:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl %ebp
+; X86-SSE1-NEXT: movl %esp, %ebp
+; X86-SSE1-NEXT: andl $-8, %esp
+; X86-SSE1-NEXT: subl $16, %esp
+; X86-SSE1-NEXT: xorps %xmm0, %xmm0
+; X86-SSE1-NEXT: xorps %xmm1, %xmm1
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
+; X86-SSE1-NEXT: movss %xmm1, (%esp)
+; X86-SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; X86-SSE1-NEXT: movss %xmm1, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: fldl (%esp)
+; X86-SSE1-NEXT: fdivs {{\.?LCPI[0-9]+_[0-9]+}}
+; X86-SSE1-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
+; X86-SSE1-NEXT: movlps %xmm0, -559038737
+; X86-SSE1-NEXT: movl %ebp, %esp
+; X86-SSE1-NEXT: popl %ebp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fdiv_64imm:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: andl $-8, %esp
+; X86-SSE2-NEXT: subl $8, %esp
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: divsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
+; X86-SSE2-NEXT: movsd %xmm0, (%esp)
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movlps %xmm0, -559038737
+; X86-SSE2-NEXT: movl %ebp, %esp
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fdiv_64imm:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: pushl %ebp
+; X86-AVX-NEXT: movl %esp, %ebp
+; X86-AVX-NEXT: andl $-8, %esp
+; X86-AVX-NEXT: subl $8, %esp
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vdivsd {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0, %xmm0
+; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmovlps %xmm0, -559038737
+; X86-AVX-NEXT: movl %ebp, %esp
+; X86-AVX-NEXT: popl %ebp
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fdiv_64imm:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: movl $3735928559, %eax # imm = 0xDEADBEEF
+; X64-SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X64-SSE-NEXT: divsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; X64-SSE-NEXT: movsd %xmm0, (%rax)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fdiv_64imm:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: movl $3735928559, %eax # imm = 0xDEADBEEF
+; X64-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X64-AVX-NEXT: vdivsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: vmovsd %xmm0, (%rax)
+; X64-AVX-NEXT: retq
+ %i = load atomic i64, ptr inttoptr (i64 3735928559 to ptr) monotonic, align 8
+ %f = bitcast i64 %i to double
+ %div = fdiv double %f, 0x400921FA00000000
+ %s = bitcast double %div to i64
+ store atomic i64 %s, ptr inttoptr (i64 3735928559 to ptr) monotonic, align 8
+ ret void
+}
+
+; Floating-point div to a stack location.
+define dso_local void @fdiv_32stack() nounwind {
+; X86-NOSSE-LABEL: fdiv_32stack:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: subl $12, %esp
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl %eax, (%esp)
+; X86-NOSSE-NEXT: fld1
+; X86-NOSSE-NEXT: fdivs (%esp)
+; X86-NOSSE-NEXT: fstps {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: addl $12, %esp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fdiv_32stack:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: subl $12, %esp
+; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT: movl %eax, (%esp)
+; X86-SSE1-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; X86-SSE1-NEXT: divss (%esp), %xmm0
+; X86-SSE1-NEXT: movss %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: addl $12, %esp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fdiv_32stack:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %eax
+; X86-SSE2-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; X86-SSE2-NEXT: divss (%esp), %xmm0
+; X86-SSE2-NEXT: movss %xmm0, (%esp)
+; X86-SSE2-NEXT: popl %eax
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fdiv_32stack:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: pushl %eax
+; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; X86-AVX-NEXT: vdivss (%esp), %xmm0, %xmm0
+; X86-AVX-NEXT: vmovss %xmm0, (%esp)
+; X86-AVX-NEXT: popl %eax
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fdiv_32stack:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: movss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; X64-SSE-NEXT: divss -{{[0-9]+}}(%rsp), %xmm0
+; X64-SSE-NEXT: movss %xmm0, -{{[0-9]+}}(%rsp)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fdiv_32stack:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovss {{.*#+}} xmm0 = [1.0E+0,0.0E+0,0.0E+0,0.0E+0]
+; X64-AVX-NEXT: vdivss -{{[0-9]+}}(%rsp), %xmm0, %xmm0
+; X64-AVX-NEXT: vmovss %xmm0, -{{[0-9]+}}(%rsp)
+; X64-AVX-NEXT: retq
+ %ptr = alloca i32, align 4
+ %load = load atomic i32, ptr %ptr acquire, align 4
+ %bc0 = bitcast i32 %load to float
+ %fdiv = fdiv float 1.000000e+00, %bc0
+ %bc1 = bitcast float %fdiv to i32
+ store atomic i32 %bc1, ptr %ptr release, align 4
+ ret void
+}
+
+define dso_local void @fdiv_64stack() nounwind {
+; X86-NOSSE-LABEL: fdiv_64stack:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl %ebp
+; X86-NOSSE-NEXT: movl %esp, %ebp
+; X86-NOSSE-NEXT: andl $-8, %esp
+; X86-NOSSE-NEXT: subl $40, %esp
+; X86-NOSSE-NEXT: fildll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl %eax, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fld1
+; X86-NOSSE-NEXT: fdivl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT: movl %eax, (%esp)
+; X86-NOSSE-NEXT: movl %ecx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fildll (%esp)
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl %ebp, %esp
+; X86-NOSSE-NEXT: popl %ebp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fdiv_64stack:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl %ebp
+; X86-SSE1-NEXT: movl %esp, %ebp
+; X86-SSE1-NEXT: andl $-8, %esp
+; X86-SSE1-NEXT: subl $24, %esp
+; X86-SSE1-NEXT: xorps %xmm0, %xmm0
+; X86-SSE1-NEXT: xorps %xmm1, %xmm1
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
+; X86-SSE1-NEXT: movss %xmm1, (%esp)
+; X86-SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; X86-SSE1-NEXT: movss %xmm1, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: fld1
+; X86-SSE1-NEXT: fdivl (%esp)
+; X86-SSE1-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
+; X86-SSE1-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movl %ebp, %esp
+; X86-SSE1-NEXT: popl %ebp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fdiv_64stack:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: andl $-8, %esp
+; X86-SSE2-NEXT: subl $16, %esp
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm1 = [1.0E+0,0.0E+0]
+; X86-SSE2-NEXT: divsd %xmm0, %xmm1
+; X86-SSE2-NEXT: movsd %xmm1, (%esp)
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
+; X86-SSE2-NEXT: movl %ebp, %esp
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fdiv_64stack:
+; X86-AVX: # %bb.0:
+; X86-AVX-NEXT: pushl %ebp
+; X86-AVX-NEXT: movl %esp, %ebp
+; X86-AVX-NEXT: andl $-8, %esp
+; X86-AVX-NEXT: subl $16, %esp
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm1 = [1.0E+0,0.0E+0]
+; X86-AVX-NEXT: vdivsd %xmm0, %xmm1, %xmm0
+; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmovlps %xmm0, {{[0-9]+}}(%esp)
+; X86-AVX-NEXT: movl %ebp, %esp
+; X86-AVX-NEXT: popl %ebp
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fdiv_64stack:
+; X64-SSE: # %bb.0:
+; X64-SSE-NEXT: movsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
+; X64-SSE-NEXT: divsd -{{[0-9]+}}(%rsp), %xmm0
+; X64-SSE-NEXT: movsd %xmm0, -{{[0-9]+}}(%rsp)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fdiv_64stack:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovsd {{.*#+}} xmm0 = [1.0E+0,0.0E+0]
+; X64-AVX-NEXT: vdivsd -{{[0-9]+}}(%rsp), %xmm0, %xmm0
+; X64-AVX-NEXT: vmovsd %xmm0, -{{[0-9]+}}(%rsp)
+; X64-AVX-NEXT: retq
+ %ptr = alloca i64, align 8
+ %load = load atomic i64, ptr %ptr acquire, align 8
+ %bc0 = bitcast i64 %load to double
+ %fdiv = fdiv double 1.000000e+00, %bc0
+ %bc1 = bitcast double %fdiv to i64
+ store atomic i64 %bc1, ptr %ptr release, align 8
+ ret void
+}
+
+define dso_local void @fdiv_array(ptr %arg, double %arg1, i64 %arg2) nounwind {
+; X86-NOSSE-LABEL: fdiv_array:
+; X86-NOSSE: # %bb.0: # %bb
+; X86-NOSSE-NEXT: pushl %ebp
+; X86-NOSSE-NEXT: movl %esp, %ebp
+; X86-NOSSE-NEXT: pushl %esi
+; X86-NOSSE-NEXT: andl $-8, %esp
+; X86-NOSSE-NEXT: subl $40, %esp
+; X86-NOSSE-NEXT: movl 20(%ebp), %eax
+; X86-NOSSE-NEXT: movl 8(%ebp), %ecx
+; X86-NOSSE-NEXT: fildll (%ecx,%eax,8)
+; X86-NOSSE-NEXT: fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NOSSE-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl %edx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fldl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fdivl 12(%ebp)
+; X86-NOSSE-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %edx
+; X86-NOSSE-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NOSSE-NEXT: movl %edx, (%esp)
+; X86-NOSSE-NEXT: movl %esi, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: fildll (%esp)
+; X86-NOSSE-NEXT: fistpll (%ecx,%eax,8)
+; X86-NOSSE-NEXT: leal -4(%ebp), %esp
+; X86-NOSSE-NEXT: popl %esi
+; X86-NOSSE-NEXT: popl %ebp
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: fdiv_array:
+; X86-SSE1: # %bb.0: # %bb
+; X86-SSE1-NEXT: pushl %ebp
+; X86-SSE1-NEXT: movl %esp, %ebp
+; X86-SSE1-NEXT: andl $-8, %esp
+; X86-SSE1-NEXT: subl $16, %esp
+; X86-SSE1-NEXT: movl 20(%ebp), %eax
+; X86-SSE1-NEXT: movl 8(%ebp), %ecx
+; X86-SSE1-NEXT: xorps %xmm0, %xmm0
+; X86-SSE1-NEXT: xorps %xmm1, %xmm1
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm1 = mem[0,1],xmm1[2,3]
+; X86-SSE1-NEXT: movss %xmm1, (%esp)
+; X86-SSE1-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,1,1]
+; X86-SSE1-NEXT: movss %xmm1, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: fldl (%esp)
+; X86-SSE1-NEXT: fdivl 12(%ebp)
+; X86-SSE1-NEXT: fstpl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: movlps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
+; X86-SSE1-NEXT: movlps %xmm0, (%ecx,%eax,8)
+; X86-SSE1-NEXT: movl %ebp, %esp
+; X86-SSE1-NEXT: popl %ebp
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: fdiv_array:
+; X86-SSE2: # %bb.0: # %bb
+; X86-SSE2-NEXT: pushl %ebp
+; X86-SSE2-NEXT: movl %esp, %ebp
+; X86-SSE2-NEXT: andl $-8, %esp
+; X86-SSE2-NEXT: subl $8, %esp
+; X86-SSE2-NEXT: movl 20(%ebp), %eax
+; X86-SSE2-NEXT: movl 8(%ebp), %ecx
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: divsd 12(%ebp), %xmm0
+; X86-SSE2-NEXT: movsd %xmm0, (%esp)
+; X86-SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT: movlps %xmm0, (%ecx,%eax,8)
+; X86-SSE2-NEXT: movl %ebp, %esp
+; X86-SSE2-NEXT: popl %ebp
+; X86-SSE2-NEXT: retl
+;
+; X86-AVX-LABEL: fdiv_array:
+; X86-AVX: # %bb.0: # %bb
+; X86-AVX-NEXT: pushl %ebp
+; X86-AVX-NEXT: movl %esp, %ebp
+; X86-AVX-NEXT: andl $-8, %esp
+; X86-AVX-NEXT: subl $8, %esp
+; X86-AVX-NEXT: movl 20(%ebp), %eax
+; X86-AVX-NEXT: movl 8(%ebp), %ecx
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vdivsd 12(%ebp), %xmm0, %xmm0
+; X86-AVX-NEXT: vmovsd %xmm0, (%esp)
+; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT: vmovlps %xmm0, (%ecx,%eax,8)
+; X86-AVX-NEXT: movl %ebp, %esp
+; X86-AVX-NEXT: popl %ebp
+; X86-AVX-NEXT: retl
+;
+; X64-SSE-LABEL: fdiv_array:
+; X64-SSE: # %bb.0: # %bb
+; X64-SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X64-SSE-NEXT: divsd %xmm0, %xmm1
+; X64-SSE-NEXT: movsd %xmm1, (%rdi,%rsi,8)
+; X64-SSE-NEXT: retq
+;
+; X64-AVX-LABEL: fdiv_array:
+; X64-AVX: # %bb.0: # %bb
+; X64-AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; X64-AVX-NEXT: vdivsd %xmm0, %xmm1, %xmm0
+; X64-AVX-NEXT: vmovsd %xmm0, (%rdi,%rsi,8)
+; X64-AVX-NEXT: retq
+bb:
+ %tmp4 = getelementptr inbounds i64, ptr %arg, i64 %arg2
+ %tmp6 = load atomic i64, ptr %tmp4 monotonic, align 8
+ %tmp7 = bitcast i64 %tmp6 to double
+ %tmp8 = fdiv double %tmp7, %arg1
+ %tmp9 = bitcast double %tmp8 to i64
+ store atomic i64 %tmp9, ptr %tmp4 monotonic, align 8
+ ret void
+}
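
For reference, a minimal C++ sketch of the kind of source that lowers to the
atomic-i64-load / bitcast / fdiv / bitcast / atomic-store pattern these tests
pin down. This is not part of the patch; the function name and the relaxed
memory orders are illustrative assumptions (relaxed corresponds to the
monotonic ordering in the IR above).

  #include <atomic>

  // Divide an atomically held double in place; each access is a single
  // 64-bit atomic operation, matching the monotonic loads/stores above.
  void fdiv_in_place(std::atomic<double> &slot, double divisor) {
    double v = slot.load(std::memory_order_relaxed);
    slot.store(v / divisor, std::memory_order_relaxed);
  }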
>From 31fb504efb1ca5d9f75e0b374b4afeef304bad9f Mon Sep 17 00:00:00 2001
From: Kazu Hirata <kazu at google.com>
Date: Sat, 8 Jun 2024 08:35:25 -0700
Subject: [PATCH 53/57] [memprof] Make Version3 officially available (#94837)
---
llvm/include/llvm/ProfileData/MemProf.h | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/llvm/include/llvm/ProfileData/MemProf.h b/llvm/include/llvm/ProfileData/MemProf.h
index a6501493172b2..53ddfd1923410 100644
--- a/llvm/include/llvm/ProfileData/MemProf.h
+++ b/llvm/include/llvm/ProfileData/MemProf.h
@@ -28,7 +28,8 @@ enum IndexedVersion : uint64_t {
Version1 = 1,
// Version 2: Added a call stack table.
Version2 = 2,
- // Version 3: Under development.
+ // Version 3: Added a radix tree for call stacks. Switched to linear IDs for
+ // frames and call stacks.
Version3 = 3,
};
>From 473d6fdae168083ed93a07a7e78959f2ec1df6a7 Mon Sep 17 00:00:00 2001
From: Kazu Hirata <kazu at google.com>
Date: Sat, 8 Jun 2024 08:36:42 -0700
Subject: [PATCH 54/57] [ProfileData] Use a range-based for loop (NFC) (#94856)
While I am at it, this patch adds const to a couple of places.
---
llvm/lib/ProfileData/InstrProfReader.cpp | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/ProfileData/InstrProfReader.cpp b/llvm/lib/ProfileData/InstrProfReader.cpp
index 7758363d9c952..27855bf92b871 100644
--- a/llvm/lib/ProfileData/InstrProfReader.cpp
+++ b/llvm/lib/ProfileData/InstrProfReader.cpp
@@ -145,11 +145,11 @@ readBinaryIdsInternal(const MemoryBuffer &DataBuffer,
static void
printBinaryIdsInternal(raw_ostream &OS,
- std::vector<llvm::object::BuildID> &BinaryIds) {
+ const std::vector<llvm::object::BuildID> &BinaryIds) {
OS << "Binary IDs: \n";
- for (auto BI : BinaryIds) {
- for (uint64_t I = 0; I < BI.size(); I++)
- OS << format("%02x", BI[I]);
+ for (const auto &BI : BinaryIds) {
+ for (auto I : BI)
+ OS << format("%02x", I);
OS << "\n";
}
}
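
The same cleanup in isolation, as a hedged standalone sketch with assumed
names (not the patched function itself): iterate the bytes directly by value
with a range-based for loop instead of indexing, and take the container by
const reference since it is only read.

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  // Print a byte sequence (e.g. a build ID) as lowercase hex.
  void printHex(const std::vector<uint8_t> &Bytes) {
    for (auto B : Bytes)      // no index bookkeeping needed
      std::printf("%02x", B);
    std::printf("\n");
  }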
>From a3fa99ebc93b3f2b2eb5cee414cb176fa88111f0 Mon Sep 17 00:00:00 2001
From: Kazu Hirata <kazu at google.com>
Date: Sat, 8 Jun 2024 08:38:36 -0700
Subject: [PATCH 55/57] [memprof] Remove redundant virtual (NFC) (#94858)
'override' makes 'virtual' redundant.
Identified with modernize-use-override.
---
llvm/include/llvm/ProfileData/MemProfReader.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llvm/include/llvm/ProfileData/MemProfReader.h b/llvm/include/llvm/ProfileData/MemProfReader.h
index f0286820fa462..fbba6483abe3e 100644
--- a/llvm/include/llvm/ProfileData/MemProfReader.h
+++ b/llvm/include/llvm/ProfileData/MemProfReader.h
@@ -137,7 +137,7 @@ class RawMemProfReader final : public MemProfReader {
public:
RawMemProfReader(const RawMemProfReader &) = delete;
RawMemProfReader &operator=(const RawMemProfReader &) = delete;
- virtual ~RawMemProfReader() override = default;
+ ~RawMemProfReader() override = default;
// Prints the contents of the profile in YAML format.
void printYAML(raw_ostream &OS);
@@ -161,7 +161,7 @@ class RawMemProfReader final : public MemProfReader {
// Returns a list of build ids recorded in the segment information.
static std::vector<std::string> peekBuildIds(MemoryBuffer *DataBuffer);
- virtual Error
+ Error
readNextRecord(GuidMemProfRecordPair &GuidRecord,
std::function<const Frame(const FrameId)> Callback) override;
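
A minimal sketch of the rule modernize-use-override enforces, with
illustrative types that are not from the patch:

  struct Base {
    virtual ~Base() = default;
    virtual int run();
  };

  struct Derived final : Base {
    // 'override' already implies 'virtual', so writing
    // 'virtual int run() override' would be redundant.
    int run() override;
  };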
>From 24d344b2ea86808103c311586f0287c33c6ff679 Mon Sep 17 00:00:00 2001
From: Nikolas Klauser <nikolasklauser at berlin.de>
Date: Sat, 8 Jun 2024 17:49:53 +0200
Subject: [PATCH 56/57] [libc++][NFC] Simplify the implementation of
`__promote` (#81379)
This depends on enabling the use of compiler extensions, since the new
implementation relies on variadic templates and fold expressions in every
language mode.
---
libcxx/include/__type_traits/promote.h | 42 +++++++++++++++++++++++---
1 file changed, 38 insertions(+), 4 deletions(-)
diff --git a/libcxx/include/__type_traits/promote.h b/libcxx/include/__type_traits/promote.h
index e22b4a422c2c8..2b2a6843b9150 100644
--- a/libcxx/include/__type_traits/promote.h
+++ b/libcxx/include/__type_traits/promote.h
@@ -11,8 +11,12 @@
#include <__config>
#include <__type_traits/integral_constant.h>
-#include <__type_traits/is_same.h>
-#include <__utility/declval.h>
+#include <__type_traits/is_arithmetic.h>
+
+#if defined(_LIBCPP_CLANG_VER) && _LIBCPP_CLANG_VER == 1700
+# include <__type_traits/is_same.h>
+# include <__utility/declval.h>
+#endif
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
@@ -20,6 +24,34 @@
_LIBCPP_BEGIN_NAMESPACE_STD
+// TODO(LLVM-20): Remove this workaround
+#if !defined(_LIBCPP_CLANG_VER) || _LIBCPP_CLANG_VER != 1700
+
+template <class... _Args>
+class __promote {
+ static_assert((is_arithmetic<_Args>::value && ...));
+
+ static float __test(float);
+ static double __test(char);
+ static double __test(int);
+ static double __test(unsigned);
+ static double __test(long);
+ static double __test(unsigned long);
+ static double __test(long long);
+ static double __test(unsigned long long);
+# ifndef _LIBCPP_HAS_NO_INT128
+ static double __test(__int128_t);
+ static double __test(__uint128_t);
+# endif
+ static double __test(double);
+ static long double __test(long double);
+
+public:
+ using type = decltype((__test(_Args()) + ...));
+};
+
+#else
+
template <class _Tp>
struct __numeric_type {
static void __test(...);
@@ -31,10 +63,10 @@ struct __numeric_type {
static double __test(unsigned long);
static double __test(long long);
static double __test(unsigned long long);
-#ifndef _LIBCPP_HAS_NO_INT128
+# ifndef _LIBCPP_HAS_NO_INT128
static double __test(__int128_t);
static double __test(__uint128_t);
-#endif
+# endif
static double __test(double);
static long double __test(long double);
@@ -89,6 +121,8 @@ class __promote_imp<_A1, void, void, true> {
template <class _A1, class _A2 = void, class _A3 = void>
class __promote : public __promote_imp<_A1, _A2, _A3> {};
+#endif // !defined(_LIBCPP_CLANG_VER) || _LIBCPP_CLANG_VER != 1700
+
_LIBCPP_END_NAMESPACE_STD
#endif // _LIBCPP___TYPE_TRAITS_PROMOTE_H
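
To see what the fold-expression implementation computes, here is a hedged,
self-contained sketch of the same technique (simplified and renamed, outside
libc++; requires C++17):

  #include <type_traits>

  template <class... Args>
  class Promote {
    // Overload resolution performs the promotion: float stays float,
    // every integer type maps to double.
    static float test(float);
    static double test(char);
    static double test(int);
    static double test(unsigned);
    static double test(long);
    static double test(unsigned long);
    static double test(long long);
    static double test(unsigned long long);
    static double test(double);
    static long double test(long double);

  public:
    // Summing the promoted values picks the widest floating-point type.
    using type = decltype((test(Args()) + ...));
  };

  static_assert(std::is_same_v<Promote<float, float>::type, float>);
  static_assert(std::is_same_v<Promote<float, int>::type, double>);
  static_assert(std::is_same_v<Promote<double, long double>::type, long double>);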
>From 619189446484702fdf66dc43009cf6606dc4a8cd Mon Sep 17 00:00:00 2001
From: Sam Elliott <quic_aelliott at quicinc.com>
Date: Sat, 8 Jun 2024 17:53:48 +0100
Subject: [PATCH 57/57] [RISCV][MC] Implicit 0-offset aliases for JR/JALR
(#94688)
This broadly follows the convention that, in almost all places, `(<reg>)`
is accepted to mean `0(<reg>)`; these appear to be the first such aliases
for jumps rather than loads/stores. They are accepted by binutils but,
until now, not by LLVM: https://godbolt.org/z/GK7MGE7q7
---
llvm/lib/Target/RISCV/RISCVInstrInfo.td | 3 +++
llvm/test/MC/RISCV/rvi-aliases-valid.s | 10 ++++++++++
2 files changed, 13 insertions(+)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.td b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
index 010e07f65cec4..4cdf08a46f285 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.td
@@ -943,6 +943,9 @@ def : InstAlias<"ret", (JALR X0, X1, 0), 4>;
def : InstAlias<"jr $rs, $offset", (JALR X0, GPR:$rs, simm12:$offset), 0>;
def : InstAlias<"jalr $rs, $offset", (JALR X1, GPR:$rs, simm12:$offset), 0>;
def : InstAlias<"jalr $rd, $rs, $offset", (JALR GPR:$rd, GPR:$rs, simm12:$offset), 0>;
+def : InstAlias<"jr (${rs})", (JALR X0, GPR:$rs, 0), 0>;
+def : InstAlias<"jalr (${rs})", (JALR X1, GPR:$rs, 0), 0>;
+def : InstAlias<"jalr $rd, (${rs})", (JALR GPR:$rd, GPR:$rs, 0), 0>;
def : InstAlias<"fence", (FENCE 0xF, 0xF)>; // 0xF == iorw
diff --git a/llvm/test/MC/RISCV/rvi-aliases-valid.s b/llvm/test/MC/RISCV/rvi-aliases-valid.s
index 098d5c132c98c..9ac6a8a2c1e75 100644
--- a/llvm/test/MC/RISCV/rvi-aliases-valid.s
+++ b/llvm/test/MC/RISCV/rvi-aliases-valid.s
@@ -190,6 +190,16 @@ jalr x25, x26, 11
# CHECK-S-OBJ-NOALIAS: jalr zero, 0(ra)
# CHECK-S-OBJ: ret
ret
+# CHECK-S-OBJ-NOALIAS: jalr zero, 0(s11)
+# CHECK-S-OBJ: jr s11
+jr (x27)
+# CHECK-S-OBJ-NOALIAS: jalr ra, 0(t3)
+# CHECK-S-OBJ: jalr t3
+jalr (x28)
+# CHECK-S-OBJ-NOALIAS: jalr t4, 0(t5)
+# CHECK-S-OBJ: jalr t4, t5
+jalr x29, (x30)
+
# TODO call
# TODO tail