[llvm] [AArch64][Win] Work around an MSVC arm64 compiler bug (PR #67865)

Hiroshi Yamauchi via llvm-commits llvm-commits at lists.llvm.org
Fri Sep 29 17:31:40 PDT 2023


https://github.com/hjyamauchi updated https://github.com/llvm/llvm-project/pull/67865

>From 84784e044dfc4601df53bcbb96a0d9f60cf6e926 Mon Sep 17 00:00:00 2001
From: Hiroshi Yamauchi <hjyamauchi at gmail.com>
Date: Fri, 29 Sep 2023 15:45:45 -0700
Subject: [PATCH] [AArch64][Win] Work around an MSVC arm64 compiler bug

The MSVC compiler 19.37 for ARM64 from Visual Studio 17.7.4 has an
optimization bug that causes incorrect behavior in
isAdvSIMDModImmType10() and makes the test
test/CodeGen/AArch64/arm64-build-vector.ll fail. Work around it by
using a slightly different variation of the function.

Bug: https://developercommunity.visualstudio.com/t/C-ARM64-compiler-optimization-bug/10481261
---
 .../MCTargetDesc/AArch64AddressingModes.h     | 22 +++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
index 11c314dc88def7e..03cbd272757e78e 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h
@@ -591,6 +591,27 @@ static inline uint64_t decodeAdvSIMDModImmType9(uint8_t Imm) {
 // aaaaaaaa bbbbbbbb cccccccc dddddddd eeeeeeee ffffffff gggggggg hhhhhhhh
 // cmode: 1110, op: 1
 static inline bool isAdvSIMDModImmType10(uint64_t Imm) {
+#if defined(_MSC_VER) && _MSC_VER == 1937 && !defined(__clang__) &&            \
+    defined(_M_ARM64)
+  // The MSVC compiler 19.37 for ARM64 has an optimization bug that
+  // causes incorrect behavior with the original version. Work around
+  // it by using a slightly different variation.
+  // https://developercommunity.visualstudio.com/t/C-ARM64-compiler-optimization-bug/10481261
+  constexpr uint64_t Mask = 0xFFULL;
+  uint64_t ByteA = (Imm >> 56) & Mask;
+  uint64_t ByteB = (Imm >> 48) & Mask;
+  uint64_t ByteC = (Imm >> 40) & Mask;
+  uint64_t ByteD = (Imm >> 32) & Mask;
+  uint64_t ByteE = (Imm >> 24) & Mask;
+  uint64_t ByteF = (Imm >> 16) & Mask;
+  uint64_t ByteG = (Imm >> 8) & Mask;
+  uint64_t ByteH = Imm & Mask;
+
+  return (ByteA == 0ULL || ByteA == Mask) && (ByteB == 0ULL || ByteB == Mask) &&
+         (ByteC == 0ULL || ByteC == Mask) && (ByteD == 0ULL || ByteD == Mask) &&
+         (ByteE == 0ULL || ByteE == Mask) && (ByteF == 0ULL || ByteF == Mask) &&
+         (ByteG == 0ULL || ByteG == Mask) && (ByteH == 0ULL || ByteH == Mask);
+#else
   uint64_t ByteA = Imm & 0xff00000000000000ULL;
   uint64_t ByteB = Imm & 0x00ff000000000000ULL;
   uint64_t ByteC = Imm & 0x0000ff0000000000ULL;
@@ -608,6 +629,7 @@ static inline bool isAdvSIMDModImmType10(uint64_t Imm) {
          (ByteF == 0ULL || ByteF == 0x0000000000ff0000ULL) &&
          (ByteG == 0ULL || ByteG == 0x000000000000ff00ULL) &&
          (ByteH == 0ULL || ByteH == 0x00000000000000ffULL);
+#endif
 }
 
 static inline uint8_t encodeAdvSIMDModImmType10(uint64_t Imm) {

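Not part of the patch, for context only: a minimal standalone sketch showing that the two formulations of isAdvSIMDModImmType10() compute the same predicate, namely that every byte of the 64-bit immediate is either 0x00 or 0xFF, as the cmode=1110, op=1 modified-immediate encoding requires. The helper names checkMaskedInPlace and checkShiftedDown are hypothetical and chosen here for illustration; they are not in the LLVM sources.

// Standalone illustration only; helper names are hypothetical, not LLVM APIs.
#include <cassert>
#include <cstdint>

// Original formulation: mask each byte in place and compare against the
// per-byte mask itself (e.g. 0xff00000000000000ULL for the top byte).
static bool checkMaskedInPlace(uint64_t Imm) {
  for (int Byte = 0; Byte < 8; ++Byte) {
    uint64_t Mask = 0xFFULL << (Byte * 8);
    uint64_t Val = Imm & Mask;
    if (Val != 0 && Val != Mask)
      return false;
  }
  return true;
}

// Workaround formulation: shift each byte down to bits [7:0] first, so every
// comparison uses the same constant 0xFF instead of eight distinct masks.
static bool checkShiftedDown(uint64_t Imm) {
  for (int Byte = 0; Byte < 8; ++Byte) {
    uint64_t Val = (Imm >> (Byte * 8)) & 0xFFULL;
    if (Val != 0 && Val != 0xFFULL)
      return false;
  }
  return true;
}

int main() {
  const uint64_t Samples[] = {0x0000000000000000ULL, 0xFFFFFFFFFFFFFFFFULL,
                              0xFF00FF00FF00FF00ULL, 0x00000000000000FFULL,
                              0x0100000000000000ULL, 0x123456789ABCDEF0ULL};
  for (uint64_t Imm : Samples)
    assert(checkMaskedInPlace(Imm) == checkShiftedDown(Imm));
  return 0;
}

The workaround in the patch follows the second shape: by shifting each byte down before comparing, every check is against the single constant 0xFF rather than eight different per-byte masks, which appears to sidestep the MSVC 19.37 ARM64 misoptimization while preserving the predicate.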

