[llvm] c22081c - [GlobalISel] Diagnose inline assembly constraint lowering errors (#135782)

via llvm-commits llvm-commits at lists.llvm.org
Wed May 7 05:13:28 PDT 2025


Author: Pierre van Houtryve
Date: 2025-05-07T14:13:25+02:00
New Revision: c22081c320340d0e7542b247ee093ca515509b52

URL: https://github.com/llvm/llvm-project/commit/c22081c320340d0e7542b247ee093ca515509b52
DIFF: https://github.com/llvm/llvm-project/commit/c22081c320340d0e7542b247ee093ca515509b52.diff

LOG: [GlobalISel] Diagnose inline assembly constraint lowering errors (#135782)

Instead of printing a message to dbgs() (which is only visible in debug builds
with debug output enabled), emit a diagnostic like SelectionDAG does. We still
crash later because we fail to select the inline assembly, but at least users
now know why it is crashing.

In a future patch we could also recover from the error like SelectionDAG does,
so lowering can keep going until it either crashes or reports a different error
later.
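
For example, an output constraint that names an unallocatable register (one of
the cases exercised by the new AMDGPU test added below) now produces a
user-visible error instead of a debug-only message:

  define amdgpu_kernel void @bad_output() {
    tail call i32 asm sideeffect "s_nop", "={s999}"()
    ret void
  }

With -global-isel, llc now reports:

  error: invalid constraint '{s999}': could not allocate output register for constraint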

Added: 
    llvm/test/CodeGen/AMDGPU/GlobalISel/inline-asm-lowering-errors.ll

Modified: 
    llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
    llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
    llvm/test/CodeGen/AArch64/arm64-preserve-all.ll
    llvm/test/CodeGen/AArch64/arm64-preserve-most.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/inline-asm-mismatched-size.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp b/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
index 81f25b21a0409..1b103c8fc6226 100644
--- a/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp
@@ -16,6 +16,7 @@
 #include "llvm/CodeGen/MachineOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/IR/DiagnosticInfo.h"
 #include "llvm/IR/Module.h"
 
 #define DEBUG_TYPE "inline-asm-lowering"
@@ -231,6 +232,15 @@ bool InlineAsmLowering::lowerInlineAsm(
   TargetLowering::AsmOperandInfoVector TargetConstraints =
       TLI->ParseConstraints(DL, TRI, Call);
 
+  const auto ConstraintError = [&](const GISelAsmOperandInfo &Info, Twine Msg) {
+    LLVMContext &Ctx = MIRBuilder.getContext();
+    Ctx.diagnose(DiagnosticInfoInlineAsm(
+        Call, "invalid constraint '" + Info.ConstraintCode + "': " + Msg));
+    // TODO: Recover if fallback isn't used. Otherwise let the fallback to DAG
+    // kick in.
+    return false;
+  };
+
   ExtraFlags ExtraInfo(Call);
   unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
   unsigned ResNo = 0; // ResNo - The result number of the next output.
@@ -243,8 +253,8 @@ bool InlineAsmLowering::lowerInlineAsm(
       OpInfo.CallOperandVal = const_cast<Value *>(Call.getArgOperand(ArgNo));
 
       if (isa<BasicBlock>(OpInfo.CallOperandVal)) {
-        LLVM_DEBUG(dbgs() << "Basic block input operands not supported yet\n");
-        return false;
+        return ConstraintError(OpInfo,
+                               "basic block input operands not supported yet");
       }
 
       Type *OpTy = OpInfo.CallOperandVal->getType();
@@ -258,9 +268,8 @@ bool InlineAsmLowering::lowerInlineAsm(
 
       // FIXME: Support aggregate input operands
       if (!OpTy->isSingleValueType()) {
-        LLVM_DEBUG(
-            dbgs() << "Aggregate input operands are not supported yet\n");
-        return false;
+        return ConstraintError(OpInfo,
+                               "aggregate input operands not supported yet");
       }
 
       OpInfo.ConstraintVT =
@@ -344,9 +353,8 @@ bool InlineAsmLowering::lowerInlineAsm(
 
         // Find a register that we can use.
         if (OpInfo.Regs.empty()) {
-          LLVM_DEBUG(dbgs()
-                     << "Couldn't allocate output register for constraint\n");
-          return false;
+          return ConstraintError(
+              OpInfo, "could not allocate output register for constraint");
         }
 
         // Add information to the INLINEASM instruction to know that this
@@ -389,13 +397,13 @@ bool InlineAsmLowering::lowerInlineAsm(
 
         const InlineAsm::Flag MatchedOperandFlag(Inst->getOperand(InstFlagIdx).getImm());
         if (MatchedOperandFlag.isMemKind()) {
-          LLVM_DEBUG(dbgs() << "Matching input constraint to mem operand not "
-                               "supported. This should be target specific.\n");
-          return false;
+          return ConstraintError(
+              OpInfo,
+              "matching input constraint to mem operand not supported; this "
+              "should be target specific");
         }
         if (!MatchedOperandFlag.isRegDefKind() && !MatchedOperandFlag.isRegDefEarlyClobberKind()) {
-          LLVM_DEBUG(dbgs() << "Unknown matching constraint\n");
-          return false;
+          return ConstraintError(OpInfo, "unknown matching constraint");
         }
 
         // We want to tie input to register in next operand.
@@ -425,9 +433,10 @@ bool InlineAsmLowering::lowerInlineAsm(
 
       if (OpInfo.ConstraintType == TargetLowering::C_Other &&
           OpInfo.isIndirect) {
-        LLVM_DEBUG(dbgs() << "Indirect input operands with unknown constraint "
-                             "not supported yet\n");
-        return false;
+        return ConstraintError(
+            OpInfo,
+            "indirect input operands with unknown constraint not supported "
+            "yet");
       }
 
       if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
@@ -437,9 +446,7 @@ bool InlineAsmLowering::lowerInlineAsm(
         if (!lowerAsmOperandForConstraint(OpInfo.CallOperandVal,
                                           OpInfo.ConstraintCode, Ops,
                                           MIRBuilder)) {
-          LLVM_DEBUG(dbgs() << "Don't support constraint: "
-                            << OpInfo.ConstraintCode << " yet\n");
-          return false;
+          return ConstraintError(OpInfo, "unsupported constraint");
         }
 
         assert(Ops.size() > 0 &&
@@ -456,9 +463,8 @@ bool InlineAsmLowering::lowerInlineAsm(
       if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
 
         if (!OpInfo.isIndirect) {
-          LLVM_DEBUG(dbgs()
-                     << "Cannot indirectify memory input operands yet\n");
-          return false;
+          return ConstraintError(
+              OpInfo, "indirect memory input operands are not supported yet");
         }
 
         assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
@@ -482,18 +488,15 @@ bool InlineAsmLowering::lowerInlineAsm(
              "Unknown constraint type!");
 
       if (OpInfo.isIndirect) {
-        LLVM_DEBUG(dbgs() << "Can't handle indirect register inputs yet "
-                             "for constraint '"
-                          << OpInfo.ConstraintCode << "'\n");
-        return false;
+        return ConstraintError(
+            OpInfo, "indirect register inputs are not supported yet");
       }
 
       // Copy the input into the appropriate registers.
       if (OpInfo.Regs.empty()) {
-        LLVM_DEBUG(
-            dbgs()
-            << "Couldn't allocate input register for register constraint\n");
-        return false;
+        return ConstraintError(
+            OpInfo,
+            "could not allocate input register for register constraint");
       }
 
       unsigned NumRegs = OpInfo.Regs.size();
@@ -503,9 +506,10 @@ bool InlineAsmLowering::lowerInlineAsm(
              "source registers");
 
       if (NumRegs > 1) {
-        LLVM_DEBUG(dbgs() << "Input operands with multiple input registers are "
-                             "not supported yet\n");
-        return false;
+        return ConstraintError(
+            OpInfo,
+            "input operands with multiple input registers are not supported "
+            "yet");
       }
 
       InlineAsm::Flag Flag(InlineAsm::Kind::RegUse, NumRegs);

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
index 29c320da6c0a7..12795c5609c96 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-fallback.ll
@@ -1,13 +1,17 @@
-; RUN: llc -O0 -global-isel -global-isel-abort=2 -pass-remarks-missed='gisel*' -verify-machineinstrs %s -o %t.out 2> %t.err
+; RUN: not llc -O0 -global-isel -global-isel-abort=2 -pass-remarks-missed='gisel*' -verify-machineinstrs %s -o - > %t.out 2> %t.err
 ; RUN: FileCheck %s --check-prefix=FALLBACK-WITH-REPORT-OUT < %t.out
 ; RUN: FileCheck %s --check-prefix=FALLBACK-WITH-REPORT-ERR < %t.err
 ; RUN: not --crash llc -global-isel -mtriple aarch64_be %s -o - 2>&1 | FileCheck %s --check-prefix=BIG-ENDIAN
+
 ; This file checks that the fallback path to selection dag works.
 ; The test is fragile in the sense that it must be updated to expose
 ; something that fails with global-isel.
 ; When we cannot produce a test case anymore, that means we can remove
 ; the fallback path.
 
+; -o - > %t.out is used instead of -o %t.out because llc does not write the output
+; file if an error is emitted, but it will still print to stdout.
+
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 target triple = "aarch64--"
 

diff --git a/llvm/test/CodeGen/AArch64/arm64-preserve-all.ll b/llvm/test/CodeGen/AArch64/arm64-preserve-all.ll
index 778f4e2f9ec01..c04e3111fd16e 100644
--- a/llvm/test/CodeGen/AArch64/arm64-preserve-all.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-preserve-all.ll
@@ -43,7 +43,7 @@ define preserve_allcc void @preserve_all() {
 define dso_local void @normal_cc_caller() {
 entry:
   %v = alloca i32, align 4
-  call void asm sideeffect "mov x9, $0", "N,~{x9}"(i32 48879) #2
+  call void asm sideeffect "mov x9, $0", "n,~{x9}"(i32 48879) #2
   call void asm sideeffect "movi v9.2d, #0","~{v9}" () #2
 
 

diff --git a/llvm/test/CodeGen/AArch64/arm64-preserve-most.ll b/llvm/test/CodeGen/AArch64/arm64-preserve-most.ll
index f8196860aa34f..9e97dace29012 100644
--- a/llvm/test/CodeGen/AArch64/arm64-preserve-most.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-preserve-most.ll
@@ -29,7 +29,7 @@ define preserve_mostcc void @preserve_most() {
 define dso_local void @normal_cc_caller() {
 entry:
   %v = alloca i32, align 4
-  call void asm sideeffect "mov x9, $0", "N,~{x9}"(i32 48879) #2
+  call void asm sideeffect "mov x9, $0", "n,~{x9}"(i32 48879) #2
   call preserve_mostcc void @preserve_most()
   %0 = load i32, ptr %v, align 4
   %1 = call i32 asm sideeffect "mov ${0:w}, w9", "=r,r"(i32 %0) #2

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inline-asm-lowering-errors.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/inline-asm-lowering-errors.ll
new file mode 100644
index 0000000000000..8f99749e76d79
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inline-asm-lowering-errors.ll
@@ -0,0 +1,30 @@
+; RUN: not llc -mtriple=amdgcn -mcpu=fiji -O0 -global-isel -global-isel-abort=2 -pass-remarks-missed='gisel*' %s -o - 2>&1 | FileCheck %s
+
+; CHECK: error: invalid constraint '': aggregate input operands not supported yet
+define amdgpu_kernel void @aggregates([4 x i8] %val) {
+  tail call void asm sideeffect "s_nop", "r"([4 x i8] %val)
+  ret void
+}
+
+; CHECK: error: invalid constraint '{s999}': could not allocate output register for constraint
+define amdgpu_kernel void @bad_output() {
+  tail call i32 asm sideeffect "s_nop", "={s999}"()
+  ret void
+}
+
+; CHECK: error: invalid constraint '{s998}': could not allocate input register for register constraint
+define amdgpu_kernel void @bad_input() {
+  tail call void asm sideeffect "s_nop", "{s998}"(i32 poison)
+  ret void
+}
+; CHECK: error: invalid constraint '{s997}': indirect register inputs are not supported yet
+define amdgpu_kernel void @indirect_input() {
+  tail call void asm sideeffect "s_nop", "*{s997}"(ptr elementtype(i32) poison)
+  ret void
+}
+
+; CHECK: error: invalid constraint 'i': unsupported constraint
+define amdgpu_kernel void @badimm() {
+  tail call void asm sideeffect "s_nop", "i"(i32 poison)
+  ret void
+}

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inline-asm-mismatched-size.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/inline-asm-mismatched-size.ll
index 0b0c7b7df2570..1f7700b14e618 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inline-asm-mismatched-size.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inline-asm-mismatched-size.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc -global-isel -global-isel-abort=2 -pass-remarks-missed='gisel*' -mtriple=amdgcn -mcpu=fiji -stop-after=irtranslator -verify-machineinstrs %s -o - 2>%t | FileCheck %s
+; RUN: not llc -global-isel -global-isel-abort=2 -pass-remarks-missed='gisel*' -mtriple=amdgcn -mcpu=fiji -stop-after=irtranslator -verify-machineinstrs %s -o - 2>%t | FileCheck %s
 ; RUN: FileCheck -check-prefix=ERR %s < %t
 
 ; ERR: remark: <unknown>:0:0: unable to translate instruction: call: '  %sgpr = call <4 x i32> asm sideeffect "; def $0", "={s[8:12]}"()' (in function: return_type_is_too_big_vector)


        

