[PATCH] D155087: [CodeGen] Fix assertion failure caused by inline assembly

Qi Hu via Phabricator via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 13 15:02:57 PDT 2023


Qi-Hu updated this revision to Diff 540191.

Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D155087/new/

https://reviews.llvm.org/D155087

Files:
  llvm/lib/CodeGen/RegAllocFast.cpp
  llvm/test/CodeGen/X86/inline-asm-assertion.ll


Index: llvm/test/CodeGen/X86/inline-asm-assertion.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/X86/inline-asm-assertion.ll
@@ -0,0 +1,61 @@
+; REQUIRES: x86-registered-target
+; RUN: not llc -O0 < %s 2>&1 | FileCheck %s
+; CHECK: error: inline assembly requires more registers than available
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Function Attrs: noinline nounwind optnone uwtable
+define dso_local i32 @main() #0 {
+entry:
+  %r0 = alloca i32, align 4
+  %r1 = alloca i32, align 4
+  %r2 = alloca i32, align 4
+  %r3 = alloca i32, align 4
+  %r4 = alloca i32, align 4
+  %r5 = alloca i32, align 4
+  %r6 = alloca i32, align 4
+  %r7 = alloca i32, align 4
+  %r8 = alloca i32, align 4
+  %r9 = alloca i32, align 4
+  %r10 = alloca i32, align 4
+  %r11 = alloca i32, align 4
+  %r12 = alloca i32, align 4
+  %r13 = alloca i32, align 4
+  %r14 = alloca i32, align 4
+  %0 = call { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } asm "movl $$0, $0;movl $$11, $1;movl $$22, $2;movl $$33, $3;movl $$44, $4;movl $$55, $5;movl $$66, $6;movl $$77, $7;movl $$88, $8;movl $$99, $9;movl $$110, $10;movl $$121, $11;movl $$132, $12;movl $$143, $13;movl $$143, $14;", "=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,~{dirflag},~{fpsr},~{flags}"() #1
+  %asmresult = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %0, 0
+  %asmresult1 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %0, 1
+  %asmresult2 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %0, 2
+  %asmresult3 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %0, 3
+  %asmresult4 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %0, 4
+  %asmresult5 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %0, 5
+  %asmresult6 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %0, 6
+  %asmresult7 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %0, 7
+  %asmresult8 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %0, 8
+  %asmresult9 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %0, 9
+  %asmresult10 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %0, 10
+  %asmresult11 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %0, 11
+  %asmresult12 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %0, 12
+  %asmresult13 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %0, 13
+  %asmresult14 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %0, 14
+  store i32 %asmresult, ptr %r0, align 4
+  store i32 %asmresult1, ptr %r1, align 4
+  store i32 %asmresult2, ptr %r2, align 4
+  store i32 %asmresult3, ptr %r3, align 4
+  store i32 %asmresult4, ptr %r4, align 4
+  store i32 %asmresult5, ptr %r5, align 4
+  store i32 %asmresult6, ptr %r6, align 4
+  store i32 %asmresult7, ptr %r7, align 4
+  store i32 %asmresult8, ptr %r8, align 4
+  store i32 %asmresult9, ptr %r9, align 4
+  store i32 %asmresult10, ptr %r10, align 4
+  store i32 %asmresult11, ptr %r11, align 4
+  store i32 %asmresult12, ptr %r12, align 4
+  store i32 %asmresult13, ptr %r13, align 4
+  store i32 %asmresult14, ptr %r14, align 4
+  ret i32 0
+}
+
+attributes #0 = { noinline nounwind optnone uwtable "frame-pointer"="all" "min-legal-vector-width"="0" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="x86-64" "target-features"="+cx8,+fxsr,+mmx,+sse,+sse2,+x87" "tune-cpu"="generic" }
+attributes #1 = { nounwind readnone }
Index: llvm/lib/CodeGen/RegAllocFast.cpp
===================================================================
--- llvm/lib/CodeGen/RegAllocFast.cpp
+++ llvm/lib/CodeGen/RegAllocFast.cpp
@@ -932,9 +932,16 @@
       }
     }
   }
-  if (LRI->PhysReg == 0)
+  if (LRI->PhysReg == 0) {
     allocVirtReg(MI, *LRI, 0, LookAtPhysRegUses);
-  else {
+    // If no physical register is available for LRI, assign the first
+    // register in the allocation order as an arbitrary placeholder and
+    // bail out of this function immediately; the allocation error has
+    // already been recorded on LRI.
+    if (LRI->Error) {
+      const TargetRegisterClass &RC = *MRI->getRegClass(VirtReg);
+      ArrayRef<MCPhysReg> AllocationOrder = RegClassInfo.getOrder(&RC);
+      return setPhysReg(MI, MO, *AllocationOrder.begin());
+    }
+  } else {
     assert(!isRegUsedInInstr(LRI->PhysReg, LookAtPhysRegUses) &&
            "TODO: preassign mismatch");
     LLVM_DEBUG(dbgs() << "In def of " << printReg(VirtReg, TRI)
@@ -943,7 +950,6 @@
   }
 
   MCPhysReg PhysReg = LRI->PhysReg;
-  assert(PhysReg != 0 && "Register not assigned");
   if (LRI->Reloaded || LRI->LiveOut) {
     if (!MI.isImplicitDef()) {
       MachineBasicBlock::iterator SpillBefore =


-------------- next part --------------
A non-text attachment was scrubbed...
Name: D155087.540191.patch
Type: text/x-patch
Size: 5243 bytes
Desc: not available
URL: <http://lists.llvm.org/pipermail/llvm-commits/attachments/20230713/05633c61/attachment.bin>


More information about the llvm-commits mailing list