[llvm] r275786 - [inlineasm] Propagate operand constraints to the backend

Simon Dardis via llvm-commits llvm-commits at lists.llvm.org
Mon Jul 18 06:17:31 PDT 2016


Author: sdardis
Date: Mon Jul 18 08:17:31 2016
New Revision: 275786

URL: http://llvm.org/viewvc/llvm-project?rev=275786&view=rev
Log:
[inlineasm] Propagate operand constraints to the backend

When SelectionDAGISel transforms a node representing an inline asm
block, memory constraint information is not preserved. This can cause
constraints to be broken when a memory offset has the form:

offset + frame index

and the frame index is later resolved.

By propagating the constraints all the way to the backend, targets can
ensure that the memory operands of inline assembly conform to their
constraints.
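
For illustration, a minimal sketch of the encoding using the InlineAsm
helpers touched by this patch (the operand count and the choice of
Constraint_ZC are illustrative, not taken from the commit):

  #include "llvm/IR/InlineAsm.h"
  using namespace llvm;

  // Build a Kind_Mem flag word for a single operand, attach the ZC memory
  // constraint to it, and recover the constraint ID later, e.g. during
  // frame index elimination in the backend.
  static unsigned encodeAndRecoverZC() {
    unsigned Flags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, /*NumOps=*/1);
    Flags = InlineAsm::getFlagWordForMem(Flags, InlineAsm::Constraint_ZC);
    return InlineAsm::getMemoryConstraintID(Flags); // yields Constraint_ZC
  }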

For MIPSR6, some instructions, such as ll/sc, had their offsets reduced
from 16 bits to 9 bits. This becomes problematic when using inline assembly
to perform atomic operations, as an offset can be generated that is too big
to encode in the instruction.
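
As a source-level sketch of the failure mode (hypothetical GCC-style MIPS
inline asm, assuming Clang's "ZC" memory constraint; it mirrors the new IR
test case below rather than code from the commit): an ll/sc loop on a field
near the end of a large stack object. Without the constraint being honoured
when the frame index is resolved, the computed offset may not fit the 9-bit
field of the R6 ll/sc encodings.

  int atomic_add_ten(void) {
    struct { int pad[63]; int count; } s = { {0}, 0 };
    int old, tmp;
    __asm__ __volatile__(
        ".set push\n.set noreorder\n"
        "1:\n"
        "ll    %0, %2\n"
        "addu  %1, %0, %4\n"
        "sc    %1, %2\n"
        "beqz  %1, 1b\n"
        "addu  %1, %0, %4\n"  /* branch delay slot */
        ".set pop\n"
        : "=&r"(old), "=&r"(tmp), "=ZC"(s.count)
        : "ZC"(s.count), "Ir"(10)
        : "memory");
    return old;
  }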

Reviewers: dsanders, vkalintris

Differential Revision: https://reviews.llvm.org/D21615

Added:
    llvm/trunk/test/CodeGen/Mips/inlineasm-constraint_ZC_2.ll
Modified:
    llvm/trunk/include/llvm/IR/InlineAsm.h
    llvm/trunk/lib/CodeGen/MachineInstr.cpp
    llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
    llvm/trunk/lib/Target/Mips/MipsSERegisterInfo.cpp

Modified: llvm/trunk/include/llvm/IR/InlineAsm.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/InlineAsm.h?rev=275786&r1=275785&r2=275786&view=diff
==============================================================================
--- llvm/trunk/include/llvm/IR/InlineAsm.h (original)
+++ llvm/trunk/include/llvm/IR/InlineAsm.h Mon Jul 18 08:17:31 2016
@@ -272,6 +272,16 @@ public:
     return Kind | (NumOps << 3);
   }
 
+  static bool isRegDefKind(unsigned Flag){ return getKind(Flag) == Kind_RegDef;}
+  static bool isImmKind(unsigned Flag) { return getKind(Flag) == Kind_Imm; }
+  static bool isMemKind(unsigned Flag) { return getKind(Flag) == Kind_Mem; }
+  static bool isRegDefEarlyClobberKind(unsigned Flag) {
+    return getKind(Flag) == Kind_RegDefEarlyClobber;
+  }
+  static bool isClobberKind(unsigned Flag) {
+    return getKind(Flag) == Kind_Clobber;
+  }
+
   /// getFlagWordForMatchingOp - Augment an existing flag word returned by
   /// getFlagWord with information indicating that this input operand is tied
   /// to a previous output operand.
@@ -290,6 +300,8 @@ public:
   static unsigned getFlagWordForRegClass(unsigned InputFlag, unsigned RC) {
     // Store RC + 1, reserve the value 0 to mean 'no register class'.
     ++RC;
+    assert(!isImmKind(InputFlag) && "Immediates cannot have a register class");
+    assert(!isMemKind(InputFlag) && "Memory operand cannot have a register class");
     assert(RC <= 0x7fff && "Too large register class ID");
     assert((InputFlag & ~0xffff) == 0 && "High bits already contain data");
     return InputFlag | (RC << 16);
@@ -298,6 +310,7 @@ public:
   /// Augment an existing flag word returned by getFlagWord with the constraint
   /// code for a memory constraint.
   static unsigned getFlagWordForMem(unsigned InputFlag, unsigned Constraint) {
+    assert(isMemKind(InputFlag) && "InputFlag is not a memory constraint!");
     assert(Constraint <= 0x7fff && "Too large a memory constraint ID");
     assert(Constraint <= Constraints_Max && "Unknown constraint ID");
     assert((InputFlag & ~0xffff) == 0 && "High bits already contain data");
@@ -313,16 +326,6 @@ public:
     return Flags & 7;
   }
 
-  static bool isRegDefKind(unsigned Flag){ return getKind(Flag) == Kind_RegDef;}
-  static bool isImmKind(unsigned Flag) { return getKind(Flag) == Kind_Imm; }
-  static bool isMemKind(unsigned Flag) { return getKind(Flag) == Kind_Mem; }
-  static bool isRegDefEarlyClobberKind(unsigned Flag) {
-    return getKind(Flag) == Kind_RegDefEarlyClobber;
-  }
-  static bool isClobberKind(unsigned Flag) {
-    return getKind(Flag) == Kind_Clobber;
-  }
-
   static unsigned getMemoryConstraintID(unsigned Flag) {
     assert(isMemKind(Flag));
     return (Flag >> Constraints_ShiftAmount) & 0x7fff;

Modified: llvm/trunk/lib/CodeGen/MachineInstr.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MachineInstr.cpp?rev=275786&r1=275785&r2=275786&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MachineInstr.cpp (original)
+++ llvm/trunk/lib/CodeGen/MachineInstr.cpp Mon Jul 18 08:17:31 2016
@@ -1201,7 +1201,10 @@ MachineInstr::getRegClassConstraint(unsi
 
   unsigned Flag = getOperand(FlagIdx).getImm();
   unsigned RCID;
-  if (InlineAsm::hasRegClassConstraint(Flag, RCID))
+  if ((InlineAsm::getKind(Flag) == InlineAsm::Kind_RegUse ||
+       InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDef ||
+       InlineAsm::getKind(Flag) == InlineAsm::Kind_RegDefEarlyClobber) &&
+      InlineAsm::hasRegClassConstraint(Flag, RCID))
     return TRI->getRegClass(RCID);
 
   // Assume that all registers in a memory operand are pointers.
@@ -1826,13 +1829,41 @@ void MachineInstr::print(raw_ostream &OS
       }
 
       unsigned RCID = 0;
-      if (InlineAsm::hasRegClassConstraint(Flag, RCID)) {
+      if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) &&
+          InlineAsm::hasRegClassConstraint(Flag, RCID)) {
         if (TRI) {
           OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
         } else
           OS << ":RC" << RCID;
       }
 
+      if (InlineAsm::isMemKind(Flag)) {
+        unsigned MCID = InlineAsm::getMemoryConstraintID(Flag);
+        switch (MCID) {
+        case InlineAsm::Constraint_es: OS << ":es"; break;
+        case InlineAsm::Constraint_i:  OS << ":i"; break;
+        case InlineAsm::Constraint_m:  OS << ":m"; break;
+        case InlineAsm::Constraint_o:  OS << ":o"; break;
+        case InlineAsm::Constraint_v:  OS << ":v"; break;
+        case InlineAsm::Constraint_Q:  OS << ":Q"; break;
+        case InlineAsm::Constraint_R:  OS << ":R"; break;
+        case InlineAsm::Constraint_S:  OS << ":S"; break;
+        case InlineAsm::Constraint_T:  OS << ":T"; break;
+        case InlineAsm::Constraint_Um: OS << ":Um"; break;
+        case InlineAsm::Constraint_Un: OS << ":Un"; break;
+        case InlineAsm::Constraint_Uq: OS << ":Uq"; break;
+        case InlineAsm::Constraint_Us: OS << ":Us"; break;
+        case InlineAsm::Constraint_Ut: OS << ":Ut"; break;
+        case InlineAsm::Constraint_Uv: OS << ":Uv"; break;
+        case InlineAsm::Constraint_Uy: OS << ":Uy"; break;
+        case InlineAsm::Constraint_X:  OS << ":X"; break;
+        case InlineAsm::Constraint_Z:  OS << ":Z"; break;
+        case InlineAsm::Constraint_ZC: OS << ":ZC"; break;
+        case InlineAsm::Constraint_Zy: OS << ":Zy"; break;
+        default: OS << ":?"; break;
+        }
+      }
+
       unsigned TiedTo = 0;
       if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
         OS << " tiedto:$" << TiedTo;

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp?rev=275786&r1=275785&r2=275786&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp Mon Jul 18 08:17:31 2016
@@ -1950,15 +1950,15 @@ void SelectionDAGISel::SelectInlineAsmMe
 
       // Otherwise, this is a memory operand.  Ask the target to select it.
       std::vector<SDValue> SelOps;
-      if (SelectInlineAsmMemoryOperand(InOps[i+1],
-                                       InlineAsm::getMemoryConstraintID(Flags),
-                                       SelOps))
+      unsigned ConstraintID = InlineAsm::getMemoryConstraintID(Flags);
+      if (SelectInlineAsmMemoryOperand(InOps[i+1], ConstraintID, SelOps))
         report_fatal_error("Could not match memory address.  Inline asm"
                            " failure!");
 
       // Add this to the output node.
       unsigned NewFlags =
         InlineAsm::getFlagWord(InlineAsm::Kind_Mem, SelOps.size());
+      NewFlags = InlineAsm::getFlagWordForMem(NewFlags, ConstraintID);
       Ops.push_back(CurDAG->getTargetConstant(NewFlags, DL, MVT::i32));
       Ops.insert(Ops.end(), SelOps.begin(), SelOps.end());
       i += 2;

Modified: llvm/trunk/lib/Target/Mips/MipsSERegisterInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MipsSERegisterInfo.cpp?rev=275786&r1=275785&r2=275786&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Mips/MipsSERegisterInfo.cpp (original)
+++ llvm/trunk/lib/Target/Mips/MipsSERegisterInfo.cpp Mon Jul 18 08:17:31 2016
@@ -60,10 +60,11 @@ MipsSERegisterInfo::intRegClass(unsigned
   return &Mips::GPR64RegClass;
 }
 
-/// Get the size of the offset supported by the given load/store.
+/// Get the size of the offset supported by the given load/store/inline asm.
 /// The result includes the effects of any scale factors applied to the
 /// instruction immediate.
-static inline unsigned getLoadStoreOffsetSizeInBits(const unsigned Opcode) {
+static inline unsigned getLoadStoreOffsetSizeInBits(const unsigned Opcode,
+                                                    MachineOperand MO) {
   switch (Opcode) {
   case Mips::LD_B:
   case Mips::ST_B:
@@ -77,6 +78,49 @@ static inline unsigned getLoadStoreOffse
   case Mips::LD_D:
   case Mips::ST_D:
     return 10 + 3 /* scale factor */;
+  case Mips::LL:
+  case Mips::LL64:
+  case Mips::LLD:
+  case Mips::LLE:
+  case Mips::SC:
+  case Mips::SC64:
+  case Mips::SCD:
+  case Mips::SCE:
+    return 16;
+  case Mips::LLE_MM:
+  case Mips::LLE_MMR6:
+  case Mips::LL_MM:
+  case Mips::SCE_MM:
+  case Mips::SCE_MMR6:
+  case Mips::SC_MM:
+    return 12;
+  case Mips::LL64_R6:
+  case Mips::LL_R6:
+  case Mips::LLD_R6:
+  case Mips::SC64_R6:
+  case Mips::SCD_R6:
+  case Mips::SC_R6:
+    return 9;
+  case Mips::INLINEASM: {
+    unsigned ConstraintID = InlineAsm::getMemoryConstraintID(MO.getImm());
+    switch (ConstraintID) {
+    case InlineAsm::Constraint_ZC: {
+      const MipsSubtarget &Subtarget = MO.getParent()
+                                           ->getParent()
+                                           ->getParent()
+                                           ->getSubtarget<MipsSubtarget>();
+      if (Subtarget.inMicroMipsMode())
+        return 12;
+
+      if (Subtarget.hasMips32r6())
+        return 9;
+
+      return 16;
+    }
+    default:
+      return 16;
+    }
+  }
   default:
     return 16;
   }
@@ -166,7 +210,8 @@ void MipsSERegisterInfo::eliminateFI(Mac
     // Make sure Offset fits within the field available.
     // For MSA instructions, this is a 10-bit signed immediate (scaled by
     // element size), otherwise it is a 16-bit signed immediate.
-    unsigned OffsetBitSize = getLoadStoreOffsetSizeInBits(MI.getOpcode());
+    unsigned OffsetBitSize =
+        getLoadStoreOffsetSizeInBits(MI.getOpcode(), MI.getOperand(OpNo - 1));
     unsigned OffsetAlign = getLoadStoreOffsetAlign(MI.getOpcode());
 
     if (OffsetBitSize < 16 && isInt<16>(Offset) &&

Added: llvm/trunk/test/CodeGen/Mips/inlineasm-constraint_ZC_2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/inlineasm-constraint_ZC_2.ll?rev=275786&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/inlineasm-constraint_ZC_2.ll (added)
+++ llvm/trunk/test/CodeGen/Mips/inlineasm-constraint_ZC_2.ll Mon Jul 18 08:17:31 2016
@@ -0,0 +1,36 @@
+; RUN: llc -march=mips -mcpu=mips32r6 < %s | FileCheck %s --check-prefixes=ALL,R6
+; RUN: llc -march=mips -mcpu=mips64r6 -target-abi=n64 < %s | FileCheck %s --check-prefixes=ALL,R6
+; RUN: llc -march=mips -mcpu=mips32 < %s | FileCheck %s --check-prefixes=ALL,PRER6
+; RUN: llc -march=mips -mcpu=mips64 -target-abi=n64 < %s | FileCheck %s --check-prefixes=ALL,PRER6
+
+
+%struct.anon = type { [63 x i32], i32, i32 }
+
+define i32 @Atomic() {
+; CHECK-LABEL: Atomic:
+entry:
+  %s = alloca %struct.anon, align 4
+  %0 = bitcast %struct.anon* %s to i8*
+  %count = getelementptr inbounds %struct.anon, %struct.anon* %s, i64 0, i32 1
+  store i32 0, i32* %count, align 4
+; R6: addiu $[[R0:[0-9a-z]+]], $sp, {{[0-9]+}}
+
+; ALL: #APP
+
+; R6: ll ${{[0-9a-z]+}}, 0($[[R0]])
+; R6: sc ${{[0-9a-z]+}}, 0($[[R0]])
+
+; PRER6: ll ${{[0-9a-z]+}}, {{[0-9]+}}(${{[0-9a-z]+}})
+; PRER6: sc ${{[0-9a-z]+}}, {{[0-9]+}}(${{[0-9a-z]+}})
+
+; ALL: #NO_APP
+
+  %1 = call { i32, i32 } asm sideeffect ".set push\0A.set noreorder\0A1:\0All $0, $2\0Aaddu $1, $0, $3\0Asc $1, $2\0Abeqz $1, 1b\0Aaddu $1, $0, $3\0A.set pop\0A", "=&r,=&r,=*^ZC,Ir,*^ZC,~{memory},~{$1}"(i32* %count, i32 10, i32* %count)
+  %asmresult1.i = extractvalue { i32, i32 } %1, 1
+  %cmp = icmp ne i32 %asmresult1.i, 10
+  %conv = zext i1 %cmp to i32
+  %call2 = call i32 @f(i32 signext %conv)
+  ret i32 %call2
+}
+
+declare i32 @f(i32 signext)
