[llvm] b087b91 - [AMDGPU][CODEGEN] Added 'A' constraint for inline assembler

Dmitry Preobrazhensky via llvm-commits <llvm-commits at lists.llvm.org>
Mon May 25 04:24:05 PDT 2020


Author: Dmitry Preobrazhensky
Date: 2020-05-25T14:23:34+03:00
New Revision: b087b91c917087bc53d47282a16ee4af78bfe286

URL: https://github.com/llvm/llvm-project/commit/b087b91c917087bc53d47282a16ee4af78bfe286
DIFF: https://github.com/llvm/llvm-project/commit/b087b91c917087bc53d47282a16ee4af78bfe286.diff

LOG: [AMDGPU][CODEGEN] Added 'A' constraint for inline assembler

Summary: The 'A' constraint requires an immediate integer or floating-point constant that can be inlined in an instruction encoding.

Reviewers: arsenm, rampitec

Differential Revision: https://reviews.llvm.org/D78494
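
For reference, the constraint is used from LLVM IR as in the tests added below: the operand must be a compile-time constant that the target can encode as an inline constant (an integer in [-16, 64], or one of the special floating-point values 0.5, -0.5, 1.0, -1.0, 2.0, -2.0, 4.0, -4.0, plus 1/(2*pi) on subtargets with that inline immediate). A minimal sketch, adapted from llvm/test/CodeGen/AMDGPU/inline-constraints.ll (the function name is illustrative):

  define i32 @inline_A_constraint_example() {
    %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(i32 1)
    ret i32 %v0
  }

Operands that are not constants, or constants outside the inlinable set, are rejected with "error: invalid operand for inline asm constraint 'A'".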

Added: 
    

Modified: 
    llvm/docs/LangRef.rst
    llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/lib/Target/AMDGPU/SIISelLowering.h
    llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
    llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
    llvm/test/CodeGen/AMDGPU/inline-constraints.ll

Removed: 
    


################################################################################
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index a1f3297d6454..bf0627e44196 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -4128,7 +4128,7 @@ AMDGPU:
 - ``[0-9]v``: The 32-bit VGPR register, number 0-9.
 - ``[0-9]s``: The 32-bit SGPR register, number 0-9.
 - ``[0-9]a``: The 32-bit AGPR register, number 0-9.
-
+- ``A``: An integer or a floating-point inline constant.
 
 All ARM modes:
 

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
index 36cc0ea20052..81676d63643d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -1339,7 +1339,18 @@ bool AMDGPUAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
     AMDGPUInstPrinter::printRegOperand(MO.getReg(), O,
                                        *MF->getSubtarget().getRegisterInfo());
     return false;
+  } else if (MO.isImm()) {
+    int64_t Val = MO.getImm();
+    if (AMDGPU::isInlinableIntLiteral(Val)) {
+      O << Val;
+    } else if (isUInt<16>(Val)) {
+      O << format("0x%" PRIx64, static_cast<uint16_t>(Val));
+    } else if (isUInt<32>(Val)) {
+      O << format("0x%" PRIx64, static_cast<uint32_t>(Val));
+    } else {
+      O << format("0x%" PRIx64, static_cast<uint64_t>(Val));
+    }
+    return false;
   }
-
   return true;
 }

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 3ddf4ae70397..2c147fa8947c 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -10886,11 +10886,69 @@ SITargetLowering::getConstraintType(StringRef Constraint) const {
     case 'v':
     case 'a':
       return C_RegisterClass;
+    case 'A':
+      return C_Other;
     }
   }
   return TargetLowering::getConstraintType(Constraint);
 }
 
+void SITargetLowering::LowerAsmOperandForConstraint(SDValue Op,
+                                                    std::string &Constraint,
+                                                    std::vector<SDValue> &Ops,
+                                                    SelectionDAG &DAG) const {
+  if (Constraint.length() == 1 && Constraint[0] == 'A') {
+    LowerAsmOperandForConstraintA(Op, Ops, DAG);
+  } else {
+    TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
+  }
+}
+
+void SITargetLowering::LowerAsmOperandForConstraintA(SDValue Op,
+                                                     std::vector<SDValue> &Ops,
+                                                     SelectionDAG &DAG) const {
+  unsigned Size = Op.getScalarValueSizeInBits();
+  if (Size > 64)
+    return;
+
+  uint64_t Val;
+  bool IsConst = false;
+  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+    Val = C->getSExtValue();
+    IsConst = true;
+  } else if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op)) {
+    Val = C->getValueAPF().bitcastToAPInt().getSExtValue();
+    IsConst = true;
+  } else if (BuildVectorSDNode *V = dyn_cast<BuildVectorSDNode>(Op)) {
+    if (Size != 16 || Op.getNumOperands() != 2)
+      return;
+    if (Op.getOperand(0).isUndef() || Op.getOperand(1).isUndef())
+      return;
+    if (ConstantSDNode *C = V->getConstantSplatNode()) {
+      Val = C->getSExtValue();
+      IsConst = true;
+    } else if (ConstantFPSDNode *C = V->getConstantFPSplatNode()) {
+      Val = C->getValueAPF().bitcastToAPInt().getSExtValue();
+      IsConst = true;
+    }
+  }
+
+  if (IsConst) {
+    bool HasInv2Pi = Subtarget->hasInv2PiInlineImm();
+    if ((Size == 16 && AMDGPU::isInlinableLiteral16(Val, HasInv2Pi)) ||
+        (Size == 32 && AMDGPU::isInlinableLiteral32(Val, HasInv2Pi)) ||
+        (Size == 64 && AMDGPU::isInlinableLiteral64(Val, HasInv2Pi))) {
+      // Clear unused bits of fp constants
+      if (!AMDGPU::isInlinableIntLiteral(Val)) {
+        unsigned UnusedBits = 64 - Size;
+        Val = (Val << UnusedBits) >> UnusedBits;
+      }
+      auto Res = DAG.getTargetConstant(Val, SDLoc(Op), MVT::i64);
+      Ops.push_back(Res);
+    }
+  }
+}
+
 // Figure out which registers should be reserved for stack access. Only after
 // the function is legalized do we know all of the non-spill stack objects or if
 // calls are present.
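
A quick sketch of the value flow through the "clear unused bits" step above, assuming the half constant -0.5 used by the new tests:

  half -0.5, bitcast to i16   -> 0xb800
  getSExtValue() as i64       -> 0xffffffffffffb800
  (Val << 48) >> 48           -> 0x000000000000b800

The sign-extended value passes isInlinableLiteral16 but is not an inlinable integer, so the 48 unused high bits are cleared, and PrintAsmOperand later emits the operand as 0xb800, matching the VI check lines in inline_A_constraint_H6 and inline_A_constraint_V1 below.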

diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index 226003423889..7ef11eba4f9c 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -383,6 +383,13 @@ class SITargetLowering final : public AMDGPUTargetLowering {
   getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                StringRef Constraint, MVT VT) const override;
   ConstraintType getConstraintType(StringRef Constraint) const override;
+  void LowerAsmOperandForConstraint(SDValue Op,
+                                    std::string &Constraint,
+                                    std::vector<SDValue> &Ops,
+                                    SelectionDAG &DAG) const override;
+  void LowerAsmOperandForConstraintA(SDValue Op,
+                                     std::vector<SDValue> &Ops,
+                                     SelectionDAG &DAG) const;
   SDValue copyToM0(SelectionDAG &DAG, SDValue Chain, const SDLoc &DL,
                    SDValue V) const;
 

diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index 409bef0065e2..cba9857e4d15 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -1169,8 +1169,12 @@ unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
   return getRegBitWidth(MRI->getRegClass(RCID)) / 8;
 }
 
+bool isInlinableIntLiteral(int64_t Literal) {
+  return Literal >= -16 && Literal <= 64;
+}
+
 bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
-  if (Literal >= -16 && Literal <= 64)
+  if (isInlinableIntLiteral(Literal))
     return true;
 
   uint64_t Val = static_cast<uint64_t>(Literal);
@@ -1187,7 +1191,7 @@ bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
 }
 
 bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
-  if (Literal >= -16 && Literal <= 64)
+  if (isInlinableIntLiteral(Literal))
     return true;
 
   // The actual type of the operand does not seem to matter as long
@@ -1216,7 +1220,7 @@ bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) {
   if (!HasInv2Pi)
     return false;
 
-  if (Literal >= -16 && Literal <= 64)
+  if (isInlinableIntLiteral(Literal))
     return true;
 
   uint16_t Val = static_cast<uint16_t>(Literal);

diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
index ef7b22413884..224f797b3ef8 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -776,6 +776,9 @@ struct SIModeRegisterDefaults {
   }
 };
 
+LLVM_READNONE
+bool isInlinableIntLiteral(int64_t Literal);
+
 } // end namespace AMDGPU
 } // end namespace llvm
 

diff --git a/llvm/test/CodeGen/AMDGPU/inline-constraints.ll b/llvm/test/CodeGen/AMDGPU/inline-constraints.ll
index 6f1d35519f2f..63585ebc9553 100644
--- a/llvm/test/CodeGen/AMDGPU/inline-constraints.ll
+++ b/llvm/test/CodeGen/AMDGPU/inline-constraints.ll
@@ -1,5 +1,8 @@
-; RUN: llc < %s -march=amdgcn -mcpu=bonaire -verify-machineinstrs | FileCheck --check-prefix=GCN %s
-; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefix=GCN %s
+; RUN: not llc < %s -march=amdgcn -mcpu=bonaire -verify-machineinstrs | FileCheck --check-prefix=GCN %s
+; RUN: not llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck --check-prefix=GCN --check-prefix=VI %s
+
+; RUN: not llc < %s -march=amdgcn -mcpu=bonaire -verify-machineinstrs 2>&1 | FileCheck --check-prefix=NOGCN --check-prefix=NOSI %s
+; RUN: not llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs 2>&1 | FileCheck --check-prefix=NOGCN %s
 
 ; GCN-LABEL: {{^}}inline_reg_constraints:
 ; GCN: flat_load_dword v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}]
@@ -74,3 +77,273 @@ define amdgpu_kernel void @inline_sreg_constraint_imm_f64() {
   tail call void asm sideeffect "; use $0", "s"(double 1.0)
   ret void
 }
+
+;==============================================================================
+; 'A' constraint, 16-bit operand
+;==============================================================================
+
+; NOSI: error: invalid operand for inline asm constraint 'A'
+; VI-LABEL: {{^}}inline_A_constraint_H0:
+; VI: v_mov_b32 {{v[0-9]+}}, 64
+define i32 @inline_A_constraint_H0() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(i16 64)
+  ret i32 %v0
+}
+
+; NOSI: error: invalid operand for inline asm constraint 'A'
+; VI-LABEL: {{^}}inline_A_constraint_H1:
+; VI: v_mov_b32 {{v[0-9]+}}, -16
+define i32 @inline_A_constraint_H1() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(i16 -16)
+  ret i32 %v0
+}
+
+; NOSI: error: invalid operand for inline asm constraint 'A'
+; VI-LABEL: {{^}}inline_A_constraint_H2:
+; VI: v_mov_b32 {{v[0-9]+}}, 0x3c00
+define i32 @inline_A_constraint_H2() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(i16 bitcast (half 1.0 to i16))
+  ret i32 %v0
+}
+
+; NOSI: error: invalid operand for inline asm constraint 'A'
+; VI-LABEL: {{^}}inline_A_constraint_H3:
+; VI: v_mov_b32 {{v[0-9]+}}, 0xbc00
+define i32 @inline_A_constraint_H3() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(i16 bitcast (half -1.0 to i16))
+  ret i32 %v0
+}
+
+; NOSI: error: invalid operand for inline asm constraint 'A'
+; VI-LABEL: {{^}}inline_A_constraint_H4:
+; VI: v_mov_b32 {{v[0-9]+}}, 0x3118
+define i32 @inline_A_constraint_H4() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(half 0xH3118)
+  ret i32 %v0
+}
+
+; NOSI: error: invalid operand for inline asm constraint 'A'
+; VI-LABEL: {{^}}inline_A_constraint_H5:
+; VI: v_mov_b32 {{v[0-9]+}}, 0x3118
+define i32 @inline_A_constraint_H5() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(i16 bitcast (half 0xH3118 to i16))
+  ret i32 %v0
+}
+
+; NOSI: error: invalid operand for inline asm constraint 'A'
+; VI-LABEL: {{^}}inline_A_constraint_H6:
+; VI: v_mov_b32 {{v[0-9]+}}, 0xb800
+define i32 @inline_A_constraint_H6() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(half -0.5)
+  ret i32 %v0
+}
+
+; NOGCN: error: invalid operand for inline asm constraint 'A'
+define i32 @inline_A_constraint_H7() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(i16 bitcast (half 0xH3119 to i16))
+  ret i32 %v0
+}
+
+; NOGCN: error: invalid operand for inline asm constraint 'A'
+define i32 @inline_A_constraint_H8() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(i16 bitcast (half 0xH3117 to i16))
+  ret i32 %v0
+}
+
+; NOGCN: error: invalid operand for inline asm constraint 'A'
+define i32 @inline_A_constraint_H9() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(i16 65)
+  ret i32 %v0
+}
+
+;==============================================================================
+; 'A' constraint, 32-bit operand
+;==============================================================================
+
+; GCN-LABEL: {{^}}inline_A_constraint_F0:
+; GCN: v_mov_b32 {{v[0-9]+}}, -16
+define i32 @inline_A_constraint_F0() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(i32 -16)
+  ret i32 %v0
+}
+
+; GCN-LABEL: {{^}}inline_A_constraint_F1:
+; GCN: v_mov_b32 {{v[0-9]+}}, 1
+define i32 @inline_A_constraint_F1() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(i32 1)
+  ret i32 %v0
+}
+
+; GCN-LABEL: {{^}}inline_A_constraint_F2:
+; GCN: v_mov_b32 {{v[0-9]+}}, 0xbf000000
+define i32 @inline_A_constraint_F2() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(i32 bitcast (float -0.5 to i32))
+  ret i32 %v0
+}
+
+; GCN-LABEL: {{^}}inline_A_constraint_F3:
+; GCN: v_mov_b32 {{v[0-9]+}}, 0x40000000
+define i32 @inline_A_constraint_F3() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(i32 bitcast (float 2.0 to i32))
+  ret i32 %v0
+}
+
+; GCN-LABEL: {{^}}inline_A_constraint_F4:
+; GCN: v_mov_b32 {{v[0-9]+}}, 0xc0800000
+define i32 @inline_A_constraint_F4() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(float -4.0)
+  ret i32 %v0
+}
+
+; NOSI: error: invalid operand for inline asm constraint 'A'
+; VI-LABEL: {{^}}inline_A_constraint_F5:
+; VI: v_mov_b32 {{v[0-9]+}}, 0x3e22f983
+define i32 @inline_A_constraint_F5() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(i32 1042479491)
+  ret i32 %v0
+}
+
+; GCN-LABEL: {{^}}inline_A_constraint_F6:
+; GCN: v_mov_b32 {{v[0-9]+}}, 0x3f000000
+define i32 @inline_A_constraint_F6() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(float 0.5)
+  ret i32 %v0
+}
+
+; NOGCN: error: invalid operand for inline asm constraint 'A'
+define i32 @inline_A_constraint_F7() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(i32 1042479490)
+  ret i32 %v0
+}
+
+; NOGCN: error: invalid operand for inline asm constraint 'A'
+define i32 @inline_A_constraint_F8() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(i32 -17)
+  ret i32 %v0
+}
+
+;==============================================================================
+; 'A' constraint, 64-bit operand
+;==============================================================================
+
+; GCN-LABEL: {{^}}inline_A_constraint_D0:
+; GCN: v_mov_b32 {{v[0-9]+}}, -16
+define i32 @inline_A_constraint_D0() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(i64 -16)
+  ret i32 %v0
+}
+
+; GCN-LABEL: {{^}}inline_A_constraint_D1:
+; GCN: v_cvt_f32_f64 {{v[0-9]+}}, 0xc000000000000000
+define i32 @inline_A_constraint_D1() {
+  %v0 = tail call i32 asm "v_cvt_f32_f64 $0, $1", "=v,A"(i64 bitcast (double -2.0 to i64))
+  ret i32 %v0
+}
+
+; GCN-LABEL: {{^}}inline_A_constraint_D2:
+; GCN: v_cvt_f32_f64 {{v[0-9]+}}, 0x3fe0000000000000
+define i32 @inline_A_constraint_D2() {
+  %v0 = tail call i32 asm "v_cvt_f32_f64 $0, $1", "=v,A"(double 0.5)
+  ret i32 %v0
+}
+
+; NOSI: error: invalid operand for inline asm constraint 'A'
+; VI-LABEL: {{^}}inline_A_constraint_D3:
+; VI: v_cvt_f32_f64 {{v[0-9]+}}, 0x3fc45f306dc9c882
+define i32 @inline_A_constraint_D3() {
+  %v0 = tail call i32 asm "v_cvt_f32_f64 $0, $1", "=v,A"(double 0.15915494309189532)
+  ret i32 %v0
+}
+
+; NOSI: error: invalid operand for inline asm constraint 'A'
+; VI-LABEL: {{^}}inline_A_constraint_D4:
+; VI: v_cvt_f32_f64 {{v[0-9]+}}, 0x3fc45f306dc9c882
+define i32 @inline_A_constraint_D4() {
+  %v0 = tail call i32 asm "v_cvt_f32_f64 $0, $1", "=v,A"(i64 bitcast (double 0.15915494309189532 to i64))
+  ret i32 %v0
+}
+
+; GCN-LABEL: {{^}}inline_A_constraint_D5:
+; GCN: v_cvt_f32_f64 {{v[0-9]+}}, 0xc000000000000000
+define i32 @inline_A_constraint_D5() {
+  %v0 = tail call i32 asm "v_cvt_f32_f64 $0, $1", "=v,A"(double -2.0)
+  ret i32 %v0
+}
+
+; NOGCN: error: invalid operand for inline asm constraint 'A'
+define i32 @inline_A_constraint_D8() {
+  %v0 = tail call i32 asm "v_cvt_f32_f64 $0, $1", "=v,A"(double 1.1)
+  ret i32 %v0
+}
+
+; NOGCN: error: invalid operand for inline asm constraint 'A'
+define i32 @inline_A_constraint_D9() {
+  %v0 = tail call i32 asm "v_cvt_f32_f64 $0, $1", "=v,A"(i64 bitcast (double 0.1 to i64))
+  ret i32 %v0
+}
+
+;==============================================================================
+; 'A' constraint, v2x16 operand
+;==============================================================================
+
+; NOSI: error: invalid operand for inline asm constraint 'A'
+; VI-LABEL: {{^}}inline_A_constraint_V0:
+; VI: v_mov_b32 {{v[0-9]+}}, -4
+define i32 @inline_A_constraint_V0() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(<2 x i16> <i16 -4, i16 -4>)
+  ret i32 %v0
+}
+
+; NOSI: error: invalid operand for inline asm constraint 'A'
+; VI-LABEL: {{^}}inline_A_constraint_V1:
+; VI: v_mov_b32 {{v[0-9]+}}, 0xb800
+define i32 @inline_A_constraint_V1() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(<2 x half> <half -0.5, half -0.5>)
+  ret i32 %v0
+}
+
+; NOGCN: error: invalid operand for inline asm constraint 'A'
+define i32 @inline_A_constraint_V2() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(<2 x i16> <i16 -4, i16 undef>)
+  ret i32 %v0
+}
+
+; NOGCN: error: invalid operand for inline asm constraint 'A'
+define i32 @inline_A_constraint_V3() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(<2 x half> <half undef, half -0.5>)
+  ret i32 %v0
+}
+
+; NOGCN: error: invalid operand for inline asm constraint 'A'
+define i32 @inline_A_constraint_V4() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(<2 x i16> <i16 1, i16 2>)
+  ret i32 %v0
+}
+
+; NOGCN: error: invalid operand for inline asm constraint 'A'
+define i32 @inline_A_constraint_V5() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(<4 x i16> <i16 0, i16 0, i16 0, i16 0>)
+  ret i32 %v0
+}
+
+; NOGCN: error: invalid operand for inline asm constraint 'A'
+define i32 @inline_A_constraint_V6() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(<2 x i32> <i32 0, i32 0>)
+  ret i32 %v0
+}
+
+;==============================================================================
+; 'A' constraint, type errors
+;==============================================================================
+
+; NOGCN: error: invalid operand for inline asm constraint 'A'
+define i32 @inline_A_constraint_E1(i32 %x) {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(i32 %x)
+  ret i32 %v0
+}
+
+; NOGCN: error: invalid operand for inline asm constraint 'A'
+define i32 @inline_A_constraint_E2() {
+  %v0 = tail call i32 asm "v_mov_b32 $0, $1", "=v,A"(i128 100000000000000000000)
+  ret i32 %v0
+}


        

