[llvm] c84b2c4 - [VE] Support inline assembly with vector registers
Kazushi Marukawa via llvm-commits
llvm-commits at lists.llvm.org
Tue Nov 10 03:55:47 PST 2020
Author: Kazushi (Jam) Marukawa
Date: 2020-11-10T20:55:38+09:00
New Revision: c84b2c49be46b701648b55dbd73b588d5551f04e
URL: https://github.com/llvm/llvm-project/commit/c84b2c49be46b701648b55dbd73b588d5551f04e
DIFF: https://github.com/llvm/llvm-project/commit/c84b2c49be46b701648b55dbd73b588d5551f04e.diff
LOG: [VE] Support inline assembly with vector registers
Support inline assembly with vector registers. Also add a regression test.
Reviewed By: simoll
Differential Revision: https://reviews.llvm.org/D91146
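As a usage illustration (not part of this commit), the new 'v' constraint lets vector registers be named directly in inline assembly. The following C sketch assumes clang targeting VE forwards the single-letter "v" constraint through to this backend hook; the 2048-byte vector typedef mirrors the <256 x double> type used in the regression test below and is illustrative, not a VE intrinsics header type:

    /* Hypothetical sketch: set the vector length, then load and store
       through a vector register constrained by "v". */
    typedef double vr256 __attribute__((vector_size(256 * sizeof(double))));

    void copy256(double *dst, const double *src, long stride) {
      vr256 v;
      __asm__ volatile("lvl %0" : : "r"(256L));                              /* set vector length      */
      __asm__ volatile("vld %0, %2, %1" : "=v"(v) : "r"(src), "r"(stride));  /* strided vector load    */
      __asm__ volatile("vst %0, %2, %1" : : "v"(v), "r"(dst), "r"(stride));  /* strided vector store   */
    }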
Added:
llvm/test/CodeGen/VE/Scalar/inlineasm-vldvst.ll
Modified:
llvm/lib/Target/VE/VEAsmPrinter.cpp
llvm/lib/Target/VE/VEISelLowering.cpp
llvm/lib/Target/VE/VEISelLowering.h
Removed:
################################################################################
diff --git a/llvm/lib/Target/VE/VEAsmPrinter.cpp b/llvm/lib/Target/VE/VEAsmPrinter.cpp
index 4b63786203cc..0ebf0a87de74 100644
--- a/llvm/lib/Target/VE/VEAsmPrinter.cpp
+++ b/llvm/lib/Target/VE/VEAsmPrinter.cpp
@@ -377,6 +377,7 @@ bool VEAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
// See if this is a generic print operand
return AsmPrinter::PrintAsmOperand(MI, OpNo, ExtraCode, O);
case 'r':
+ case 'v':
break;
}
}
diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp
index 49a52e951b86..4fa548de5551 100644
--- a/llvm/lib/Target/VE/VEISelLowering.cpp
+++ b/llvm/lib/Target/VE/VEISelLowering.cpp
@@ -1551,6 +1551,19 @@ SDValue VETargetLowering::PerformDAGCombine(SDNode *N,
// VE Inline Assembly Support
//===----------------------------------------------------------------------===//
+VETargetLowering::ConstraintType
+VETargetLowering::getConstraintType(StringRef Constraint) const {
+ if (Constraint.size() == 1) {
+ switch (Constraint[0]) {
+ default:
+ break;
+ case 'v': // vector registers
+ return C_RegisterClass;
+ }
+ }
+ return TargetLowering::getConstraintType(Constraint);
+}
+
std::pair<unsigned, const TargetRegisterClass *>
VETargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
StringRef Constraint,
@@ -1563,6 +1576,9 @@ VETargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
case 'r':
RC = &VE::I64RegClass;
break;
+ case 'v':
+ RC = &VE::V64RegClass;
+ break;
}
return std::make_pair(0U, RC);
}
diff --git a/llvm/lib/Target/VE/VEISelLowering.h b/llvm/lib/Target/VE/VEISelLowering.h
index 8fbd89dd6025..4ac609b73830 100644
--- a/llvm/lib/Target/VE/VEISelLowering.h
+++ b/llvm/lib/Target/VE/VEISelLowering.h
@@ -128,6 +128,7 @@ class VETargetLowering : public TargetLowering {
/// Inline Assembly {
+ ConstraintType getConstraintType(StringRef Constraint) const override;
std::pair<unsigned, const TargetRegisterClass *>
getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
StringRef Constraint, MVT VT) const override;
diff --git a/llvm/test/CodeGen/VE/Scalar/inlineasm-vldvst.ll b/llvm/test/CodeGen/VE/Scalar/inlineasm-vldvst.ll
new file mode 100644
index 000000000000..9ce65adce4c6
--- /dev/null
+++ b/llvm/test/CodeGen/VE/Scalar/inlineasm-vldvst.ll
@@ -0,0 +1,74 @@
+; RUN: llc < %s -mtriple=ve -mattr=+vpu | FileCheck %s
+
+define void @vld(i8* %p, i64 %i) nounwind {
+; CHECK-LABEL: vld:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: #APP
+; CHECK-NEXT: lea %s2, 256
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: lvl %s2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vld %v0, %s1, %s0
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: or %s11, 0, %s9
+ %lvl = tail call i64 asm sideeffect "lea $0, 256", "=r"() nounwind
+ tail call void asm sideeffect "lvl $0", "r"(i64 %lvl) nounwind
+ tail call <256 x double> asm sideeffect "vld $0, $2, $1", "=v,r,r"(i8* %p, i64 %i) nounwind
+ ret void
+}
+
+define void @vldvst(i8* %p, i64 %i) nounwind {
+; CHECK-LABEL: vldvst:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: #APP
+; CHECK-NEXT: lea %s2, 256
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: lvl %s2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vld %v0, %s1, %s0
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vst %v0, %s1, %s0
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: or %s11, 0, %s9
+ %lvl = tail call i64 asm sideeffect "lea $0, 256", "=r"() nounwind
+ tail call void asm sideeffect "lvl $0", "r"(i64 %lvl) nounwind
+ %1 = tail call <256 x double> asm sideeffect "vld $0, $2, $1", "=v,r,r"(i8* %p, i64 %i) nounwind
+ tail call void asm sideeffect "vst $0, $2, $1", "v,r,r"(<256 x double> %1, i8* %p, i64 %i) nounwind
+ ret void
+}
+
+define void @vld2vst2(i8* %p, i64 %i) nounwind {
+; CHECK-LABEL: vld2vst2:
+; CHECK: .LBB{{[0-9]+}}_2:
+; CHECK-NEXT: #APP
+; CHECK-NEXT: lea %s2, 256
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: lvl %s2
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vld %v0, %s1, %s0
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vld %v1, %s1, %s0
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vst %v0, %s1, %s0
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: #APP
+; CHECK-NEXT: vst %v1, %s1, %s0
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: or %s11, 0, %s9
+ %lvl = tail call i64 asm sideeffect "lea $0, 256", "=r"() nounwind
+ tail call void asm sideeffect "lvl $0", "r"(i64 %lvl) nounwind
+ %1 = tail call <256 x double> asm sideeffect "vld $0, $2, $1", "=v,r,r"(i8* %p, i64 %i) nounwind
+ %2 = tail call <256 x double> asm sideeffect "vld $0, $2, $1", "=v,r,r"(i8* %p, i64 %i) nounwind
+ tail call void asm sideeffect "vst $0, $2, $1", "v,r,r"(<256 x double> %1, i8* %p, i64 %i) nounwind
+ tail call void asm sideeffect "vst $0, $2, $1", "v,r,r"(<256 x double> %2, i8* %p, i64 %i) nounwind
+ ret void
+}