[llvm] r371967 - [SVE][Inline-Asm] Add constraints for SVE predicate registers
Kerry McLaughlin via llvm-commits
llvm-commits@lists.llvm.org
Mon Sep 16 02:45:27 PDT 2019
Author: kmclaughlin
Date: Mon Sep 16 02:45:27 2019
New Revision: 371967
URL: http://llvm.org/viewvc/llvm-project?rev=371967&view=rev
Log:
[SVE][Inline-Asm] Add constraints for SVE predicate registers
Summary:
Adds the following inline asm constraints for SVE:
- Upl: One of the low eight SVE predicate registers, P0 to P7 inclusive
- Upa: SVE predicate register with full range, P0 to P15
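As a hypothetical usage sketch (not part of this patch): at the source level these correspond to the GCC-style "Upl" and "Upa" inline-asm operand constraints. The example below assumes a compiler with SVE enabled (e.g. -march=armv8-a+sve) that accepts these constraints plus the ACLE types from <arm_sve.h>; the function names are illustrative only.

  // Hypothetical sketch, not part of this patch: assumes SVE and ACLE support.
  #include <arm_sve.h>

  svfloat16_t fadd_masked(svbool_t pg, svfloat16_t zdn, svfloat16_t zm) {
    // The destructive, predicated FADD only encodes a 3-bit governing
    // predicate field, so the operand must come from P0-P7: use "Upl".
    asm("fadd %0.h, %1/m, %0.h, %2.h"
        : "+w"(zdn)
        : "Upl"(pg), "w"(zm));
    return zdn;
  }

  svuint32_t incp_by_pred(svbool_t pg, svuint32_t zdn) {
    // INCP accepts any predicate register, so the full-range "Upa" is enough.
    asm("incp %0.s, %1"
        : "+w"(zdn)
        : "Upa"(pg));
    return zdn;
  }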
Reviewers: t.p.northover, sdesmalen, rovka, momchil.velikov, cameron.mcinally, greened, rengolin
Reviewed By: rovka
Subscribers: javed.absar, tschuett, rkruppe, psnobl, cfe-commits, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D66524
Modified:
llvm/trunk/docs/LangRef.rst
llvm/trunk/lib/IR/InlineAsm.cpp
llvm/trunk/lib/Target/AArch64/AArch64AsmPrinter.cpp
llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp
llvm/trunk/test/CodeGen/AArch64/aarch64-sve-asm.ll
Modified: llvm/trunk/docs/LangRef.rst
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/docs/LangRef.rst?rev=371967&r1=371966&r2=371967&view=diff
==============================================================================
--- llvm/trunk/docs/LangRef.rst (original)
+++ llvm/trunk/docs/LangRef.rst Mon Sep 16 02:45:27 2019
@@ -3825,6 +3825,8 @@ AArch64:
- ``w``: A 32, 64, or 128-bit floating-point, SIMD or SVE vector register.
- ``x``: Like w, but restricted to registers 0 to 15 inclusive.
- ``y``: Like w, but restricted to SVE vector registers Z0 to Z7 inclusive.
+- ``Upl``: One of the low eight SVE predicate registers (P0 to P7).
+- ``Upa``: Any of the SVE predicate registers (P0 to P15).
AMDGPU:
Modified: llvm/trunk/lib/IR/InlineAsm.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/IR/InlineAsm.cpp?rev=371967&r1=371966&r2=371967&view=diff
==============================================================================
--- llvm/trunk/lib/IR/InlineAsm.cpp (original)
+++ llvm/trunk/lib/IR/InlineAsm.cpp Mon Sep 16 02:45:27 2019
@@ -181,6 +181,16 @@ bool InlineAsm::ConstraintInfo::Parse(St
// FIXME: For now assuming these are 2-character constraints.
pCodes->push_back(StringRef(I+1, 2));
I += 3;
+ } else if (*I == '@') {
+ // Multi-letter constraint
+ ++I;
+ unsigned char C = static_cast<unsigned char>(*I);
+ assert(isdigit(C) && "Expected a digit!");
+ int N = C - '0';
+ assert(N > 0 && "Found a zero letter constraint!");
+ ++I;
+ pCodes->push_back(StringRef(I, N));
+ I += N;
} else {
// Single letter constraint.
pCodes->push_back(StringRef(I, 1));
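For reference, this is the parsing path taken by the new predicate constraints: in the IR constraint string a multi-letter constraint is spelled '@' followed by a single length digit and the constraint name, e.g. "@3Upl" inside "=w,@3Upl,w,w" (see the new tests below). A minimal standalone sketch of that decoding, independent of the LLVM API:

  // Standalone sketch of the '@<digit><name>' encoding consumed above.
  #include <cassert>
  #include <cctype>
  #include <cstddef>
  #include <iostream>
  #include <string>

  static std::string parseMultiLetter(const std::string &S, size_t &I) {
    assert(S[I] == '@' && "expected multi-letter constraint marker");
    ++I;
    assert(isdigit(static_cast<unsigned char>(S[I])) && "expected a digit");
    size_t N = static_cast<size_t>(S[I] - '0'); // length, e.g. 3 for "Upl"
    assert(N > 0 && "zero-length constraint");
    ++I;
    std::string Name = S.substr(I, N);
    I += N;
    return Name;
  }

  int main() {
    std::string Constraints = "=w,@3Upl,w,w";
    size_t I = Constraints.find('@');
    std::cout << parseMultiLetter(Constraints, I) << "\n"; // prints "Upl"
  }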
Modified: llvm/trunk/lib/Target/AArch64/AArch64AsmPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64AsmPrinter.cpp?rev=371967&r1=371966&r2=371967&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64AsmPrinter.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64AsmPrinter.cpp Mon Sep 16 02:45:27 2019
@@ -618,6 +618,8 @@ bool AArch64AsmPrinter::PrintAsmOperand(
const TargetRegisterClass *RegClass;
if (AArch64::ZPRRegClass.contains(Reg)) {
RegClass = &AArch64::ZPRRegClass;
+ } else if (AArch64::PPRRegClass.contains(Reg)) {
+ RegClass = &AArch64::PPRRegClass;
} else {
RegClass = &AArch64::FPR128RegClass;
AltName = AArch64::vreg;
Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp?rev=371967&r1=371966&r2=371967&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp Mon Sep 16 02:45:27 2019
@@ -5837,6 +5837,21 @@ const char *AArch64TargetLowering::Lower
return "r";
}
+enum PredicateConstraint {
+ Upl,
+ Upa,
+ Invalid
+};
+
+PredicateConstraint parsePredicateConstraint(StringRef Constraint) {
+ PredicateConstraint P = PredicateConstraint::Invalid;
+ if (Constraint == "Upa")
+ P = PredicateConstraint::Upa;
+ if (Constraint == "Upl")
+ P = PredicateConstraint::Upl;
+ return P;
+}
+
/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
AArch64TargetLowering::ConstraintType
@@ -5866,7 +5881,9 @@ AArch64TargetLowering::getConstraintType
case 'S': // A symbolic address
return C_Other;
}
- }
+ } else if (parsePredicateConstraint(Constraint) !=
+ PredicateConstraint::Invalid)
+ return C_RegisterClass;
return TargetLowering::getConstraintType(Constraint);
}
@@ -5897,6 +5914,10 @@ AArch64TargetLowering::getSingleConstrai
case 'z':
weight = CW_Constant;
break;
+ case 'U':
+ if (parsePredicateConstraint(constraint) != PredicateConstraint::Invalid)
+ weight = CW_Register;
+ break;
}
return weight;
}
@@ -5941,6 +5962,14 @@ AArch64TargetLowering::getRegForInlineAs
return std::make_pair(0U, &AArch64::ZPR_3bRegClass);
break;
}
+ } else {
+ PredicateConstraint PC = parsePredicateConstraint(Constraint);
+ if (PC != PredicateConstraint::Invalid) {
+ assert(VT.isScalableVector());
+ bool restricted = (PC == PredicateConstraint::Upl);
+ return restricted ? std::make_pair(0U, &AArch64::PPR_3bRegClass)
+ : std::make_pair(0U, &AArch64::PPRRegClass);
+ }
}
if (StringRef("{cc}").equals_lower(Constraint))
return std::make_pair(unsigned(AArch64::NZCV), &AArch64::CCRRegClass);
Modified: llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp?rev=371967&r1=371966&r2=371967&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64InstrInfo.cpp Mon Sep 16 02:45:27 2019
@@ -2507,6 +2507,17 @@ void AArch64InstrInfo::copyPhysReg(Machi
return;
}
+ // Copy a Predicate register by ORRing with itself.
+ if (AArch64::PPRRegClass.contains(DestReg) &&
+ AArch64::PPRRegClass.contains(SrcReg)) {
+ assert(Subtarget.hasSVE() && "Unexpected SVE register.");
+ BuildMI(MBB, I, DL, get(AArch64::ORR_PPzPP), DestReg)
+ .addReg(SrcReg) // Pg
+ .addReg(SrcReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ return;
+ }
+
// Copy a Z register by ORRing with itself.
if (AArch64::ZPRRegClass.contains(DestReg) &&
AArch64::ZPRRegClass.contains(SrcReg)) {
Modified: llvm/trunk/test/CodeGen/AArch64/aarch64-sve-asm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/aarch64-sve-asm.ll?rev=371967&r1=371966&r2=371967&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/aarch64-sve-asm.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/aarch64-sve-asm.ll Mon Sep 16 02:45:27 2019
@@ -8,6 +8,7 @@ target triple = "aarch64-none-linux-gnu"
; CHECK: [[ARG2:%[0-9]+]]:zpr = COPY $z0
; CHECK: [[ARG3:%[0-9]+]]:zpr = COPY [[ARG2]]
; CHECK: [[ARG4:%[0-9]+]]:zpr_3b = COPY [[ARG1]]
+; CHECK: INLINEASM {{.*}} [[ARG4]]
define <vscale x 16 x i8> @test_svadd_i8(<vscale x 16 x i8> %Zn, <vscale x 16 x i8> %Zm) {
%1 = tail call <vscale x 16 x i8> asm "add $0.b, $1.b, $2.b", "=w,w,y"(<vscale x 16 x i8> %Zn, <vscale x 16 x i8> %Zm)
ret <vscale x 16 x i8> %1
@@ -18,6 +19,7 @@ define <vscale x 16 x i8> @test_svadd_i8
; CHECK: [[ARG2:%[0-9]+]]:zpr = COPY $z0
; CHECK: [[ARG3:%[0-9]+]]:zpr = COPY [[ARG2]]
; CHECK: [[ARG4:%[0-9]+]]:zpr_4b = COPY [[ARG1]]
+; CHECK: INLINEASM {{.*}} [[ARG4]]
define <vscale x 2 x i64> @test_svsub_i64(<vscale x 2 x i64> %Zn, <vscale x 2 x i64> %Zm) {
%1 = tail call <vscale x 2 x i64> asm "sub $0.d, $1.d, $2.d", "=w,w,x"(<vscale x 2 x i64> %Zn, <vscale x 2 x i64> %Zm)
ret <vscale x 2 x i64> %1
@@ -28,6 +30,7 @@ define <vscale x 2 x i64> @test_svsub_i6
; CHECK: [[ARG2:%[0-9]+]]:zpr = COPY $z0
; CHECK: [[ARG3:%[0-9]+]]:zpr = COPY [[ARG2]]
; CHECK: [[ARG4:%[0-9]+]]:zpr_3b = COPY [[ARG1]]
+; CHECK: INLINEASM {{.*}} [[ARG4]]
define <vscale x 8 x half> @test_svfmul_f16(<vscale x 8 x half> %Zn, <vscale x 8 x half> %Zm) {
%1 = tail call <vscale x 8 x half> asm "fmul $0.h, $1.h, $2.h", "=w,w,y"(<vscale x 8 x half> %Zn, <vscale x 8 x half> %Zm)
ret <vscale x 8 x half> %1
@@ -38,7 +41,30 @@ define <vscale x 8 x half> @test_svfmul_
; CHECK: [[ARG2:%[0-9]+]]:zpr = COPY $z0
; CHECK: [[ARG3:%[0-9]+]]:zpr = COPY [[ARG2]]
; CHECK: [[ARG4:%[0-9]+]]:zpr_4b = COPY [[ARG1]]
+; CHECK: INLINEASM {{.*}} [[ARG4]]
define <vscale x 4 x float> @test_svfmul_f(<vscale x 4 x float> %Zn, <vscale x 4 x float> %Zm) {
%1 = tail call <vscale x 4 x float> asm "fmul $0.s, $1.s, $2.s", "=w,w,x"(<vscale x 4 x float> %Zn, <vscale x 4 x float> %Zm)
ret <vscale x 4 x float> %1
}
+
+; Function Attrs: nounwind readnone
+; CHECK: [[ARG1:%[0-9]+]]:zpr = COPY $z1
+; CHECK: [[ARG2:%[0-9]+]]:zpr = COPY $z0
+; CHECK: [[ARG3:%[0-9]+]]:ppr = COPY $p0
+; CHECK: [[ARG4:%[0-9]+]]:ppr_3b = COPY [[ARG3]]
+; CHECK: INLINEASM {{.*}} [[ARG4]]
+define <vscale x 8 x half> @test_svfadd_f16(<vscale x 16 x i1> %Pg, <vscale x 8 x half> %Zn, <vscale x 8 x half> %Zm) {
+ %1 = tail call <vscale x 8 x half> asm "fadd $0.h, $1/m, $2.h, $3.h", "=w,@3Upl,w,w"(<vscale x 16 x i1> %Pg, <vscale x 8 x half> %Zn, <vscale x 8 x half> %Zm)
+ ret <vscale x 8 x half> %1
+}
+
+; Function Attrs: nounwind readnone
+; CHECK: [[ARG1:%[0-9]+]]:zpr = COPY $z0
+; CHECK: [[ARG2:%[0-9]+]]:ppr = COPY $p0
+; CHECK: [[ARG3:%[0-9]+]]:ppr = COPY [[ARG2]]
+; CHECK: [[ARG4:%[0-9]+]]:zpr = COPY [[ARG1]]
+; CHECK: INLINEASM {{.*}} [[ARG3]]
+define <vscale x 4 x i32> @test_incp(<vscale x 16 x i1> %Pg, <vscale x 4 x i32> %Zn) {
+ %1 = tail call <vscale x 4 x i32> asm "incp $0.s, $1", "=w,@3Upa,0"(<vscale x 16 x i1> %Pg, <vscale x 4 x i32> %Zn)
+ ret <vscale x 4 x i32> %1
+}