[llvm-commits] [llvm] r147516 - in /llvm/trunk: lib/Target/Mips/MipsISelLowering.cpp test/CodeGen/Mips/2008-08-01-AsmInline.ll test/CodeGen/Mips/inlineasm64.ll
Akira Hatanaka
ahatanaka@mips.com
Tue Jan 3 18:45:01 PST 2012
Author: ahatanak
Date: Tue Jan 3 20:45:01 2012
New Revision: 147516
URL: http://llvm.org/viewvc/llvm-project?rev=147516&view=rev
Log:
Have getRegForInlineAsmConstraint return the correct register class when target
is Mips64.
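For reference, a minimal user-level sketch (not part of this commit) of the kind of inline asm this change affects, assuming GCC-style extended asm and an N64 build: with this patch, a 64-bit operand under the "r" constraint is assigned a register from CPU64Regs rather than the 32-bit CPURegs class.

  /* Hypothetical example; mirrors the daddu test added in inlineasm64.ll below. */
  long long add64(long long a, long long b) {
    long long r;
    __asm__("daddu %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
    return r;
  }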
Added:
llvm/trunk/test/CodeGen/Mips/inlineasm64.ll
Modified:
llvm/trunk/lib/Target/Mips/MipsISelLowering.cpp
llvm/trunk/test/CodeGen/Mips/2008-08-01-AsmInline.ll
Modified: llvm/trunk/lib/Target/Mips/MipsISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MipsISelLowering.cpp?rev=147516&r1=147515&r2=147516&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Mips/MipsISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/Mips/MipsISelLowering.cpp Tue Jan 3 20:45:01 2012
@@ -2871,14 +2871,19 @@
     case 'd': // Address register. Same as 'r' unless generating MIPS16 code.
     case 'y': // Same as 'r'. Exists for compatibility.
     case 'r':
-      return std::make_pair(0U, Mips::CPURegsRegisterClass);
+      if (VT == MVT::i32)
+        return std::make_pair(0U, Mips::CPURegsRegisterClass);
+      assert(VT == MVT::i64 && "Unexpected type.");
+      return std::make_pair(0U, Mips::CPU64RegsRegisterClass);
     case 'f':
       if (VT == MVT::f32)
         return std::make_pair(0U, Mips::FGR32RegisterClass);
-      if (VT == MVT::f64)
-        if ((!Subtarget->isSingleFloat()) && (!Subtarget->isFP64bit()))
+      if ((VT == MVT::f64) && (!Subtarget->isSingleFloat())) {
+        if (Subtarget->isFP64bit())
+          return std::make_pair(0U, Mips::FGR64RegisterClass);
+        else
           return std::make_pair(0U, Mips::AFGR64RegisterClass);
-      break;
+      }
     }
   }
   return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
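For the 'f' constraint, an f64 operand now selects FGR64 when the subtarget has 64-bit FPU registers and AFGR64 (even/odd 32-bit register pairs) otherwise. A minimal user-level sketch (not part of this commit), again assuming GCC-style extended asm:

  /* Hypothetical example; mirrors the neg.d test added below. */
  double negate_d(double x) {
    double r;
    __asm__("neg.d %0, %1" : "=f"(r) : "f"(x));
    return r;
  }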
Modified: llvm/trunk/test/CodeGen/Mips/2008-08-01-AsmInline.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/2008-08-01-AsmInline.ll?rev=147516&r1=147515&r2=147516&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/2008-08-01-AsmInline.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/2008-08-01-AsmInline.ll Tue Jan 3 20:45:01 2012
@@ -1,4 +1,5 @@
 ; RUN: llc -march=mips < %s | FileCheck %s
+; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 < %s | FileCheck %s
 
 %struct.DWstruct = type { i32, i32 }
 
@@ -13,3 +14,40 @@
   %res = add i32 %asmresult, %asmresult1
   ret i32 %res
 }
+
+@gi2 = external global i32
+@gi1 = external global i32
+@gi0 = external global i32
+@gf0 = external global float
+@gf1 = external global float
+@gd0 = external global double
+@gd1 = external global double
+
+define void @foo0() nounwind {
+entry:
+; CHECK: addu
+  %0 = load i32* @gi1, align 4
+  %1 = load i32* @gi0, align 4
+  %2 = tail call i32 asm "addu $0, $1, $2", "=r,r,r"(i32 %0, i32 %1) nounwind
+  store i32 %2, i32* @gi2, align 4
+  ret void
+}
+
+define void @foo2() nounwind {
+entry:
+; CHECK: neg.s
+  %0 = load float* @gf1, align 4
+  %1 = tail call float asm "neg.s $0, $1", "=f,f"(float %0) nounwind
+  store float %1, float* @gf0, align 4
+  ret void
+}
+
+define void @foo3() nounwind {
+entry:
+; CHECK: neg.d
+  %0 = load double* @gd1, align 8
+  %1 = tail call double asm "neg.d $0, $1", "=f,f"(double %0) nounwind
+  store double %1, double* @gd0, align 8
+  ret void
+}
+
Added: llvm/trunk/test/CodeGen/Mips/inlineasm64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/inlineasm64.ll?rev=147516&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/inlineasm64.ll (added)
+++ llvm/trunk/test/CodeGen/Mips/inlineasm64.ll Tue Jan 3 20:45:01 2012
@@ -0,0 +1,17 @@
+; RUN: llc -march=mips64el -mcpu=mips64r2 -mattr=n64 < %s | FileCheck %s
+
+@gl2 = external global i64
+@gl1 = external global i64
+@gl0 = external global i64
+
+define void @foo1() nounwind {
+entry:
+; CHECK: foo1
+; CHECK: daddu
+  %0 = load i64* @gl1, align 8
+  %1 = load i64* @gl0, align 8
+  %2 = tail call i64 asm "daddu $0, $1, $2", "=r,r,r"(i64 %0, i64 %1) nounwind
+  store i64 %2, i64* @gl2, align 8
+  ret void
+}
+