[llvm] r206495 - ARM64: [su]xtw use W regs as inputs, not X regs.
Jim Grosbach
grosbach@apple.com
Thu Apr 17 13:47:32 PDT 2014
Author: grosbach
Date: Thu Apr 17 15:47:31 2014
New Revision: 206495
URL: http://llvm.org/viewvc/llvm-project?rev=206495&view=rev
Log:
ARM64: [su]xtw use W regs as inputs, not X regs.
Update the SXT[BHW]/UXTW instruction aliases and the shifted-register
addressing-mode handling.
PR19455 and rdar://16650642
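For reference, the corrected alias syntax (as exercised by the updated
aliases.s test below) takes a W register as the source of the extend even when
the destination is an X register; the register numbers here are illustrative:

    sxtb    x1, w2
    sxth    x1, w2
    sxtw    x1, w2
    uxtw    x1, w2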
Modified:
llvm/trunk/lib/Target/ARM64/InstPrinter/ARM64InstPrinter.cpp
llvm/trunk/test/CodeGen/ARM64/aapcs.ll
llvm/trunk/test/CodeGen/ARM64/addr-type-promotion.ll
llvm/trunk/test/CodeGen/ARM64/arith.ll
llvm/trunk/test/CodeGen/ARM64/atomic.ll
llvm/trunk/test/CodeGen/ARM64/coalesce-ext.ll
llvm/trunk/test/CodeGen/ARM64/extend.ll
llvm/trunk/test/CodeGen/ARM64/shifted-sext.ll
llvm/trunk/test/CodeGen/ARM64/trunc-store.ll
llvm/trunk/test/MC/ARM64/aliases.s
llvm/trunk/test/MC/ARM64/memory.s
Modified: llvm/trunk/lib/Target/ARM64/InstPrinter/ARM64InstPrinter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM64/InstPrinter/ARM64InstPrinter.cpp?rev=206495&r1=206494&r2=206495&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM64/InstPrinter/ARM64InstPrinter.cpp (original)
+++ llvm/trunk/lib/Target/ARM64/InstPrinter/ARM64InstPrinter.cpp Thu Apr 17 15:47:31 2014
@@ -104,7 +104,7 @@ void ARM64InstPrinter::printInst(const M
if (AsmMnemonic) {
O << '\t' << AsmMnemonic << '\t' << getRegisterName(Op0.getReg())
- << ", " << getRegisterName(Op1.getReg());
+ << ", " << getRegisterName(getWRegFromXReg(Op1.getReg()));
printAnnotation(O, Annot);
return;
}
@@ -1253,11 +1253,15 @@ void ARM64InstPrinter::printMemoryPostIn
void ARM64InstPrinter::printMemoryRegOffset(const MCInst *MI, unsigned OpNum,
raw_ostream &O, int LegalShiftAmt) {
- O << '[' << getRegisterName(MI->getOperand(OpNum).getReg()) << ", "
- << getRegisterName(MI->getOperand(OpNum + 1).getReg());
-
unsigned Val = MI->getOperand(OpNum + 2).getImm();
ARM64_AM::ExtendType ExtType = ARM64_AM::getMemExtendType(Val);
+
+ O << '[' << getRegisterName(MI->getOperand(OpNum).getReg()) << ", ";
+ if (ExtType == ARM64_AM::UXTW || ExtType == ARM64_AM::SXTW)
+ O << getRegisterName(getWRegFromXReg(MI->getOperand(OpNum + 1).getReg()));
+ else
+ O << getRegisterName(MI->getOperand(OpNum + 1).getReg());
+
bool DoShift = ARM64_AM::getMemDoShift(Val);
if (ExtType == ARM64_AM::UXTX) {
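Concretely, printMemoryRegOffset now prints a W register for the offset only
when the extend is the 32-bit sxtw/uxtw; the 64-bit lsl/uxtx forms keep the X
register. A short illustration, mirroring the updated atomic.ll and memory.s
tests below (register numbers are arbitrary):

    ldrb    w0, [x0, w1, sxtw]        ; 32-bit offset, sign-extended
    str     d1, [sp, w3, uxtw #3]     ; 32-bit offset, zero-extended and scaled
    str     d1, [sp, x3]              ; 64-bit offset (lsl/uxtx) stays an X register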
Modified: llvm/trunk/test/CodeGen/ARM64/aapcs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM64/aapcs.ll?rev=206495&r1=206494&r2=206495&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM64/aapcs.ll (original)
+++ llvm/trunk/test/CodeGen/ARM64/aapcs.ll Thu Apr 17 15:47:31 2014
@@ -57,7 +57,7 @@ define void @test_extension(i1 %bool, i8
%ext_char = sext i8 %char to i64
store volatile i64 %ext_char, i64* @var64
-; CHECK: sxtb [[EXT:x[0-9]+]], x1
+; CHECK: sxtb [[EXT:x[0-9]+]], w1
; CHECK: str [[EXT]], [{{x[0-9]+}}, :lo12:var64]
%ext_short = zext i16 %short to i64
@@ -67,7 +67,7 @@ define void @test_extension(i1 %bool, i8
%ext_int = zext i32 %int to i64
store volatile i64 %ext_int, i64* @var64
-; CHECK: uxtw [[EXT:x[0-9]+]], x3
+; CHECK: uxtw [[EXT:x[0-9]+]], w3
; CHECK: str [[EXT]], [{{x[0-9]+}}, :lo12:var64]
ret void
Modified: llvm/trunk/test/CodeGen/ARM64/addr-type-promotion.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM64/addr-type-promotion.ll?rev=206495&r1=206494&r2=206495&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM64/addr-type-promotion.ll (original)
+++ llvm/trunk/test/CodeGen/ARM64/addr-type-promotion.ll Thu Apr 17 15:47:31 2014
@@ -11,8 +11,8 @@ define zeroext i8 @fullGtU(i32 %i1, i32
; CHECK: adrp [[PAGE:x[0-9]+]], _block@GOTPAGE
; CHECK: ldr [[ADDR:x[0-9]+]], {{\[}}[[PAGE]], _block@GOTPAGEOFF]
; CHECK-NEXT: ldr [[BLOCKBASE:x[0-9]+]], {{\[}}[[ADDR]]]
-; CHECK-NEXT: ldrb [[BLOCKVAL1:w[0-9]+]], {{\[}}[[BLOCKBASE]], x0, sxtw]
-; CHECK-NEXT: ldrb [[BLOCKVAL2:w[0-9]+]], {{\[}}[[BLOCKBASE]], x1, sxtw]
+; CHECK-NEXT: ldrb [[BLOCKVAL1:w[0-9]+]], {{\[}}[[BLOCKBASE]], w0, sxtw]
+; CHECK-NEXT: ldrb [[BLOCKVAL2:w[0-9]+]], {{\[}}[[BLOCKBASE]], w1, sxtw]
; CHECK-NEXT cmp [[BLOCKVAL1]], [[BLOCKVAL2]]
; CHECK-NEXT b.ne
; Next BB
Modified: llvm/trunk/test/CodeGen/ARM64/arith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM64/arith.ll?rev=206495&r1=206494&r2=206495&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM64/arith.ll (original)
+++ llvm/trunk/test/CodeGen/ARM64/arith.ll Thu Apr 17 15:47:31 2014
@@ -155,7 +155,7 @@ entry:
define i64 @t17(i16 %a, i64 %x) nounwind ssp {
entry:
; CHECK-LABEL: t17:
-; CHECK: sxth [[REG:x[0-9]+]], x0
+; CHECK: sxth [[REG:x[0-9]+]], w0
; CHECK: sub x0, xzr, [[REG]], lsl #32
; CHECK: ret
%tmp16 = sext i16 %a to i64
Modified: llvm/trunk/test/CodeGen/ARM64/atomic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM64/atomic.ll?rev=206495&r1=206494&r2=206495&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM64/atomic.ll (original)
+++ llvm/trunk/test/CodeGen/ARM64/atomic.ll Thu Apr 17 15:47:31 2014
@@ -118,8 +118,7 @@ define i8 @atomic_load_relaxed_8(i8* %p,
%ptr_regoff = getelementptr i8* %p, i32 %off32
%val_regoff = load atomic i8* %ptr_regoff unordered, align 1
%tot1 = add i8 %val_unsigned, %val_regoff
- ; FIXME: syntax is incorrect: "sxtw" should not be able to go with an x-reg.
-; CHECK: ldrb {{w[0-9]+}}, [x0, x1, sxtw]
+; CHECK: ldrb {{w[0-9]+}}, [x0, w1, sxtw]
%ptr_unscaled = getelementptr i8* %p, i32 -256
%val_unscaled = load atomic i8* %ptr_unscaled monotonic, align 1
@@ -144,8 +143,7 @@ define i16 @atomic_load_relaxed_16(i16*
%ptr_regoff = getelementptr i16* %p, i32 %off32
%val_regoff = load atomic i16* %ptr_regoff unordered, align 2
%tot1 = add i16 %val_unsigned, %val_regoff
- ; FIXME: syntax is incorrect: "sxtw" should not be able to go with an x-reg.
-; CHECK: ldrh {{w[0-9]+}}, [x0, x1, sxtw #1]
+; CHECK: ldrh {{w[0-9]+}}, [x0, w1, sxtw #1]
%ptr_unscaled = getelementptr i16* %p, i32 -128
%val_unscaled = load atomic i16* %ptr_unscaled monotonic, align 2
@@ -170,8 +168,7 @@ define i32 @atomic_load_relaxed_32(i32*
%ptr_regoff = getelementptr i32* %p, i32 %off32
%val_regoff = load atomic i32* %ptr_regoff unordered, align 4
%tot1 = add i32 %val_unsigned, %val_regoff
- ; FIXME: syntax is incorrect: "sxtw" should not be able to go with an x-reg.
-; CHECK: ldr {{w[0-9]+}}, [x0, x1, sxtw #2]
+; CHECK: ldr {{w[0-9]+}}, [x0, w1, sxtw #2]
%ptr_unscaled = getelementptr i32* %p, i32 -64
%val_unscaled = load atomic i32* %ptr_unscaled monotonic, align 4
@@ -196,8 +193,7 @@ define i64 @atomic_load_relaxed_64(i64*
%ptr_regoff = getelementptr i64* %p, i32 %off32
%val_regoff = load atomic i64* %ptr_regoff unordered, align 8
%tot1 = add i64 %val_unsigned, %val_regoff
- ; FIXME: syntax is incorrect: "sxtw" should not be able to go with an x-reg.
-; CHECK: ldr {{x[0-9]+}}, [x0, x1, sxtw #3]
+; CHECK: ldr {{x[0-9]+}}, [x0, w1, sxtw #3]
%ptr_unscaled = getelementptr i64* %p, i32 -32
%val_unscaled = load atomic i64* %ptr_unscaled monotonic, align 8
@@ -229,8 +225,7 @@ define void @atomic_store_relaxed_8(i8*
%ptr_regoff = getelementptr i8* %p, i32 %off32
store atomic i8 %val, i8* %ptr_regoff unordered, align 1
- ; FIXME: syntax is incorrect: "sxtw" should not be able to go with an x-reg.
-; CHECK: strb {{w[0-9]+}}, [x0, x1, sxtw]
+; CHECK: strb {{w[0-9]+}}, [x0, w1, sxtw]
%ptr_unscaled = getelementptr i8* %p, i32 -256
store atomic i8 %val, i8* %ptr_unscaled monotonic, align 1
@@ -252,8 +247,7 @@ define void @atomic_store_relaxed_16(i16
%ptr_regoff = getelementptr i16* %p, i32 %off32
store atomic i16 %val, i16* %ptr_regoff unordered, align 2
- ; FIXME: syntax is incorrect: "sxtw" should not be able to go with an x-reg.
-; CHECK: strh {{w[0-9]+}}, [x0, x1, sxtw #1]
+; CHECK: strh {{w[0-9]+}}, [x0, w1, sxtw #1]
%ptr_unscaled = getelementptr i16* %p, i32 -128
store atomic i16 %val, i16* %ptr_unscaled monotonic, align 2
@@ -275,8 +269,7 @@ define void @atomic_store_relaxed_32(i32
%ptr_regoff = getelementptr i32* %p, i32 %off32
store atomic i32 %val, i32* %ptr_regoff unordered, align 4
- ; FIXME: syntax is incorrect: "sxtw" should not be able to go with an x-reg.
-; CHECK: str {{w[0-9]+}}, [x0, x1, sxtw #2]
+; CHECK: str {{w[0-9]+}}, [x0, w1, sxtw #2]
%ptr_unscaled = getelementptr i32* %p, i32 -64
store atomic i32 %val, i32* %ptr_unscaled monotonic, align 4
@@ -298,8 +291,7 @@ define void @atomic_store_relaxed_64(i64
%ptr_regoff = getelementptr i64* %p, i32 %off32
store atomic i64 %val, i64* %ptr_regoff unordered, align 8
- ; FIXME: syntax is incorrect: "sxtw" should not be able to go with an x-reg.
-; CHECK: str {{x[0-9]+}}, [x0, x1, sxtw #3]
+; CHECK: str {{x[0-9]+}}, [x0, w1, sxtw #3]
%ptr_unscaled = getelementptr i64* %p, i32 -32
store atomic i64 %val, i64* %ptr_unscaled monotonic, align 8
Modified: llvm/trunk/test/CodeGen/ARM64/coalesce-ext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM64/coalesce-ext.ll?rev=206495&r1=206494&r2=206495&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM64/coalesce-ext.ll (original)
+++ llvm/trunk/test/CodeGen/ARM64/coalesce-ext.ll Thu Apr 17 15:47:31 2014
@@ -7,7 +7,7 @@ define i32 @test1sext(i64 %A, i64 %B, i3
%D = trunc i64 %C to i32
%E = shl i64 %C, 32
%F = ashr i64 %E, 32
- ; CHECK: sxtw x[[EXT:[0-9]+]], x[[SUM]]
+ ; CHECK: sxtw x[[EXT:[0-9]+]], w[[SUM]]
store volatile i64 %F, i64 *%P2
; CHECK: str x[[EXT]]
store volatile i32 %D, i32* %P
Modified: llvm/trunk/test/CodeGen/ARM64/extend.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM64/extend.ll?rev=206495&r1=206494&r2=206495&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM64/extend.ll (original)
+++ llvm/trunk/test/CodeGen/ARM64/extend.ll Thu Apr 17 15:47:31 2014
@@ -5,7 +5,7 @@ define i64 @foo(i32 %i) {
; CHECK: foo
; CHECK: adrp x[[REG:[0-9]+]], _array@GOTPAGE
; CHECK: ldr x[[REG1:[0-9]+]], [x[[REG]], _array@GOTPAGEOFF]
-; CHECK: ldrsw x0, [x[[REG1]], x0, sxtw #2]
+; CHECK: ldrsw x0, [x[[REG1]], w0, sxtw #2]
; CHECK: ret
%idxprom = sext i32 %i to i64
%arrayidx = getelementptr inbounds [0 x i32]* @array, i64 0, i64 %idxprom
Modified: llvm/trunk/test/CodeGen/ARM64/shifted-sext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM64/shifted-sext.ll?rev=206495&r1=206494&r2=206495&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM64/shifted-sext.ll (original)
+++ llvm/trunk/test/CodeGen/ARM64/shifted-sext.ll Thu Apr 17 15:47:31 2014
@@ -133,7 +133,7 @@ define i64 @extendedRightShiftcharToint6
entry:
; CHECK-LABEL: extendedRightShiftcharToint64By8:
; CHECK: add w[[REG:[0-9]+]], w0, #1
-; CHECK: sxtb x[[REG]], x[[REG]]
+; CHECK: sxtb x[[REG]], w[[REG]]
; CHECK: asr x0, x[[REG]], #8
%inc = add i8 %a, 1
%conv = sext i8 %inc to i64
@@ -223,7 +223,7 @@ define i64 @extendedRightShiftshortToint
entry:
; CHECK-LABEL: extendedRightShiftshortToint64By16:
; CHECK: add w[[REG:[0-9]+]], w0, #1
-; CHECK: sxth x[[REG]], x[[REG]]
+; CHECK: sxth x[[REG]], w[[REG]]
; CHECK: asr x0, x[[REG]], #16
%inc = add i16 %a, 1
%conv = sext i16 %inc to i64
@@ -268,7 +268,7 @@ define i64 @extendedRightShiftintToint64
entry:
; CHECK-LABEL: extendedRightShiftintToint64By32:
; CHECK: add w[[REG:[0-9]+]], w0, #1
-; CHECK: sxtw x[[REG]], x[[REG]]
+; CHECK: sxtw x[[REG]], w[[REG]]
; CHECK: asr x0, x[[REG]], #32
%inc = add nsw i32 %a, 1
%conv = sext i32 %inc to i64
Modified: llvm/trunk/test/CodeGen/ARM64/trunc-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM64/trunc-store.ll?rev=206495&r1=206494&r2=206495&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM64/trunc-store.ll (original)
+++ llvm/trunk/test/CodeGen/ARM64/trunc-store.ll Thu Apr 17 15:47:31 2014
@@ -22,7 +22,7 @@ define void @fct32(i32 %arg, i64 %var) {
; w0 is %arg
; CHECK-NEXT: sub w[[OFFSETREGNUM:[0-9]+]], w0, #1
; w1 is %var truncated
-; CHECK-NEXT: str w1, {{\[}}[[GLOBALADDR]], x[[OFFSETREGNUM]], sxtw #2]
+; CHECK-NEXT: str w1, {{\[}}[[GLOBALADDR]], w[[OFFSETREGNUM]], sxtw #2]
; CHECK-NEXT: ret
bb:
%.pre37 = load i32** @zptr32, align 8
@@ -42,7 +42,7 @@ define void @fct16(i32 %arg, i64 %var) {
; w0 is %arg
; CHECK-NEXT: sub w[[OFFSETREGNUM:[0-9]+]], w0, #1
; w1 is %var truncated
-; CHECK-NEXT: strh w1, {{\[}}[[GLOBALADDR]], x[[OFFSETREGNUM]], sxtw #1]
+; CHECK-NEXT: strh w1, {{\[}}[[GLOBALADDR]], w[[OFFSETREGNUM]], sxtw #1]
; CHECK-NEXT: ret
bb:
%.pre37 = load i16** @zptr16, align 8
Modified: llvm/trunk/test/MC/ARM64/aliases.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/ARM64/aliases.s?rev=206495&r1=206494&r2=206495&view=diff
==============================================================================
--- llvm/trunk/test/MC/ARM64/aliases.s (original)
+++ llvm/trunk/test/MC/ARM64/aliases.s Thu Apr 17 15:47:31 2014
@@ -239,19 +239,19 @@ foo:
; CHECK: uxtb w1, w2
; CHECK: uxth w1, w2
- sxtb x1, x2
- sxth x1, x2
- sxtw x1, x2
- uxtb x1, x2
- uxth x1, x2
- uxtw x1, x2
+ sxtb x1, w2
+ sxth x1, w2
+ sxtw x1, w2
+ uxtb x1, w2
+ uxth x1, w2
+ uxtw x1, w2
-; CHECK: sxtb x1, x2
-; CHECK: sxth x1, x2
-; CHECK: sxtw x1, x2
-; CHECK: uxtb x1, x2
-; CHECK: uxth x1, x2
-; CHECK: uxtw x1, x2
+; CHECK: sxtb x1, w2
+; CHECK: sxth x1, w2
+; CHECK: sxtw x1, w2
+; CHECK: uxtb x1, w2
+; CHECK: uxth x1, w2
+; CHECK: uxtw x1, w2
;-----------------------------------------------------------------------------
; Negate with carry
Modified: llvm/trunk/test/MC/ARM64/memory.s
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/MC/ARM64/memory.s?rev=206495&r1=206494&r2=206495&view=diff
==============================================================================
--- llvm/trunk/test/MC/ARM64/memory.s (original)
+++ llvm/trunk/test/MC/ARM64/memory.s Thu Apr 17 15:47:31 2014
@@ -426,14 +426,14 @@ foo:
; CHECK: ldr q1, [x1, x2, lsl #4] ; encoding: [0x21,0x78,0xe2,0x3c]
str d1, [sp, x3]
- str d1, [sp, x3, uxtw #3]
+ str d1, [sp, w3, uxtw #3]
str q1, [sp, x3]
- str q1, [sp, x3, uxtw #4]
+ str q1, [sp, w3, uxtw #4]
; CHECK: str d1, [sp, x3] ; encoding: [0xe1,0x6b,0x23,0xfc]
-; CHECK: str d1, [sp, x3, uxtw #3] ; encoding: [0xe1,0x5b,0x23,0xfc]
+; CHECK: str d1, [sp, w3, uxtw #3] ; encoding: [0xe1,0x5b,0x23,0xfc]
; CHECK: str q1, [sp, x3] ; encoding: [0xe1,0x6b,0xa3,0x3c]
-; CHECK: str q1, [sp, x3, uxtw #4] ; encoding: [0xe1,0x5b,0xa3,0x3c]
+; CHECK: str q1, [sp, w3, uxtw #4] ; encoding: [0xe1,0x5b,0xa3,0x3c]
;-----------------------------------------------------------------------------
; Load literal