[llvm] r319445 - [CodeGen] Always use `printReg` to print registers in both MIR and debug
Francis Visoiu Mistrih via llvm-commits
llvm-commits at lists.llvm.org
Thu Nov 30 08:12:24 PST 2017
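
All of the test updates below reflect the same printReg convention change. In the IPRA debug output (the .ll test), bare upper-case register names such as CS, EFLAGS, or R11B are now printed as %-prefixed lower-case names (%cs, %eflags, %r11b). In the MIR tests, the '_' placeholder that stood for a null register operand is now printed as %noreg. A minimal before/after sketch, taken from the leaFixup32.mir hunk further down (one representative line, not the whole hunk):

    -    %eax = LEA32r killed %eax, 1, killed %ebp, -5, _
    +    %eax = LEA32r killed %eax, 1, killed %ebp, -5, %noreg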
Modified: llvm/trunk/test/CodeGen/X86/ipra-reg-usage.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/ipra-reg-usage.ll?rev=319445&r1=319444&r2=319445&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/ipra-reg-usage.ll (original)
+++ llvm/trunk/test/CodeGen/X86/ipra-reg-usage.ll Thu Nov 30 08:12:24 2017
@@ -3,7 +3,7 @@
target triple = "x86_64-unknown-unknown"
declare void @bar1()
define preserve_allcc void @foo()#0 {
-; CHECK: foo Clobbered Registers: CS DS EFLAGS EIP EIZ ES FPSW FS GS IP RIP RIZ SS SSP BND0 BND1 BND2 BND3 CR0 CR1 CR2 CR3 CR4 CR5 CR6 CR7 CR8 CR9 CR10 CR11 CR12 CR13 CR14 CR15 DR0 DR1 DR2 DR3 DR4 DR5 DR6 DR7 DR8 DR9 DR10 DR11 DR12 DR13 DR14 DR15 FP0 FP1 FP2 FP3 FP4 FP5 FP6 FP7 K0 K1 K2 K3 K4 K5 K6 K7 MM0 MM1 MM2 MM3 MM4 MM5 MM6 MM7 R11 ST0 ST1 ST2 ST3 ST4 ST5 ST6 ST7 XMM16 XMM17 XMM18 XMM19 XMM20 XMM21 XMM22 XMM23 XMM24 XMM25 XMM26 XMM27 XMM28 XMM29 XMM30 XMM31 YMM0 YMM1 YMM2 YMM3 YMM4 YMM5 YMM6 YMM7 YMM8 YMM9 YMM10 YMM11 YMM12 YMM13 YMM14 YMM15 YMM16 YMM17 YMM18 YMM19 YMM20 YMM21 YMM22 YMM23 YMM24 YMM25 YMM26 YMM27 YMM28 YMM29 YMM30 YMM31 ZMM0 ZMM1 ZMM2 ZMM3 ZMM4 ZMM5 ZMM6 ZMM7 ZMM8 ZMM9 ZMM10 ZMM11 ZMM12 ZMM13 ZMM14 ZMM15 ZMM16 ZMM17 ZMM18 ZMM19 ZMM20 ZMM21 ZMM22 ZMM23 ZMM24 ZMM25 ZMM26 ZMM27 ZMM28 ZMM29 ZMM30 ZMM31 R11B R11D R11W
+; CHECK: foo Clobbered Registers: %cs %ds %eflags %eip %eiz %es %fpsw %fs %gs %ip %rip %riz %ss %ssp %bnd0 %bnd1 %bnd2 %bnd3 %cr0 %cr1 %cr2 %cr3 %cr4 %cr5 %cr6 %cr7 %cr8 %cr9 %cr10 %cr11 %cr12 %cr13 %cr14 %cr15 %dr0 %dr1 %dr2 %dr3 %dr4 %dr5 %dr6 %dr7 %dr8 %dr9 %dr10 %dr11 %dr12 %dr13 %dr14 %dr15 %fp0 %fp1 %fp2 %fp3 %fp4 %fp5 %fp6 %fp7 %k0 %k1 %k2 %k3 %k4 %k5 %k6 %k7 %mm0 %mm1 %mm2 %mm3 %mm4 %mm5 %mm6 %mm7 %r11 %st0 %st1 %st2 %st3 %st4 %st5 %st6 %st7 %xmm16 %xmm17 %xmm18 %xmm19 %xmm20 %xmm21 %xmm22 %xmm23 %xmm24 %xmm25 %xmm26 %xmm27 %xmm28 %xmm29 %xmm30 %xmm31 %ymm0 %ymm1 %ymm2 %ymm3 %ymm4 %ymm5 %ymm6 %ymm7 %ymm8 %ymm9 %ymm10 %ymm11 %ymm12 %ymm13 %ymm14 %ymm15 %ymm16 %ymm17 %ymm18 %ymm19 %ymm20 %ymm21 %ymm22 %ymm23 %ymm24 %ymm25 %ymm26 %ymm27 %ymm28 %ymm29 %ymm30 %ymm31 %zmm0 %zmm1 %zmm2 %zmm3 %zmm4 %zmm5 %zmm6 %zmm7 %zmm8 %zmm9 %zmm10 %zmm11 %zmm12 %zmm13 %zmm14 %zmm15 %zmm16 %zmm17 %zmm18 %zmm19 %zmm20 %zmm21 %zmm22 %zmm23 %zmm24 %zmm25 %zmm26 %zmm27 %zmm28 %zmm29 %zmm30 %zmm31 %r11b %r11d %r11w
call void @bar1()
call void @bar2()
ret void
Modified: llvm/trunk/test/CodeGen/X86/lea-opt-with-debug.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/lea-opt-with-debug.mir?rev=319445&r1=319444&r2=319445&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/lea-opt-with-debug.mir (original)
+++ llvm/trunk/test/CodeGen/X86/lea-opt-with-debug.mir Thu Nov 30 08:12:24 2017
@@ -95,28 +95,28 @@ body: |
bb.0 (%ir-block.0):
successors: %bb.1(0x80000000)
- ; CHECK: %3:gr64_nosp = LEA64r %2, 2, %2, 0, _, debug-location !13
- ; CHECK-NEXT: %4:gr64 = LEA64r %1, 4, %3, 0, _, debug-location !13
- ; CHECK-NOT: %0:gr64 = LEA64r %1, 4, %3, 8, _, debug-location !14
- ; CHECK: DBG_VALUE debug-use %4, debug-use _, !11, !DIExpression(DW_OP_plus_uconst, 8, DW_OP_stack_value), debug-location !15
-
- %1 = MOV64rm %rip, 1, _, @c, _, debug-location !13 :: (dereferenceable load 8 from @c)
- %2 = MOVSX64rm32 %rip, 1, _, @a, _, debug-location !13 :: (dereferenceable load 4 from @a)
- %3 = LEA64r %2, 2, %2, 0, _, debug-location !13
- %4 = LEA64r %1, 4, %3, 0, _, debug-location !13
+ ; CHECK: %3:gr64_nosp = LEA64r %2, 2, %2, 0, %noreg, debug-location !13
+ ; CHECK-NEXT: %4:gr64 = LEA64r %1, 4, %3, 0, %noreg, debug-location !13
+ ; CHECK-NOT: %0:gr64 = LEA64r %1, 4, %3, 8, %noreg, debug-location !14
+ ; CHECK: DBG_VALUE debug-use %4, debug-use %noreg, !11, !DIExpression(DW_OP_plus_uconst, 8, DW_OP_stack_value), debug-location !15
+
+ %1 = MOV64rm %rip, 1, %noreg, @c, %noreg, debug-location !13 :: (dereferenceable load 8 from @c)
+ %2 = MOVSX64rm32 %rip, 1, %noreg, @a, %noreg, debug-location !13 :: (dereferenceable load 4 from @a)
+ %3 = LEA64r %2, 2, %2, 0, %noreg, debug-location !13
+ %4 = LEA64r %1, 4, %3, 0, %noreg, debug-location !13
%5 = COPY %4.sub_32bit, debug-location !13
- MOV32mr %rip, 1, _, @d, _, killed %5, debug-location !13 :: (store 4 into @d)
- %0 = LEA64r %1, 4, %3, 8, _, debug-location !14
- DBG_VALUE debug-use %0, debug-use _, !11, !DIExpression(), debug-location !15
+ MOV32mr %rip, 1, %noreg, @d, %noreg, killed %5, debug-location !13 :: (store 4 into @d)
+ %0 = LEA64r %1, 4, %3, 8, %noreg, debug-location !14
+ DBG_VALUE debug-use %0, debug-use %noreg, !11, !DIExpression(), debug-location !15
; CHECK-LABEL: bb.1 (%ir-block.8):
- ; CHECK: %6:gr32 = MOV32rm %4, 1, _, 8, _, debug-location !17 :: (load 4 from %ir.7)
+ ; CHECK: %6:gr32 = MOV32rm %4, 1, %noreg, 8, %noreg, debug-location !17 :: (load 4 from %ir.7)
bb.1 (%ir-block.8):
successors: %bb.1(0x80000000)
- %6 = MOV32rm %0, 1, _, 0, _, debug-location !17 :: (load 4 from %ir.7)
- MOV32mr %rip, 1, _, @d, _, killed %6, debug-location !17 :: (store 4 into @d)
+ %6 = MOV32rm %0, 1, %noreg, 0, %noreg, debug-location !17 :: (load 4 from %ir.7)
+ MOV32mr %rip, 1, %noreg, @d, %noreg, killed %6, debug-location !17 :: (store 4 into @d)
JMP_1 %bb.1, debug-location !18
...
Modified: llvm/trunk/test/CodeGen/X86/leaFixup32.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/leaFixup32.mir?rev=319445&r1=319444&r2=319445&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/leaFixup32.mir (original)
+++ llvm/trunk/test/CodeGen/X86/leaFixup32.mir Thu Nov 30 08:12:24 2017
@@ -107,7 +107,7 @@ body: |
; CHECK: %eax = ADD32rr %eax, killed %ebp
; CHECK: %eax = ADD32ri8 %eax, -5
- %eax = LEA32r killed %eax, 1, killed %ebp, -5, _
+ %eax = LEA32r killed %eax, 1, killed %ebp, -5, %noreg
RETQ %eax
...
@@ -142,7 +142,7 @@ body: |
; CHECK: %ebp = ADD32rr %ebp, killed %eax
; CHECK: %ebp = ADD32ri8 %ebp, -5
- %ebp = LEA32r killed %ebp, 1, killed %eax, -5, _
+ %ebp = LEA32r killed %ebp, 1, killed %eax, -5, %noreg
RETQ %ebp
...
@@ -176,7 +176,7 @@ body: |
liveins: %eax, %ebp
; CHECK: %ebp = ADD32rr %ebp, killed %eax
- %ebp = LEA32r killed %ebp, 1, killed %eax, 0, _
+ %ebp = LEA32r killed %ebp, 1, killed %eax, 0, %noreg
RETQ %ebp
...
@@ -212,7 +212,7 @@ body: |
; CHECK: %ebx = LEA32r killed %eax, 1, killed %ebp, 0
; CHECK: %ebx = ADD32ri8 %ebx, -5
- %ebx = LEA32r killed %eax, 1, killed %ebp, -5, _
+ %ebx = LEA32r killed %eax, 1, killed %ebp, -5, %noreg
RETQ %ebx
...
@@ -245,10 +245,10 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %eax, %ebp
- ; CHECK: %ebx = LEA32r killed %eax, 1, killed %ebp, 0, _
+ ; CHECK: %ebx = LEA32r killed %eax, 1, killed %ebp, 0, %noreg
; CHECK: %ebx = ADD32ri8 %ebx, -5
- %ebx = LEA32r killed %ebp, 1, killed %eax, -5, _
+ %ebx = LEA32r killed %ebp, 1, killed %eax, -5, %noreg
RETQ %ebx
...
@@ -281,9 +281,9 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %eax, %ebp
- ; CHECK: %ebx = LEA32r killed %eax, 1, killed %ebp, 0, _
+ ; CHECK: %ebx = LEA32r killed %eax, 1, killed %ebp, 0, %noreg
- %ebx = LEA32r killed %ebp, 1, killed %eax, 0, _
+ %ebx = LEA32r killed %ebp, 1, killed %eax, 0, %noreg
RETQ %ebx
...
@@ -318,7 +318,7 @@ body: |
; CHECK: %eax = ADD32rr %eax, killed %ebp
; CHECK: %eax = ADD32ri %eax, 129
- %eax = LEA32r killed %eax, 1, killed %ebp, 129, _
+ %eax = LEA32r killed %eax, 1, killed %ebp, 129, %noreg
RETQ %eax
...
@@ -354,7 +354,7 @@ body: |
; CHECK: %ebx = MOV32rr %ebp
; CHECK: %ebx = ADD32rr %ebx, %ebp
- %ebx = LEA32r %ebp, 1, %ebp, 0, _
+ %ebx = LEA32r %ebp, 1, %ebp, 0, %noreg
RETQ %ebx
...
@@ -386,10 +386,10 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %eax, %ebp, %ebx
- ; CHECK: %ebx = LEA32r _, 1, %ebp, 5, _
+ ; CHECK: %ebx = LEA32r %noreg, 1, %ebp, 5, %noreg
; CHECK: %ebx = ADD32rr %ebx, %ebp
- %ebx = LEA32r %ebp, 1, %ebp, 5, _
+ %ebx = LEA32r %ebp, 1, %ebp, 5, %noreg
RETQ %ebx
...
@@ -421,10 +421,10 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %eax, %ebp, %ebx
- ; CHECK: %ebx = LEA32r _, 4, %ebp, 5, _
+ ; CHECK: %ebx = LEA32r %noreg, 4, %ebp, 5, %noreg
; CHECK: %ebx = ADD32rr %ebx, %ebp
- %ebx = LEA32r %ebp, 4, %ebp, 5, _
+ %ebx = LEA32r %ebp, 4, %ebp, 5, %noreg
RETQ %ebx
...
@@ -456,9 +456,9 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %eax, %ebp, %ebx
- ; CHECK: %ebp = LEA32r killed %ebp, 4, killed %ebp, 0, _
+ ; CHECK: %ebp = LEA32r killed %ebp, 4, killed %ebp, 0, %noreg
- %ebp = LEA32r killed %ebp, 4, killed %ebp, 0, _
+ %ebp = LEA32r killed %ebp, 4, killed %ebp, 0, %noreg
RETQ %ebp
...
@@ -490,17 +490,17 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %eax, %ebp, %ebx
- ; CHECK: %ebx = LEA32r killed %eax, 4, killed %eax, 5, _
- ; CHECK: %ebp = LEA32r killed %ebx, 4, killed %ebx, 0, _
+ ; CHECK: %ebx = LEA32r killed %eax, 4, killed %eax, 5, %noreg
+ ; CHECK: %ebp = LEA32r killed %ebx, 4, killed %ebx, 0, %noreg
; CHECK: %ebp = ADD32ri8 %ebp, 5
CMP32rr %eax, killed %ebx, implicit-def %eflags
- %ebx = LEA32r killed %eax, 4, killed %eax, 5, _
+ %ebx = LEA32r killed %eax, 4, killed %eax, 5, %noreg
JE_1 %bb.1, implicit %eflags
RETQ %ebx
bb.1:
liveins: %eax, %ebp, %ebx
- %ebp = LEA32r killed %ebx, 4, killed %ebx, 5, _
+ %ebp = LEA32r killed %ebx, 4, killed %ebx, 5, %noreg
RETQ %ebp
...
Modified: llvm/trunk/test/CodeGen/X86/leaFixup64.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/leaFixup64.mir?rev=319445&r1=319444&r2=319445&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/leaFixup64.mir (original)
+++ llvm/trunk/test/CodeGen/X86/leaFixup64.mir Thu Nov 30 08:12:24 2017
@@ -180,7 +180,7 @@ body: |
; CHECK: %eax = LEA64_32r killed %rax, 1, killed %rbp, 0
; CHECK: %eax = ADD32ri8 %eax, -5
- %eax = LEA64_32r killed %rax, 1, killed %rbp, -5, _
+ %eax = LEA64_32r killed %rax, 1, killed %rbp, -5, %noreg
RETQ %eax
...
@@ -215,7 +215,7 @@ body: |
; CHECK: %ebp = LEA64_32r killed %rax, 1, killed %rbp, 0
; CHECK: %ebp = ADD32ri8 %ebp, -5
- %ebp = LEA64_32r killed %rbp, 1, killed %rax, -5, _
+ %ebp = LEA64_32r killed %rbp, 1, killed %rax, -5, %noreg
RETQ %ebp
...
@@ -249,7 +249,7 @@ body: |
liveins: %rax, %rbp
; CHECK: %ebp = LEA64_32r killed %rax, 1, killed %rbp, 0
- %ebp = LEA64_32r killed %rbp, 1, killed %rax, 0, _
+ %ebp = LEA64_32r killed %rbp, 1, killed %rax, 0, %noreg
RETQ %ebp
...
@@ -284,7 +284,7 @@ body: |
; CHECK: %rax = ADD64rr %rax, killed %rbp
; CHECK: %rax = ADD64ri8 %rax, -5
- %rax = LEA64r killed %rax, 1, killed %rbp, -5, _
+ %rax = LEA64r killed %rax, 1, killed %rbp, -5, %noreg
RETQ %eax
...
@@ -319,7 +319,7 @@ body: |
; CHECK: %rbp = ADD64rr %rbp, killed %rax
; CHECK: %rbp = ADD64ri8 %rbp, -5
- %rbp = LEA64r killed %rbp, 1, killed %rax, -5, _
+ %rbp = LEA64r killed %rbp, 1, killed %rax, -5, %noreg
RETQ %ebp
...
@@ -353,7 +353,7 @@ body: |
liveins: %rax, %rbp
; CHECK: %rbp = ADD64rr %rbp, killed %rax
- %rbp = LEA64r killed %rbp, 1, killed %rax, 0, _
+ %rbp = LEA64r killed %rbp, 1, killed %rax, 0, %noreg
RETQ %ebp
...
@@ -386,10 +386,10 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp
- ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, _
+ ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, %noreg
; CHECK: %ebx = ADD32ri8 %ebx, -5
- %ebx = LEA64_32r killed %rax, 1, killed %rbp, -5, _
+ %ebx = LEA64_32r killed %rax, 1, killed %rbp, -5, %noreg
RETQ %ebx
...
@@ -422,10 +422,10 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp
- ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, _
+ ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, %noreg
; CHECK: %ebx = ADD32ri8 %ebx, -5
- %ebx = LEA64_32r killed %rbp, 1, killed %rax, -5, _
+ %ebx = LEA64_32r killed %rbp, 1, killed %rax, -5, %noreg
RETQ %ebx
...
@@ -458,9 +458,9 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp
- ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, _
+ ; CHECK: %ebx = LEA64_32r killed %rax, 1, killed %rbp, 0, %noreg
- %ebx = LEA64_32r killed %rbp, 1, killed %rax, 0, _
+ %ebx = LEA64_32r killed %rbp, 1, killed %rax, 0, %noreg
RETQ %ebx
...
@@ -493,10 +493,10 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp
- ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, _
+ ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, %noreg
; CHECK: %rbx = ADD64ri8 %rbx, -5
- %rbx = LEA64r killed %rax, 1, killed %rbp, -5, _
+ %rbx = LEA64r killed %rax, 1, killed %rbp, -5, %noreg
RETQ %ebx
...
@@ -529,10 +529,10 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp
- ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, _
+ ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, %noreg
; CHECK: %rbx = ADD64ri8 %rbx, -5
- %rbx = LEA64r killed %rbp, 1, killed %rax, -5, _
+ %rbx = LEA64r killed %rbp, 1, killed %rax, -5, %noreg
RETQ %ebx
...
@@ -565,9 +565,9 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp
- ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, _
+ ; CHECK: %rbx = LEA64r killed %rax, 1, killed %rbp, 0, %noreg
- %rbx = LEA64r killed %rbp, 1, killed %rax, 0, _
+ %rbx = LEA64r killed %rbp, 1, killed %rax, 0, %noreg
RETQ %ebx
...
@@ -599,11 +599,11 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rdi, %rbp
- ; CHECK: %r12 = LEA64r _, 2, killed %r13, 5, _
+ ; CHECK: %r12 = LEA64r %noreg, 2, killed %r13, 5, %noreg
; CHECK: %r12 = ADD64rr %r12, killed %rbp
%rbp = KILL %rbp, implicit-def %rbp
%r13 = KILL %rdi, implicit-def %r13
- %r12 = LEA64r killed %rbp, 2, killed %r13, 5, _
+ %r12 = LEA64r killed %rbp, 2, killed %r13, 5, %noreg
RETQ %r12
...
@@ -638,7 +638,7 @@ body: |
; CHECK: %eax = LEA64_32r killed %rax, 1, killed %rbp, 0
; CHECK: %eax = ADD32ri %eax, 129
- %eax = LEA64_32r killed %rax, 1, killed %rbp, 129, _
+ %eax = LEA64_32r killed %rax, 1, killed %rbp, 129, %noreg
RETQ %eax
...
@@ -670,9 +670,9 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp, %rbx
- ; CHECK: %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 0, _
+ ; CHECK: %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 0, %noreg
- %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 0, _
+ %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 0, %noreg
RETQ %ebx
...
@@ -704,9 +704,9 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp, %rbx
- ; CHECK: %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 5, _
+ ; CHECK: %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 5, %noreg
- %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 5, _
+ %ebx = LEA64_32r killed %rbp, 1, killed %rbp, 5, %noreg
RETQ %ebx
...
@@ -738,9 +738,9 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %eax, %ebp, %ebx
- ; CHECK: %ebx = LEA64_32r killed %rbp, 4, killed %rbp, 5, _
+ ; CHECK: %ebx = LEA64_32r killed %rbp, 4, killed %rbp, 5, %noreg
- %ebx = LEA64_32r killed %rbp, 4, killed %rbp, 5, _
+ %ebx = LEA64_32r killed %rbp, 4, killed %rbp, 5, %noreg
RETQ %ebx
...
@@ -775,7 +775,7 @@ body: |
; CHECK: %rax = ADD64rr %rax, killed %rbp
; CHECK: %rax = ADD64ri32 %rax, 129
- %rax = LEA64r killed %rax, 1, killed %rbp, 129, _
+ %rax = LEA64r killed %rax, 1, killed %rbp, 129, %noreg
RETQ %eax
...
@@ -810,7 +810,7 @@ body: |
; CHECK: %rbx = MOV64rr %rbp
; CHECK: %rbx = ADD64rr %rbx, %rbp
- %rbx = LEA64r %rbp, 1, %rbp, 0, _
+ %rbx = LEA64r %rbp, 1, %rbp, 0, %noreg
RETQ %ebx
...
@@ -842,10 +842,10 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp, %rbx
- ; CHECK: %rbx = LEA64r _, 1, %rbp, 5, _
+ ; CHECK: %rbx = LEA64r %noreg, 1, %rbp, 5, %noreg
; CHECK: %rbx = ADD64rr %rbx, %rbp
- %rbx = LEA64r %rbp, 1, %rbp, 5, _
+ %rbx = LEA64r %rbp, 1, %rbp, 5, %noreg
RETQ %ebx
...
@@ -877,10 +877,10 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp, %rbx
- ; CHECK: %rbx = LEA64r _, 4, %rbp, 5, _
+ ; CHECK: %rbx = LEA64r %noreg, 4, %rbp, 5, %noreg
; CHECK: %rbx = ADD64rr %rbx, %rbp
- %rbx = LEA64r %rbp, 4, %rbp, 5, _
+ %rbx = LEA64r %rbp, 4, %rbp, 5, %noreg
RETQ %ebx
...
@@ -912,9 +912,9 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp, %rbx
- ; CHECK: %rbp = LEA64r killed %rbp, 4, killed %rbp, 0, _
+ ; CHECK: %rbp = LEA64r killed %rbp, 4, killed %rbp, 0, %noreg
- %rbp = LEA64r killed %rbp, 4, killed %rbp, 0, _
+ %rbp = LEA64r killed %rbp, 4, killed %rbp, 0, %noreg
RETQ %ebp
...
@@ -946,17 +946,17 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp, %rbx
- ; CHECK: %rbx = LEA64r killed %rax, 4, killed %rax, 5, _
- ; CHECK: %rbp = LEA64r killed %rbx, 4, killed %rbx, 0, _
+ ; CHECK: %rbx = LEA64r killed %rax, 4, killed %rax, 5, %noreg
+ ; CHECK: %rbp = LEA64r killed %rbx, 4, killed %rbx, 0, %noreg
; CHECK: %rbp = ADD64ri8 %rbp, 5
CMP64rr %rax, killed %rbx, implicit-def %eflags
- %rbx = LEA64r killed %rax, 4, killed %rax, 5, _
+ %rbx = LEA64r killed %rax, 4, killed %rax, 5, %noreg
JE_1 %bb.1, implicit %eflags
RETQ %ebx
bb.1:
liveins: %rax, %rbp, %rbx
- %rbp = LEA64r killed %rbx, 4, killed %rbx, 5, _
+ %rbp = LEA64r killed %rbx, 4, killed %rbx, 5, %noreg
RETQ %ebp
...
@@ -988,9 +988,9 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp, %rbx
- ; CHECK: %ebp = LEA64_32r killed %rbp, 4, killed %rbp, 0, _
+ ; CHECK: %ebp = LEA64_32r killed %rbp, 4, killed %rbp, 0, %noreg
- %ebp = LEA64_32r killed %rbp, 4, killed %rbp, 0, _
+ %ebp = LEA64_32r killed %rbp, 4, killed %rbp, 0, %noreg
RETQ %ebp
...
@@ -1022,17 +1022,17 @@ frameInfo:
body: |
bb.0 (%ir-block.0):
liveins: %rax, %rbp, %rbx
- ; CHECK: %ebx = LEA64_32r killed %rax, 4, killed %rax, 5, _
- ; CHECK: %ebp = LEA64_32r killed %rbx, 4, killed %rbx, 0, _
+ ; CHECK: %ebx = LEA64_32r killed %rax, 4, killed %rax, 5, %noreg
+ ; CHECK: %ebp = LEA64_32r killed %rbx, 4, killed %rbx, 0, %noreg
; CHECK: %ebp = ADD32ri8 %ebp, 5
CMP64rr %rax, killed %rbx, implicit-def %eflags
- %ebx = LEA64_32r killed %rax, 4, killed %rax, 5, _
+ %ebx = LEA64_32r killed %rax, 4, killed %rax, 5, %noreg
JE_1 %bb.1, implicit %eflags
RETQ %ebx
bb.1:
liveins: %rax, %rbp, %rbx
- %ebp = LEA64_32r killed %rbx, 4, killed %rbx, 5, _
+ %ebp = LEA64_32r killed %rbx, 4, killed %rbx, 5, %noreg
RETQ %ebp
...
Modified: llvm/trunk/test/CodeGen/X86/movtopush.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/movtopush.mir?rev=319445&r1=319444&r2=319445&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/movtopush.mir (original)
+++ llvm/trunk/test/CodeGen/X86/movtopush.mir Thu Nov 30 08:12:24 2017
@@ -41,10 +41,10 @@
# CHECK-NEXT: CALLpcrel32 @good, csr_32, implicit %esp, implicit %ssp, implicit-def %esp, implicit-def %ssp
# CHECK-NEXT: ADJCALLSTACKUP32 16, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
# CHECK-NEXT: ADJCALLSTACKDOWN32 20, 0, 20, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
-# CHECK-NEXT: %1:gr32 = MOV32rm %stack.2.s, 1, _, 0, _ :: (load 4 from %stack.2.s, align 8)
-# CHECK-NEXT: %2:gr32 = MOV32rm %stack.2.s, 1, _, 4, _ :: (load 4 from %stack.2.s + 4)
-# CHECK-NEXT: %4:gr32 = LEA32r %stack.0.p, 1, _, 0, _
-# CHECK-NEXT: %5:gr32 = LEA32r %stack.1.q, 1, _, 0, _
+# CHECK-NEXT: %1:gr32 = MOV32rm %stack.2.s, 1, %noreg, 0, %noreg :: (load 4 from %stack.2.s, align 8)
+# CHECK-NEXT: %2:gr32 = MOV32rm %stack.2.s, 1, %noreg, 4, %noreg :: (load 4 from %stack.2.s + 4)
+# CHECK-NEXT: %4:gr32 = LEA32r %stack.0.p, 1, %noreg, 0, %noreg
+# CHECK-NEXT: %5:gr32 = LEA32r %stack.1.q, 1, %noreg, 0, %noreg
# CHECK-NEXT: PUSH32r %4, implicit-def %esp, implicit %esp
# CHECK-NEXT: PUSH32r %5, implicit-def %esp, implicit %esp
# CHECK-NEXT: PUSH32i8 6, implicit-def %esp, implicit %esp
@@ -101,23 +101,23 @@ body: |
bb.0.entry:
ADJCALLSTACKDOWN32 16, 0, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
%0 = COPY %esp
- MOV32mi %0, 1, _, 12, _, 4 :: (store 4 into stack + 12)
- MOV32mi %0, 1, _, 8, _, 3 :: (store 4 into stack + 8)
- MOV32mi %0, 1, _, 4, _, 2 :: (store 4 into stack + 4)
- MOV32mi %0, 1, _, 0, _, 1 :: (store 4 into stack)
+ MOV32mi %0, 1, %noreg, 12, %noreg, 4 :: (store 4 into stack + 12)
+ MOV32mi %0, 1, %noreg, 8, %noreg, 3 :: (store 4 into stack + 8)
+ MOV32mi %0, 1, %noreg, 4, %noreg, 2 :: (store 4 into stack + 4)
+ MOV32mi %0, 1, %noreg, 0, %noreg, 1 :: (store 4 into stack)
CALLpcrel32 @good, csr_32, implicit %esp, implicit %ssp, implicit-def %esp, implicit-def %ssp
ADJCALLSTACKUP32 16, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
ADJCALLSTACKDOWN32 20, 0, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
- %1 = MOV32rm %stack.2.s, 1, _, 0, _ :: (load 4 from %stack.2.s, align 8)
- %2 = MOV32rm %stack.2.s, 1, _, 4, _ :: (load 4 from %stack.2.s + 4)
+ %1 = MOV32rm %stack.2.s, 1, %noreg, 0, %noreg :: (load 4 from %stack.2.s, align 8)
+ %2 = MOV32rm %stack.2.s, 1, %noreg, 4, %noreg :: (load 4 from %stack.2.s + 4)
%3 = COPY %esp
- MOV32mr %3, 1, _, 4, _, killed %2 :: (store 4)
- MOV32mr %3, 1, _, 0, _, killed %1 :: (store 4)
- %4 = LEA32r %stack.0.p, 1, _, 0, _
- MOV32mr %3, 1, _, 16, _, killed %4 :: (store 4 into stack + 16)
- %5 = LEA32r %stack.1.q, 1, _, 0, _
- MOV32mr %3, 1, _, 12, _, killed %5 :: (store 4 into stack + 12)
- MOV32mi %3, 1, _, 8, _, 6 :: (store 4 into stack + 8)
+ MOV32mr %3, 1, %noreg, 4, %noreg, killed %2 :: (store 4)
+ MOV32mr %3, 1, %noreg, 0, %noreg, killed %1 :: (store 4)
+ %4 = LEA32r %stack.0.p, 1, %noreg, 0, %noreg
+ MOV32mr %3, 1, %noreg, 16, %noreg, killed %4 :: (store 4 into stack + 16)
+ %5 = LEA32r %stack.1.q, 1, %noreg, 0, %noreg
+ MOV32mr %3, 1, %noreg, 12, %noreg, killed %5 :: (store 4 into stack + 12)
+ MOV32mi %3, 1, %noreg, 8, %noreg, 6 :: (store 4 into stack + 8)
CALLpcrel32 @struct, csr_32, implicit %esp, implicit %ssp, implicit-def %esp, implicit-def %ssp,
ADJCALLSTACKUP32 20, 0, implicit-def dead %esp, implicit-def dead %eflags, implicit-def dead %ssp, implicit %esp, implicit %ssp
RET 0
Modified: llvm/trunk/test/CodeGen/X86/non-value-mem-operand.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/non-value-mem-operand.mir?rev=319445&r1=319444&r2=319445&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/non-value-mem-operand.mir (original)
+++ llvm/trunk/test/CodeGen/X86/non-value-mem-operand.mir Thu Nov 30 08:12:24 2017
@@ -175,14 +175,14 @@ body: |
successors: %bb.4.bb7(0x80000000)
liveins: %rax
- MOV64mr %rsp, 1, _, 32, _, %rax :: (store 8 into %stack.5)
+ MOV64mr %rsp, 1, %noreg, 32, %noreg, %rax :: (store 8 into %stack.5)
%r12 = MOV64rr killed %rax
%r12 = ADD64ri8 killed %r12, 16, implicit-def dead %eflags
%xmm0 = XORPSrr undef %xmm0, undef %xmm0
%esi = XOR32rr undef %esi, undef %esi, implicit-def dead %eflags
%rax = MOV64ri %const.0
- %xmm1 = MOVSDrm killed %rax, 1, _, 0, _ :: (load 8 from constant-pool)
- MOVSDmr %rsp, 1, _, 40, _, killed %xmm1 :: (store 8 into %stack.4)
+ %xmm1 = MOVSDrm killed %rax, 1, %noreg, 0, %noreg :: (load 8 from constant-pool)
+ MOVSDmr %rsp, 1, %noreg, 40, %noreg, killed %xmm1 :: (store 8 into %stack.4)
%eax = IMPLICIT_DEF
%ecx = XOR32rr undef %ecx, undef %ecx, implicit-def dead %eflags
@@ -200,11 +200,11 @@ body: |
successors: %bb.6.bb26(0x80000000)
liveins: %ebp, %rbx, %r14, %xmm0
- MOV32mr %rsp, 1, _, 24, _, %ebx :: (store 4 into %stack.0, align 8)
- MOV32mr %rsp, 1, _, 16, _, %ebp :: (store 4 into %stack.1, align 8)
- MOVSDmr %rsp, 1, _, 8, _, killed %xmm0 :: (store 8 into %stack.2)
- %rax = MOV64rm %rsp, 1, _, 32, _ :: (load 8 from %stack.5)
- MOV64mr %rsp, 1, _, 48, _, killed %rax :: (store 8 into %stack.3)
+ MOV32mr %rsp, 1, %noreg, 24, %noreg, %ebx :: (store 4 into %stack.0, align 8)
+ MOV32mr %rsp, 1, %noreg, 16, %noreg, %ebp :: (store 4 into %stack.1, align 8)
+ MOVSDmr %rsp, 1, %noreg, 8, %noreg, killed %xmm0 :: (store 8 into %stack.2)
+ %rax = MOV64rm %rsp, 1, %noreg, 32, %noreg :: (load 8 from %stack.5)
+ MOV64mr %rsp, 1, %noreg, 48, %noreg, killed %rax :: (store 8 into %stack.3)
%rax = MOV64ri @wibble
STATEPOINT 2882400000, 0, 0, killed %rax, 2, 0, 2, 0, 2, 30, 2, 1, 2, 0, 2, 99, 2, 0, 2, 12, 2, 0, 2, 10, 1, 8, %rsp, 24, 2, 10, 2, 0, 2, 10, 1, 8, %rsp, 16, 2, 10, 2, 4278124286, 2, 6, 2, 4278124286, 2, 7, 1, 8, %rsp, 8, 2, 99, 2, 0, 2, 7, 2, 4278124286, 2, 99, 2, 0, 2, 13, 1, 8, %rsp, 48, 2, 7, 2, 4278124286, 2, 99, 2, 0, csr_64, implicit-def %rsp :: (volatile load 8 from %stack.0), (volatile load 8 from %stack.1), (volatile load 8 from %stack.2), (volatile load 8 from %stack.3)
%esi = XOR32rr undef %esi, undef %esi, implicit-def dead %eflags
@@ -215,16 +215,16 @@ body: |
liveins: %ebp, %esi, %rbx, %r12, %r14
%rax = MOV64ri @global.1
- %rax = MOV64rm killed %rax, 1, _, 0, _ :: (dereferenceable load 8 from @global.1)
+ %rax = MOV64rm killed %rax, 1, %noreg, 0, %noreg :: (dereferenceable load 8 from @global.1)
TEST64rr %rax, %rax, implicit-def %eflags
%rax = CMOVE64rr undef %rax, killed %rax, implicit killed %eflags
- %ecx = MOV32rm undef %rax, 1, _, 0, _ :: (load 4 from `i32* undef`)
- %rdx = MOV64rm %r12, 8, %r14, 0, _ :: (load 8 from %ir.tmp3)
- %r15 = LEA64r %rdx, 1, _, 1, _
- MOV64mr %r12, 8, %r14, 0, _, %r15 :: (store 8 into %ir.tmp3)
+ %ecx = MOV32rm undef %rax, 1, %noreg, 0, %noreg :: (load 4 from `i32* undef`)
+ %rdx = MOV64rm %r12, 8, %r14, 0, %noreg :: (load 8 from %ir.tmp3)
+ %r15 = LEA64r %rdx, 1, %noreg, 1, _
+ MOV64mr %r12, 8, %r14, 0, %noreg, %r15 :: (store 8 into %ir.tmp3)
%ecx = SUB32rr killed %ecx, %edx, implicit-def dead %eflags, implicit killed %rdx
- MOV32mr undef %rax, 1, _, 0, _, killed %ecx :: (store 4 into `i32* undef`)
- %r13 = MOV64rm killed %rax, 1, _, 768, _ :: (load 8 from %ir.tmp33)
+ MOV32mr undef %rax, 1, %noreg, 0, %noreg, killed %ecx :: (store 4 into `i32* undef`)
+ %r13 = MOV64rm killed %rax, 1, %noreg, 768, %noreg :: (load 8 from %ir.tmp33)
TEST8rr %sil, %sil, implicit-def %eflags
%rax = IMPLICIT_DEF
JNE_1 %bb.8.bb37, implicit %eflags
@@ -242,7 +242,7 @@ body: |
successors: %bb.9.bb37(0x40000000), %bb.10.bb37(0x40000000)
liveins: %ebp, %esi, %rax, %rbx, %r12, %r13, %r14, %r15
- %rcx = MOV64rm killed %rax, 1, _, 760, _ :: (load 8 from %ir.tmp40)
+ %rcx = MOV64rm killed %rax, 1, %noreg, 760, %noreg :: (load 8 from %ir.tmp40)
CMP64rr %r13, %rcx, implicit-def %eflags
JL_1 %bb.10.bb37, implicit %eflags
@@ -258,12 +258,12 @@ body: |
%cl = KILL %cl, implicit killed %rcx
%r15 = SAR64rCL killed %r15, implicit-def dead %eflags, implicit %cl
- MOV64mr %r12, 8, killed %r14, 0, _, killed %r15 :: (store 8 into %ir.tmp7)
- MOV64mi32 undef %rax, 1, _, 0, _, 0 :: (store 8 into `i64* undef`)
- %eax = LEA64_32r %rbx, 1, _, 1, _
+ MOV64mr %r12, 8, killed %r14, 0, %noreg, killed %r15 :: (store 8 into %ir.tmp7)
+ MOV64mi32 undef %rax, 1, %noreg, 0, %noreg, 0 :: (store 8 into `i64* undef`)
+ %eax = LEA64_32r %rbx, 1, %noreg, 1, _
%ecx = MOV32ri 6
CMP32ri %eax, 15141, implicit-def %eflags
- %xmm0 = MOVSDrm %rsp, 1, _, 40, _ :: (load 8 from %stack.4)
+ %xmm0 = MOVSDrm %rsp, 1, %noreg, 40, %noreg :: (load 8 from %stack.4)
JL_1 %bb.4.bb7, implicit %eflags
bb.11.bb51.loopexit:
@@ -273,14 +273,14 @@ body: |
%ebp = INC32r killed %ebp, implicit-def dead %eflags
%ebx = INC32r %ebx, implicit-def dead %eflags, implicit killed %rbx, implicit-def %rbx
%rax = MOV64ri %const.0
- %xmm0 = MOVSDrm killed %rax, 1, _, 0, _ :: (load 8 from constant-pool)
+ %xmm0 = MOVSDrm killed %rax, 1, %noreg, 0, %noreg :: (load 8 from constant-pool)
bb.12.bb51:
liveins: %ebp, %rbx, %xmm0
- MOV32mr %rsp, 1, _, 24, _, %ebx, implicit killed %rbx :: (store 4 into %stack.0, align 8)
- MOV32mr %rsp, 1, _, 16, _, killed %ebp :: (store 4 into %stack.1, align 8)
- MOVSDmr %rsp, 1, _, 8, _, killed %xmm0 :: (store 8 into %stack.2)
+ MOV32mr %rsp, 1, %noreg, 24, %noreg, %ebx, implicit killed %rbx :: (store 4 into %stack.0, align 8)
+ MOV32mr %rsp, 1, %noreg, 16, %noreg, killed %ebp :: (store 4 into %stack.1, align 8)
+ MOVSDmr %rsp, 1, %noreg, 8, %noreg, killed %xmm0 :: (store 8 into %stack.2)
%rax = MOV64ri @wobble
%edi = MOV32ri -121
STATEPOINT 2882400000, 0, 1, killed %rax, %edi, 2, 0, 2, 0, 2, 38, 2, 1, 2, 0, 2, 270, 2, 4, 2, 12, 2, 0, 2, 11, 2, 4278124286, 2, 99, 2, 0, 2, 10, 1, 8, %rsp, 24, 2, 6, 2, 4278124286, 2, 99, 2, 0, 2, 99, 2, 0, 2, 10, 1, 8, %rsp, 16, 2, 10, 2, 4278124286, 2, 99, 2, 0, 2, 7, 1, 8, %rsp, 8, 2, 99, 2, 0, 2, 7, 2, 4278124286, 2, 99, 2, 0, 2, 13, 2, 4278124286, 2, 99, 2, 0, 2, 99, 2, 0, csr_64, implicit-def %rsp :: (volatile load 8 from %stack.0), (volatile load 8 from %stack.1), (volatile load 8 from %stack.2)
Modified: llvm/trunk/test/CodeGen/X86/peephole-recurrence.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/peephole-recurrence.mir?rev=319445&r1=319444&r2=319445&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/peephole-recurrence.mir (original)
+++ llvm/trunk/test/CodeGen/X86/peephole-recurrence.mir Thu Nov 30 08:12:24 2017
@@ -215,7 +215,7 @@ body: |
; CHECK: %11:gr32 = ADD32rr
; CHECK-SAME: %1,
; CHECK-SAME: %0,
- MOV32mr %5, 1, _, 0, _, %0 :: (store 4 into %ir.p)
+ MOV32mr %5, 1, %noreg, 0, %noreg, %0 :: (store 4 into %ir.p)
%3 = ADD32rr %2, killed %11, implicit-def dead %eflags
; CHECK: %3:gr32 = ADD32rr
; CHECK-SAME: %2,
Modified: llvm/trunk/test/CodeGen/X86/post-ra-sched-with-debug.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/post-ra-sched-with-debug.mir?rev=319445&r1=319444&r2=319445&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/post-ra-sched-with-debug.mir (original)
+++ llvm/trunk/test/CodeGen/X86/post-ra-sched-with-debug.mir Thu Nov 30 08:12:24 2017
@@ -250,9 +250,9 @@ body: |
successors: %bb.3, %bb.2
liveins: %esi, %rdi, %r14, %rbx, %rbp
- ; CHECK: [[REGISTER:%r[a-z0-9]+]] = LEA64r {{%r[a-z0-9]+}}, 1, _, -20, _
- ; CHECK-NEXT: DBG_VALUE debug-use [[REGISTER]], debug-use _, ![[J_VAR]], !DIExpression(), debug-location ![[J_LOC]]
- ; CHECK-NEXT: DBG_VALUE debug-use [[REGISTER]], debug-use _, ![[I_VAR]], !DIExpression(), debug-location ![[I_LOC]]
+ ; CHECK: [[REGISTER:%r[a-z0-9]+]] = LEA64r {{%r[a-z0-9]+}}, 1, %noreg, -20, %noreg
+ ; CHECK-NEXT: DBG_VALUE debug-use [[REGISTER]], debug-use %noreg, ![[J_VAR]], !DIExpression(), debug-location ![[J_LOC]]
+ ; CHECK-NEXT: DBG_VALUE debug-use [[REGISTER]], debug-use %noreg, ![[I_VAR]], !DIExpression(), debug-location ![[I_LOC]]
frame-setup PUSH64r killed %rbp, implicit-def %rsp, implicit %rsp
CFI_INSTRUCTION def_cfa_offset 16
@@ -267,32 +267,32 @@ body: |
%r14d = MOV32rr %esi
%rbx = MOV64rr %rdi
CALL64pcrel32 @_ZN1lC2Ei, csr_64, implicit %rsp, implicit %rdi, implicit %esi, implicit-def %rsp
- %rdi = LEA64r %rbx, 1, _, 8, _
- DBG_VALUE debug-use %rdi, debug-use _, !20, !17, debug-location !27
- DBG_VALUE debug-use %rdi, debug-use _, !10, !17, debug-location !18
- %rax = MOV64rm %rbx, 1, _, 16, _ :: (load 8)
- MOV64mr %rbx, 1, _, 8, _, killed %rax :: (store 8)
- MOV64mr %rbx, 1, _, 24, _, %rdi :: (store 8)
+ %rdi = LEA64r %rbx, 1, %noreg, 8, %noreg
+ DBG_VALUE debug-use %rdi, debug-use %noreg, !20, !17, debug-location !27
+ DBG_VALUE debug-use %rdi, debug-use %noreg, !10, !17, debug-location !18
+ %rax = MOV64rm %rbx, 1, %noreg, 16, %noreg :: (load 8)
+ MOV64mr %rbx, 1, %noreg, 8, %noreg, killed %rax :: (store 8)
+ MOV64mr %rbx, 1, %noreg, 24, %noreg, %rdi :: (store 8)
%eax = MOV32ri -1
%cl = MOV8rr %r14b, implicit killed %r14d
%eax = SHL32rCL killed %eax, implicit-def dead %eflags, implicit %cl
- MOV32mr %rbx, 1, _, 32, _, %eax :: (store 4, align 8)
- MOV32mi %rbp, 1, _, -20, _, 0 :: (store 4)
- %rcx = MOV64rm %rbx, 1, _, 8, _ :: (load 8)
- MOV64mr %rip, 1, _, @n, _, %rcx :: (store 8)
+ MOV32mr %rbx, 1, %noreg, 32, %noreg, %eax :: (store 4, align 8)
+ MOV32mi %rbp, 1, %noreg, -20, %noreg, 0 :: (store 4)
+ %rcx = MOV64rm %rbx, 1, %noreg, 8, %noreg :: (load 8)
+ MOV64mr %rip, 1, %noreg, @n, %noreg, %rcx :: (store 8)
%edx = XOR32rr undef %edx, undef %edx, implicit-def dead %eflags, implicit-def %rdx
TEST64rr %rcx, %rcx, implicit-def %eflags
%esi = MOV32ri @o, implicit-def %rsi
%rsi = CMOVNE64rr killed %rsi, %rdx, implicit killed %eflags
%rsi = OR64rr killed %rsi, killed %rcx, implicit-def %eflags
- %rcx = LEA64r %rbp, 1, _, -20, _
- DBG_VALUE debug-use %rcx, debug-use _, !46, !17, debug-location !48
- DBG_VALUE debug-use %rcx, debug-use _, !39, !17, debug-location !44
+ %rcx = LEA64r %rbp, 1, %noreg, -20, %noreg
+ DBG_VALUE debug-use %rcx, debug-use %noreg, !46, !17, debug-location !48
+ DBG_VALUE debug-use %rcx, debug-use %noreg, !39, !17, debug-location !44
DBG_VALUE %rbp, -20, !29, !17, debug-location !36
%rcx = CMOVNE64rr killed %rcx, killed %rdx, implicit killed %eflags
%rcx = OR64rr killed %rcx, killed %rsi, implicit-def dead %eflags
- %rdx = MOVSX64rm32 %rbx, 1, _, 0, _ :: (load 4, align 8)
- TEST32mr killed %rcx, 4, killed %rdx, 0, _, killed %eax, implicit-def %eflags :: (load 4)
+ %rdx = MOVSX64rm32 %rbx, 1, %noreg, 0, %noreg :: (load 4, align 8)
+ TEST32mr killed %rcx, 4, killed %rdx, 0, %noreg, killed %eax, implicit-def %eflags :: (load 4)
JNE_1 %bb.2, implicit %eflags
JMP_1 %bb.3
@@ -300,7 +300,7 @@ body: |
successors: %bb.2
liveins: %rbx, %rbp
- %rdi = MOV64rm %rbx, 1, _, 24, _ :: (load 8)
+ %rdi = MOV64rm %rbx, 1, %noreg, 24, %noreg :: (load 8)
bb.2:
successors: %bb.1, %bb.3
@@ -308,11 +308,11 @@ body: |
CALL64pcrel32 @_ZN1p2aaEv, csr_64, implicit %rsp, implicit %rdi, implicit-def %rsp, implicit-def %eax
%eax = KILL %eax, implicit-def %rax
- %ecx = LEA64_32r %rax, 1, _, -1, _, implicit-def %rcx
+ %ecx = LEA64_32r %rax, 1, %noreg, -1, %noreg, implicit-def %rcx
%ecx = SHR32ri %ecx, 31, implicit-def dead %eflags, implicit killed %rcx, implicit-def %rcx
- %eax = LEA64_32r killed %rax, 1, killed %rcx, -1, _
+ %eax = LEA64_32r killed %rax, 1, killed %rcx, -1, %noreg
%eax = SAR32r1 killed %eax, implicit-def dead %eflags
- CMP32mr %rbx, 1, _, 0, _, killed %eax, implicit-def %eflags :: (load 4, align 8), (load 4, align 8)
+ CMP32mr %rbx, 1, %noreg, 0, %noreg, killed %eax, implicit-def %eflags :: (load 4, align 8), (load 4, align 8)
JG_1 %bb.1, implicit killed %eflags
bb.3:
Modified: llvm/trunk/test/CodeGen/X86/pr27681.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/pr27681.mir?rev=319445&r1=319444&r2=319445&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/pr27681.mir (original)
+++ llvm/trunk/test/CodeGen/X86/pr27681.mir Thu Nov 30 08:12:24 2017
@@ -47,11 +47,11 @@ body: |
TEST32rr %edx, %edx, implicit-def %eflags
%cl = SETNEr implicit %eflags
; This %bl def is antidependent on the above use of %ebx
- %bl = MOV8rm %esp, 1, _, 3, _ ; :: (load 1 from %stack.0)
+ %bl = MOV8rm %esp, 1, %noreg, 3, _ ; :: (load 1 from %stack.0)
%cl = OR8rr killed %cl, %bl, implicit-def dead %eflags
%esi = MOVZX32rr8 killed %cl
%esi = ADD32rr killed %esi, killed %edi, implicit-def dead %eflags
- %ecx = MOV32rm %esp, 1, _, 24, _ ; :: (load 4 from %stack.2)
+ %ecx = MOV32rm %esp, 1, %noreg, 24, _ ; :: (load 4 from %stack.2)
%edx = SAR32rCL killed %edx, implicit-def dead %eflags, implicit %cl
TEST32rr killed %edx, %edx, implicit-def %eflags
%cl = SETNEr implicit %eflags
@@ -66,7 +66,7 @@ body: |
bb.2:
liveins: %cl, %eax, %ebp, %esi
- OR32mr %esp, 1, _, 8, _, killed %eax, implicit-def %eflags ; :: (store 4 into %stack.1)
+ OR32mr %esp, 1, %noreg, 8, %noreg, killed %eax, implicit-def %eflags ; :: (store 4 into %stack.1)
%dl = SETNEr implicit %eflags, implicit-def %edx
bb.3:
Modified: llvm/trunk/test/CodeGen/X86/pre-coalesce.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/pre-coalesce.mir?rev=319445&r1=319444&r2=319445&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/pre-coalesce.mir (original)
+++ llvm/trunk/test/CodeGen/X86/pre-coalesce.mir Thu Nov 30 08:12:24 2017
@@ -83,10 +83,10 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.entry:
- %0 = MOV64rm %rip, 1, _, @b, _ :: (dereferenceable load 8 from @b)
- %12 = MOV8rm %0, 1, _, 0, _ :: (load 1 from %ir.t0)
+ %0 = MOV64rm %rip, 1, %noreg, @b, %noreg :: (dereferenceable load 8 from @b)
+ %12 = MOV8rm %0, 1, %noreg, 0, %noreg :: (load 1 from %ir.t0)
TEST8rr %12, %12, implicit-def %eflags
- %11 = MOV32rm %rip, 1, _, @a, _ :: (dereferenceable load 4 from @a)
+ %11 = MOV32rm %rip, 1, %noreg, @a, %noreg :: (dereferenceable load 4 from @a)
JNE_1 %bb.1.while.body.preheader, implicit killed %eflags
bb.4:
@@ -101,8 +101,8 @@ body: |
%10 = SHL32ri %10, 5, implicit-def dead %eflags
%10 = ADD32rr %10, %11, implicit-def dead %eflags
%10 = ADD32rr %10, %8, implicit-def dead %eflags
- MOV32mr %rip, 1, _, @a, _, %10 :: (store 4 into @a)
- %12 = MOV8rm %0, 1, _, 0, _ :: (load 1 from %ir.t0)
+ MOV32mr %rip, 1, %noreg, @a, %noreg, %10 :: (store 4 into @a)
+ %12 = MOV8rm %0, 1, %noreg, 0, %noreg :: (load 1 from %ir.t0)
TEST8rr %12, %12, implicit-def %eflags
%11 = COPY %10
JNE_1 %bb.2.while.body, implicit killed %eflags
Modified: llvm/trunk/test/CodeGen/X86/system-intrinsics-xgetbv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/system-intrinsics-xgetbv.ll?rev=319445&r1=319444&r2=319445&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/system-intrinsics-xgetbv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/system-intrinsics-xgetbv.ll Thu Nov 30 08:12:24 2017
@@ -18,4 +18,4 @@ define i64 @test_xgetbv(i32 %in) {
ret i64 %1;
}
-declare i64 @llvm.x86.xgetbv(i32)
\ No newline at end of file
+declare i64 @llvm.x86.xgetbv(i32)
Modified: llvm/trunk/test/CodeGen/X86/tail-merge-after-mbp.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/tail-merge-after-mbp.mir?rev=319445&r1=319444&r2=319445&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/tail-merge-after-mbp.mir (original)
+++ llvm/trunk/test/CodeGen/X86/tail-merge-after-mbp.mir Thu Nov 30 08:12:24 2017
@@ -5,25 +5,25 @@
# check loop bb.9 is not merged with bb.12
# CHECK: bb.2:
# CHECK-NEXT: successors: %bb.3(0x30000000), %bb.4(0x50000000)
-# CHECK: %rax = MOV64rm %r14, 1, _, 0, _
+# CHECK: %rax = MOV64rm %r14, 1, %noreg, 0, %noreg
# CHECK-NEXT: TEST64rr %rax, %rax
# CHECK-NEXT: JE_1 %bb.3
# CHECK: bb.4:
# CHECK-NEXT: successors: %bb.5(0x30000000), %bb.10(0x50000000)
-# CHECK: CMP64mi8 killed %rax, 1, _, 8, _, 0
+# CHECK: CMP64mi8 killed %rax, 1, %noreg, 8, %noreg, 0
# CHECK-NEXT: JNE_1 %bb.10
# CHECK: bb.5:
# CHECK-NEXT: successors: %bb.6(0x30000000), %bb.7(0x50000000)
-# CHECK: %rax = MOV64rm %r14, 1, _, 0, _
+# CHECK: %rax = MOV64rm %r14, 1, %noreg, 0, %noreg
# CHECK-NEXT: TEST64rr %rax, %rax
# CHECK-NEXT: JE_1 %bb.6
# CHECK: bb.7
# CHECK-NEXT: successors: %bb.8(0x71555555), %bb.10(0x0eaaaaab)
-# CHECK: CMP64mi8 killed %rax, 1, _, 8, _, 0
+# CHECK: CMP64mi8 killed %rax, 1, %noreg, 8, %noreg, 0
# CHECK-NEXT: JNE_1 %bb.10
# CHECK: bb.8:
# CHECK-NEXT: successors: %bb.9(0x04000000), %bb.7(0x7c000000)
-# CHECK: %rax = MOV64rm %r14, 1, _, 0, _
+# CHECK: %rax = MOV64rm %r14, 1, %noreg, 0, %noreg
# CHECK-NEXT: TEST64rr %rax, %rax
# CHECK-NEXT: JNE_1 %bb.7
@@ -44,7 +44,7 @@ body: |
bb.7:
successors: %bb.8(0x30000000), %bb.9(0x50000000)
- %rax = MOV64rm %r14, 1, _, 0, _ :: (load 8)
+ %rax = MOV64rm %r14, 1, %noreg, 0, %noreg :: (load 8)
TEST64rr %rax, %rax, implicit-def %eflags
JNE_1 %bb.9, implicit killed %eflags
@@ -57,13 +57,13 @@ body: |
bb.9:
successors: %bb.10(0x30000000), %bb.15(0x50000000)
- CMP64mi8 killed %rax, 1, _, 8, _, 0, implicit-def %eflags :: (load 8)
+ CMP64mi8 killed %rax, 1, %noreg, 8, %noreg, 0, implicit-def %eflags :: (load 8)
JNE_1 %bb.15, implicit %eflags
bb.10:
successors: %bb.11(0x30000000), %bb.12(0x50000000)
- %rax = MOV64rm %r14, 1, _, 0, _ :: (load 8)
+ %rax = MOV64rm %r14, 1, %noreg, 0, %noreg :: (load 8)
TEST64rr %rax, %rax, implicit-def %eflags
JNE_1 %bb.12, implicit %eflags
@@ -76,13 +76,13 @@ body: |
bb.12:
successors: %bb.13(0x71555555), %bb.15(0x0eaaaaab)
- CMP64mi8 killed %rax, 1, _, 8, _, 0, implicit-def %eflags :: (load 8), (load 8)
+ CMP64mi8 killed %rax, 1, %noreg, 8, %noreg, 0, implicit-def %eflags :: (load 8), (load 8)
JNE_1 %bb.15, implicit %eflags
bb.13:
successors: %bb.14(0x04000000), %bb.12(0x7c000000)
- %rax = MOV64rm %r14, 1, _, 0, _ :: (load 8)
+ %rax = MOV64rm %r14, 1, %noreg, 0, %noreg :: (load 8)
TEST64rr %rax, %rax, implicit-def %eflags
JNE_1 %bb.12, implicit %eflags