[llvm] r323922 - Followup on Proposal to move MIR physical register namespace to '$' sigil.
Puyan Lotfi via llvm-commits
llvm-commits at lists.llvm.org
Wed Jan 31 14:04:29 PST 2018
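The hunks below apply one mechanical rule from the proposal: MIR physical registers switch from the '%' sigil to '$', while virtual registers keep '%'. A minimal before/after illustration, using only instructions and register names that already appear in the hunks below:

    ; before: physical registers (x4, lr8, rm, x3) share the '%' sigil with virtuals
    %1:g8rc = COPY %x4
    BLR8 implicit %lr8, implicit %rm, implicit %x3

    ; after: physical registers take '$'; virtual registers (%0, %1, ...) are unchanged
    %1:g8rc = COPY $x4
    BLR8 implicit $lr8, implicit $rm, implicit $x3

The same rule covers quoted register names in the YAML metadata, e.g. { reg: '$x3', virtual-reg: '%0' }, and register strings inside CHECK lines.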
Modified: llvm/trunk/test/CodeGen/PowerPC/byval-agg-info.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/byval-agg-info.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/byval-agg-info.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/byval-agg-info.ll Wed Jan 31 14:04:26 2018
@@ -13,5 +13,5 @@ entry:
; Make sure that the MMO on the store has no offset from the byval
; variable itself (we used to have mem:ST8[%v+64]).
-; CHECK: STD killed renamable %x5, 176, %x1; mem:ST8[%v](align=16)
+; CHECK: STD killed renamable $x5, 176, $x1; mem:ST8[%v](align=16)
Modified: llvm/trunk/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-R0-special-handling.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-R0-special-handling.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-R0-special-handling.mir (original)
+++ llvm/trunk/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-R0-special-handling.mir Wed Jan 31 14:04:26 2018
@@ -94,8 +94,8 @@ registers:
- { id: 4, class: gprc, preferred-register: '' }
- { id: 5, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -118,18 +118,18 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x0, %x4
+ liveins: $x0, $x4
- %1:g8rc = COPY %x4
- %0:g8rc = COPY %x0
+ %1:g8rc = COPY $x4
+ %0:g8rc = COPY $x0
%2:gprc = LI 44
%3:gprc = COPY %1.sub_32
- %4:gprc = ADD4 killed %r0, killed %2
+ %4:gprc = ADD4 killed $r0, killed %2
; CHECK: li 3, 44
; CHECK: add 3, 0, 3
%5:g8rc = EXTSW_32_64 killed %4
- %x3 = COPY %5
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %5
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -148,8 +148,8 @@ registers:
- { id: 4, class: gprc, preferred-register: '' }
- { id: 5, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -172,18 +172,18 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x0, %x4
+ liveins: $x0, $x4
- %1:g8rc = COPY %x4
- %0:g8rc = COPY %x0
+ %1:g8rc = COPY $x4
+ %0:g8rc = COPY $x0
%2:gprc = COPY %0.sub_32
%3:gprc = LI 44
- %4:gprc = ADD4 killed %3, killed %r0
+ %4:gprc = ADD4 killed %3, killed $r0
; CHECK: li 3, 44
; CHECK: add 3, 3, 0
%5:g8rc = EXTSW_32_64 killed %4
- %x3 = COPY %5
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %5
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -202,8 +202,8 @@ registers:
- { id: 4, class: gprc, preferred-register: '' }
- { id: 5, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -226,17 +226,17 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1:g8rc = COPY %x4
- %0:g8rc = COPY %x3
+ %1:g8rc = COPY $x4
+ %0:g8rc = COPY $x3
%2:gprc = COPY %0.sub_32
- %r0 = LI 44
- %4:gprc = ADD4 killed %r0, killed %2
+ $r0 = LI 44
+ %4:gprc = ADD4 killed $r0, killed %2
; CHECK: addi 3, 3, 44
%5:g8rc = EXTSW_32_64 killed %4
- %x3 = COPY %5
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %5
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -255,8 +255,8 @@ registers:
- { id: 4, class: gprc, preferred-register: '' }
- { id: 5, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -279,17 +279,17 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1:g8rc = COPY %x4
- %0:g8rc = COPY %x3
+ %1:g8rc = COPY $x4
+ %0:g8rc = COPY $x3
%2:gprc = COPY %0.sub_32
- %r0 = LI 44
- %4:gprc = ADD4 killed %2, killed %r0
+ $r0 = LI 44
+ %4:gprc = ADD4 killed %2, killed $r0
; CHECK: addi 3, 3, 44
%5:g8rc = EXTSW_32_64 killed %4
- %x3 = COPY %5
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %5
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -305,8 +305,8 @@ registers:
- { id: 1, class: g8rc, preferred-register: '' }
- { id: 2, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x0', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x0', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -329,15 +329,15 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x0, %x4
+ liveins: $x0, $x4
- %1:g8rc = COPY %x4
+ %1:g8rc = COPY $x4
%0:g8rc_and_g8rc_nox0 = LI8 44
- %2:g8rc = LDX %0, %x0 :: (load 8 from %ir.1, !tbaa !3)
+ %2:g8rc = LDX %0, $x0 :: (load 8 from %ir.1, !tbaa !3)
; CHECK: li 3, 44
; CHECK: ldx 3, 3, 0
- %x3 = COPY %2
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %2
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -353,8 +353,8 @@ registers:
- { id: 1, class: g8rc, preferred-register: '' }
- { id: 2, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -377,14 +377,14 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
%1:g8rc = LI8 44
%0:g8rc_and_g8rc_nox0 = LI8 44
- %2:g8rc = LDX %zero8, %1 :: (load 8 from %ir.1, !tbaa !3)
+ %2:g8rc = LDX $zero8, %1 :: (load 8 from %ir.1, !tbaa !3)
; CHECK: ld 3, 44(0)
- %x3 = COPY %2
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %2
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -400,8 +400,8 @@ registers:
- { id: 1, class: g8rc, preferred-register: '' }
- { id: 2, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -424,13 +424,13 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %x0 = LI8 44
- %0:g8rc_and_g8rc_nox0 = COPY %x3
- %2:g8rc = LDX %0, %x0 :: (load 8 from %ir.1, !tbaa !3)
+ $x0 = LI8 44
+ %0:g8rc_and_g8rc_nox0 = COPY $x3
+ %2:g8rc = LDX %0, $x0 :: (load 8 from %ir.1, !tbaa !3)
; CHECK: ld 3, 44(3)
- %x3 = COPY %2
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %2
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
Modified: llvm/trunk/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-out-of-range.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-out-of-range.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-out-of-range.mir (original)
+++ llvm/trunk/test/CodeGen/PowerPC/convert-rr-to-ri-instrs-out-of-range.mir Wed Jan 31 14:04:26 2018
@@ -226,7 +226,7 @@ registers:
- { id: 3, class: g8rc, preferred-register: '' }
- { id: 4, class: gprc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
+ - { reg: '$x3', virtual-reg: '%0' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -249,17 +249,17 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3
+ liveins: $x3
- %0 = COPY %x3
+ %0 = COPY $x3
%1 = COPY %0.sub_32
%3 = IMPLICIT_DEF
%2 = LI 170
%4 = RLWNM killed %1, %2, 20, 27
; CHECK: RLWINM killed %1, 10, 20, 27
; CHECK-LATE: rlwinm 3, 3, 10, 20, 27
- %x3 = EXTSW_32_64 %4
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = EXTSW_32_64 %4
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -276,7 +276,7 @@ registers:
- { id: 1, class: g8rc, preferred-register: '' }
- { id: 2, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
+ - { reg: '$x3', virtual-reg: '%0' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -299,15 +299,15 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3
+ liveins: $x3
%0 = LI8 234
- %1 = COPY %x3
+ %1 = COPY $x3
%2 = RLWNM8 %1, %0, 20, 27
; CHECK: RLWINM8 %1, 10, 20, 27
; CHECK-LATE: rlwinm 3, 3, 10, 20, 27
- %x3 = COPY %2
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %2
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -331,8 +331,8 @@ registers:
- { id: 8, class: g8rc, preferred-register: '' }
- { id: 9, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -355,23 +355,23 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = COPY %1.sub_32
%3 = LI -22
- %4 = RLWNMo %2, %3, 24, 31, implicit-def %cr0
- ; CHECK: RLWINMo %2, 10, 24, 31, implicit-def %cr0
+ %4 = RLWNMo %2, %3, 24, 31, implicit-def $cr0
+ ; CHECK: RLWINMo %2, 10, 24, 31, implicit-def $cr0
; CHECK-LATE: li 3, -22
; CHECK-LATE: rlwinm. 5, 4, 10, 24, 31
- %5 = COPY killed %cr0
+ %5 = COPY killed $cr0
%6 = ISEL %2, %3, %5.sub_eq
%8 = IMPLICIT_DEF
%7 = INSERT_SUBREG %8, killed %6, 1
%9 = RLDICL killed %7, 0, 32
- %x3 = COPY %9
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %9
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -394,8 +394,8 @@ registers:
- { id: 7, class: crrc, preferred-register: '' }
- { id: 8, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -418,19 +418,19 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI8 -18
- %3 = RLWNM8o %1, %2, 20, 27, implicit-def %cr0
- ; CHECK: RLWINM8o %1, 14, 20, 27, implicit-def %cr0
+ %3 = RLWNM8o %1, %2, 20, 27, implicit-def $cr0
+ ; CHECK: RLWINM8o %1, 14, 20, 27, implicit-def $cr0
; CHECK-LATE: rlwinm. 3, 4, 14, 20, 27
- %7 = COPY killed %cr0
+ %7 = COPY killed $cr0
%6 = RLDICL killed %3, 0, 32
%8 = ISEL8 %1, %6, %7.sub_eq
- %x3 = COPY %8
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %8
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -453,8 +453,8 @@ registers:
- { id: 7, class: g8rc, preferred-register: '' }
- { id: 8, class: gprc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -477,17 +477,17 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = COPY %1.sub_32
%5 = LI 210
%8 = SLW killed %2, killed %5
; CHECK: RLWINM killed %2, 18, 0, 13
; CHECK-LATE: slwi 3, 4, 18
- %x3 = EXTSW_32_64 %8
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = EXTSW_32_64 %8
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -511,8 +511,8 @@ registers:
- { id: 8, class: g8rc, preferred-register: '' }
- { id: 9, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -535,22 +535,22 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI 35
%3 = COPY %0.sub_32
- %4 = SLWo %3, %2, implicit-def %cr0
- ; CHECK: ANDIo %3, 0, implicit-def %cr0
+ %4 = SLWo %3, %2, implicit-def $cr0
+ ; CHECK: ANDIo %3, 0, implicit-def $cr0
; CHECK-LATE: andi. 5, 3, 0
- %5 = COPY killed %cr0
+ %5 = COPY killed $cr0
%6 = ISEL %2, %3, %5.sub_eq
%8 = IMPLICIT_DEF
%7 = INSERT_SUBREG %8, killed %6, 1
%9 = RLDICL killed %7, 0, 32
- %x3 = COPY %9
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %9
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -573,8 +573,8 @@ registers:
- { id: 7, class: g8rc, preferred-register: '' }
- { id: 8, class: gprc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -597,17 +597,17 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI 48
%5 = COPY %0.sub_32
%8 = SRW killed %5, killed %2
; CHECK: LI 0
; CHECK-LATE: li 3, 0
- %x3 = EXTSW_32_64 %8
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = EXTSW_32_64 %8
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -631,8 +631,8 @@ registers:
- { id: 8, class: g8rc, preferred-register: '' }
- { id: 9, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -655,22 +655,22 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI -7
%3 = COPY %0.sub_32
- %4 = SRWo %3, %2, implicit-def %cr0
- ; CHECK: ANDIo %3, 0, implicit-def %cr0
+ %4 = SRWo %3, %2, implicit-def $cr0
+ ; CHECK: ANDIo %3, 0, implicit-def $cr0
; CHECK-LATE: andi. 5, 3, 0
- %5 = COPY killed %cr0
+ %5 = COPY killed $cr0
%6 = ISEL %2, %3, %5.sub_eq
%8 = IMPLICIT_DEF
%7 = INSERT_SUBREG %8, killed %6, 1
%9 = RLDICL killed %7, 0, 32
- %x3 = COPY %9
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %9
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -690,8 +690,8 @@ registers:
- { id: 4, class: gprc, preferred-register: '' }
- { id: 5, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -714,19 +714,19 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI 48
%3 = COPY %0.sub_32
- %4 = SRAW killed %3, killed %2, implicit-def dead %carry
+ %4 = SRAW killed %3, killed %2, implicit-def dead $carry
; CHECK: LI 48
- ; CHECK: SRAW killed %3, killed %2, implicit-def dead %carry
+ ; CHECK: SRAW killed %3, killed %2, implicit-def dead $carry
; CHECK-LATE: sraw 3, 3, 4
%5 = EXTSW_32_64 killed %4
- %x3 = COPY %5
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %5
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -748,8 +748,8 @@ registers:
- { id: 6, class: gprc, preferred-register: '' }
- { id: 7, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -772,20 +772,20 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI 80
%3 = COPY %0.sub_32
- %4 = SRAWo killed %3, %2, implicit-def dead %carry, implicit-def %cr0
- ; CHECK: SRAWo killed %3, %2, implicit-def dead %carry, implicit-def %cr0
+ %4 = SRAWo killed %3, %2, implicit-def dead $carry, implicit-def $cr0
+ ; CHECK: SRAWo killed %3, %2, implicit-def dead $carry, implicit-def $cr0
; CHECK-LATE: sraw. 3, 3, 4
- %5 = COPY killed %cr0
+ %5 = COPY killed $cr0
%6 = ISEL %2, %4, %5.sub_eq
%7 = EXTSW_32_64 killed %6
- %x3 = COPY %7
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %7
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -804,8 +804,8 @@ registers:
- { id: 3, class: gprc, preferred-register: '' }
- { id: 4, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -828,17 +828,17 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = COPY %1.sub_32
%3 = LI 140
%4 = RLDCL %0, killed %3, 0
; CHECK: RLDICL %0, 12, 0
; CHECK-LATE: rotldi 3, 3, 12
- %x3 = COPY %4
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %4
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -859,8 +859,8 @@ registers:
- { id: 5, class: crrc, preferred-register: '' }
- { id: 6, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -883,19 +883,19 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = RLDICL %1, 0, 58
%3 = LI -37
- %4 = RLDCLo %0, killed %3, 0, implicit-def %cr0
- ; CHECK: RLDICLo %0, 27, 0, implicit-def %cr0
+ %4 = RLDCLo %0, killed %3, 0, implicit-def $cr0
+ ; CHECK: RLDICLo %0, 27, 0, implicit-def $cr0
; CHECK-LATE: rldicl. 5, 3, 27, 0
- %5 = COPY killed %cr0
+ %5 = COPY killed $cr0
%6 = ISEL8 %2, %0, %5.sub_eq
- %x3 = COPY %6
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %6
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -914,8 +914,8 @@ registers:
- { id: 3, class: gprc, preferred-register: '' }
- { id: 4, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -938,17 +938,17 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = COPY %1.sub_32
%3 = LI 300
%4 = RLDCR %0, killed %3, 0
; CHECK: RLDICR %0, 44, 0
; CHECK-LATE: rldicr 3, 3, 44, 0
- %x3 = COPY %4
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %4
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -969,8 +969,8 @@ registers:
- { id: 5, class: crrc, preferred-register: '' }
- { id: 6, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -993,19 +993,19 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = RLDICL %1, 0, 58
%3 = LI -18
- %4 = RLDCRo %0, killed %3, 0, implicit-def %cr0
- ; CHECK: RLDICRo %0, 46, 0, implicit-def %cr0
+ %4 = RLDCRo %0, killed %3, 0, implicit-def $cr0
+ ; CHECK: RLDICRo %0, 46, 0, implicit-def $cr0
; CHECK-LATE: rldicr. 5, 3, 46, 0
- %5 = COPY killed %cr0
+ %5 = COPY killed $cr0
%6 = ISEL8 %2, %0, %5.sub_eq
- %x3 = COPY %6
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %6
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -1023,8 +1023,8 @@ registers:
- { id: 2, class: gprc, preferred-register: '' }
- { id: 3, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -1047,16 +1047,16 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI -13
%3 = SLD %0, killed %2
; CHECK: LI8 0
; CHECK-LATE: li 3, 0
- %x3 = COPY %3
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %3
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -1076,8 +1076,8 @@ registers:
- { id: 4, class: crrc, preferred-register: '' }
- { id: 5, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -1100,18 +1100,18 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI 88
- %3 = SLDo %0, killed %2, implicit-def %cr0
- ; CHECK: ANDIo8 %0, 0, implicit-def %cr0
+ %3 = SLDo %0, killed %2, implicit-def $cr0
+ ; CHECK: ANDIo8 %0, 0, implicit-def $cr0
; CHECK-LATE: andi. 5, 3, 0
- %4 = COPY killed %cr0
+ %4 = COPY killed $cr0
%5 = ISEL8 %1, %0, %4.sub_eq
- %x3 = COPY %5
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %5
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -1129,8 +1129,8 @@ registers:
- { id: 2, class: gprc, preferred-register: '' }
- { id: 3, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -1153,16 +1153,16 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI 400
%3 = SRD %0, killed %2
; CHECK: RLDICL %0, 48, 16
; CHECK-LATE: rldicl 3, 3, 48, 16
- %x3 = COPY %3
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %3
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -1182,8 +1182,8 @@ registers:
- { id: 4, class: crrc, preferred-register: '' }
- { id: 5, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -1206,18 +1206,18 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI 64
- %3 = SRDo %0, killed %2, implicit-def %cr0
- ; CHECK: ANDIo8 %0, 0, implicit-def %cr0
+ %3 = SRDo %0, killed %2, implicit-def $cr0
+ ; CHECK: ANDIo8 %0, 0, implicit-def $cr0
; CHECK-LATE: andi. 5, 3, 0
- %4 = COPY killed %cr0
+ %4 = COPY killed $cr0
%5 = ISEL8 %1, %0, %4.sub_eq
- %x3 = COPY %5
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %5
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -1235,8 +1235,8 @@ registers:
- { id: 2, class: gprc, preferred-register: '' }
- { id: 3, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -1259,16 +1259,16 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI -44
- %3 = SRAD %0, killed %2, implicit-def dead %carry
- ; CHECK: SRAD %0, killed %2, implicit-def dead %carry
+ %3 = SRAD %0, killed %2, implicit-def dead $carry
+ ; CHECK: SRAD %0, killed %2, implicit-def dead $carry
; CHECK-LATE: srad 3, 3, 4
- %x3 = COPY %3
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %3
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -1288,8 +1288,8 @@ registers:
- { id: 4, class: crrc, preferred-register: '' }
- { id: 5, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -1312,18 +1312,18 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI 68
- %3 = SRADo %0, killed %2, implicit-def dead %carry, implicit-def %cr0
- ; CHECK: SRADo %0, killed %2, implicit-def dead %carry, implicit-def %cr0
+ %3 = SRADo %0, killed %2, implicit-def dead $carry, implicit-def $cr0
+ ; CHECK: SRADo %0, killed %2, implicit-def dead $carry, implicit-def $cr0
; CHECK-LATE: srad. 3, 3, 5
- %4 = COPY killed %cr0
+ %4 = COPY killed $cr0
%5 = ISEL8 %1, %3, %4.sub_eq
- %x3 = COPY %5
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %5
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
Modified: llvm/trunk/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir (original)
+++ llvm/trunk/test/CodeGen/PowerPC/convert-rr-to-ri-instrs.mir Wed Jan 31 14:04:26 2018
@@ -1009,8 +1009,8 @@ registers:
- { id: 5, class: gprc, preferred-register: '' }
- { id: 6, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -1033,10 +1033,10 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI 33
%3 = COPY %0.sub_32
%4 = ADD4 killed %3, %2
@@ -1046,8 +1046,8 @@ body: |
; CHECK-LATE: addi 3, 3, 33
; CHECK-LATE: addi 3, 3, 33
%6 = EXTSW_32_64 killed %5
- %x3 = COPY %6
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %6
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -1065,8 +1065,8 @@ registers:
- { id: 2, class: g8rc_and_g8rc_nox0, preferred-register: '' }
- { id: 3, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -1089,18 +1089,18 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
%1 = LI8 33
- %0 = COPY %x3
+ %0 = COPY $x3
%2 = ADD8 %0, %1
%3 = ADD8 killed %1, killed %2
; CHECK: ADDI8 %0, 33
; CHECK: ADDI8 killed %2, 33
; CHECK-LATE: addi 3, 3, 33
; CHECK-LATE: addi 3, 3, 33
- %x3 = COPY %3
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %3
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -1123,10 +1123,10 @@ registers:
- { id: 7, class: g8rc, preferred-register: '' }
- { id: 8, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
- - { reg: '%x5', virtual-reg: '%2' }
- - { reg: '%x6', virtual-reg: '%3' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
+ - { reg: '$x5', virtual-reg: '%2' }
+ - { reg: '$x6', virtual-reg: '%3' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -1149,22 +1149,22 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4, %x5, %x6
+ liveins: $x3, $x4, $x5, $x6
- %3 = COPY %x6
- %2 = COPY %x5
- %1 = COPY %x4
- %0 = COPY %x3
+ %3 = COPY $x6
+ %2 = COPY $x5
+ %1 = COPY $x4
+ %0 = COPY $x3
%4 = COPY %0.sub_32
%5 = LI 55
- %6 = ADDC %5, %4, implicit-def %carry
- ; CHECK: ADDIC %4, 55, implicit-def %carry
+ %6 = ADDC %5, %4, implicit-def $carry
+ ; CHECK: ADDIC %4, 55, implicit-def $carry
; CHECK-LATE: addic 3, 3, 55
- %7 = ADDE8 %3, %1, implicit-def dead %carry, implicit %carry
+ %7 = ADDE8 %3, %1, implicit-def dead $carry, implicit $carry
%8 = EXTSW_32_64 %6
- %x3 = COPY %8
- %x4 = COPY %7
- BLR8 implicit %lr8, implicit %rm, implicit %x3, implicit %x4
+ $x3 = COPY %8
+ $x4 = COPY %7
+ BLR8 implicit $lr8, implicit $rm, implicit $x3, implicit $x4
...
---
@@ -1184,10 +1184,10 @@ registers:
- { id: 4, class: g8rc, preferred-register: '' }
- { id: 5, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
- - { reg: '%x5', virtual-reg: '%2' }
- - { reg: '%x6', virtual-reg: '%3' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
+ - { reg: '$x5', virtual-reg: '%2' }
+ - { reg: '$x6', virtual-reg: '%3' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -1210,19 +1210,19 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4, %x5, %x6
+ liveins: $x3, $x4, $x5, $x6
- %3 = COPY %x6
- %2 = COPY %x5
- %1 = COPY %x4
+ %3 = COPY $x6
+ %2 = COPY $x5
+ %1 = COPY $x4
%0 = LI8 777
- %4 = ADDC8 %2, %0, implicit-def %carry
- ; CHECK: ADDIC8 %2, 777, implicit-def %carry
+ %4 = ADDC8 %2, %0, implicit-def $carry
+ ; CHECK: ADDIC8 %2, 777, implicit-def $carry
; CHECK-LATE: addic 3, 5, 777
- %5 = ADDE8 %3, %1, implicit-def dead %carry, implicit %carry
- %x3 = COPY %4
- %x4 = COPY %5
- BLR8 implicit %lr8, implicit %rm, implicit %x3, implicit %x4
+ %5 = ADDE8 %3, %1, implicit-def dead $carry, implicit $carry
+ $x3 = COPY %4
+ $x4 = COPY %5
+ BLR8 implicit $lr8, implicit $rm, implicit $x3, implicit $x4
...
---
@@ -1245,8 +1245,8 @@ registers:
- { id: 7, class: g8rc_and_g8rc_nox0, preferred-register: '' }
- { id: 8, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -1269,21 +1269,21 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
%1 = LI 433
- %0 = COPY %x3
+ %0 = COPY $x3
%2 = COPY %0.sub_32
- %3 = ADDCo %1, %2, implicit-def %cr0, implicit-def %carry
- ; CHECK: ADDICo %2, 433, implicit-def %cr0, implicit-def %carry
+ %3 = ADDCo %1, %2, implicit-def $cr0, implicit-def $carry
+ ; CHECK: ADDICo %2, 433, implicit-def $cr0, implicit-def $carry
; CHECK-LATE: addic. 3, 3, 433
- %4 = COPY killed %cr0
+ %4 = COPY killed $cr0
%5 = COPY %4.sub_eq
%6 = LI8 0
%7 = LI8 -1
%8 = ISEL8 %7, %6, %5
- %x3 = COPY %8
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %8
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -1301,7 +1301,7 @@ registers:
- { id: 2, class: gprc, preferred-register: '' }
- { id: 3, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
+ - { reg: '$x3', virtual-reg: '%0' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -1324,16 +1324,16 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3
+ liveins: $x3
- %0 = COPY %x3
+ %0 = COPY $x3
%1 = LI 77
%2 = ADDI killed %1, 44
%3 = EXTSW_32_64 killed %2
; CHECK: LI 121
; CHECK-LATE: li 3, 121
- %x3 = COPY %3
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %3
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -1351,7 +1351,7 @@ registers:
- { id: 2, class: g8rc, preferred-register: '' }
- { id: 3, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
+ - { reg: '$x3', virtual-reg: '%0' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -1374,16 +1374,16 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3
+ liveins: $x3
- %0 = COPY %x3
+ %0 = COPY $x3
%1 = LI8 333
%2 = ADDI8 killed %1, 44
; CHECK: LI8 377
; CHECK-LATE: li 3, 377
%3 = EXTSW killed %2
- %x3 = COPY %3
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %3
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -1404,8 +1404,8 @@ registers:
- { id: 5, class: gprc, preferred-register: '' }
- { id: 6, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -1428,19 +1428,19 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
%1 = LI 78
- %0 = COPY %x3
+ %0 = COPY $x3
%2 = COPY %0.sub_32
- %3 = ANDo %1, %2, implicit-def %cr0
- ; CHECK: ANDIo %2, 78, implicit-def %cr0
+ %3 = ANDo %1, %2, implicit-def $cr0
+ ; CHECK: ANDIo %2, 78, implicit-def $cr0
; CHECK-LATE: andi. 5, 3, 78
- %4 = COPY killed %cr0
+ %4 = COPY killed $cr0
%5 = ISEL %2, %1, %4.sub_eq
%6 = EXTSW_32_64 killed %5
- %x3 = COPY %6
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %6
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -1459,8 +1459,8 @@ registers:
- { id: 3, class: crrc, preferred-register: '' }
- { id: 4, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -1483,17 +1483,17 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
%1 = LI8 321
- %0 = COPY %x3
- %2 = AND8o %1, %0, implicit-def %cr0
- ; CHECK: ANDIo8 %0, 321, implicit-def %cr0
+ %0 = COPY $x3
+ %2 = AND8o %1, %0, implicit-def $cr0
+ ; CHECK: ANDIo8 %0, 321, implicit-def $cr0
; CHECK-LATE: andi. 5, 3, 321
- %3 = COPY killed %cr0
+ %3 = COPY killed $cr0
%4 = ISEL8 %1, %0, %3.sub_eq
- %x3 = COPY %4
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %4
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -1513,8 +1513,8 @@ registers:
- { id: 4, class: g8rc, preferred-register: '' }
- { id: 5, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -1537,17 +1537,17 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
%1 = LI8 65533
- %0 = COPY %x3
+ %0 = COPY $x3
%2 = CMPD %0, %1
; CHECK: CMPDI %0, -3
; CHECK-LATE: cmpdi 3, -3
- %4 = ISEL8 %zero8, %0, %2.sub_gt
+ %4 = ISEL8 $zero8, %0, %2.sub_gt
%5 = ADD8 killed %4, %1
- %x3 = COPY %5
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %5
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -1567,8 +1567,8 @@ registers:
- { id: 4, class: g8rc, preferred-register: '' }
- { id: 5, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -1591,16 +1591,16 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
+ %1 = COPY $x4
%0 = LI8 89
%2 = CMPDI %0, 87
- %4 = ISEL8 %zero8, %0, %2.sub_gt
+ %4 = ISEL8 $zero8, %0, %2.sub_gt
; CHECK: LI8 0
%5 = ADD8 killed %4, %1
- %x3 = COPY %5
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %5
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -1620,8 +1620,8 @@ registers:
- { id: 4, class: g8rc, preferred-register: '' }
- { id: 5, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -1644,16 +1644,16 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
+ %1 = COPY $x4
%0 = LI8 87
%2 = CMPDI %0, 87
- %4 = ISEL8 %zero8, %0, %2.sub_gt
+ %4 = ISEL8 $zero8, %0, %2.sub_gt
; CHECK: COPY %0
%5 = ADD8 killed %4, %1
- %x3 = COPY %5
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %5
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -1673,8 +1673,8 @@ registers:
- { id: 4, class: g8rc, preferred-register: '' }
- { id: 5, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -1697,17 +1697,17 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
%1 = LI8 99
- %0 = COPY %x3
+ %0 = COPY $x3
%2 = CMPLD %0, %1
; CHECK: CMPLDI %0, 99
; CHECK-LATE: cmpldi 3, 99
- %4 = ISEL8 %zero8, %0, %2.sub_gt
+ %4 = ISEL8 $zero8, %0, %2.sub_gt
%5 = ADD8 killed %4, %1
- %x3 = COPY %5
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %5
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -1727,8 +1727,8 @@ registers:
- { id: 4, class: g8rc, preferred-register: '' }
- { id: 5, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -1751,16 +1751,16 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
+ %1 = COPY $x4
%0 = LI8 65534
%2 = CMPLDI %0, 65535
- %4 = ISEL8 %zero8, %0, %2.sub_gt
+ %4 = ISEL8 $zero8, %0, %2.sub_gt
; CHECK: COPY %0
%5 = ADD8 killed %4, %1
- %x3 = COPY %5
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %5
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -1783,8 +1783,8 @@ registers:
- { id: 7, class: gprc, preferred-register: '' }
- { id: 8, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -1807,19 +1807,19 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI -1
%3 = COPY %0.sub_32
%4 = CMPW %3, %2
; CHECK: CMPWI %3, -1
- %6 = ISEL %zero, %3, %4.sub_gt
+ %6 = ISEL $zero, %3, %4.sub_gt
%7 = ADD4 killed %6, %2
%8 = EXTSW_32_64 killed %7
- %x3 = COPY %8
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %8
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -1842,8 +1842,8 @@ registers:
- { id: 7, class: gprc, preferred-register: '' }
- { id: 8, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -1866,19 +1866,19 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = COPY %1.sub_32
%3 = LI -3
%4 = CMPWI %3, 87
- %6 = ISEL %zero, %3, %4.sub_gt
+ %6 = ISEL $zero, %3, %4.sub_gt
; CHECK: COPY %3
%7 = ADD4 killed %6, killed %2
%8 = EXTSW_32_64 killed %7
- %x3 = COPY %8
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %8
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -1903,8 +1903,8 @@ registers:
- { id: 9, class: g8rc, preferred-register: '' }
- { id: 10, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -1927,22 +1927,22 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI 32767
%3 = COPY %0.sub_32
%4 = CMPLW %3, %2
; CHECK: CMPLWI %3, 32767
; CHECK-LATE: cmplwi 3, 32767
- %6 = ISEL %zero, %3, %4.sub_gt
+ %6 = ISEL $zero, %3, %4.sub_gt
%7 = ADD4 killed %6, %2
%9 = IMPLICIT_DEF
%8 = INSERT_SUBREG %9, killed %7, 1
%10 = RLDICL killed %8, 0, 32
- %x3 = COPY %10
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %10
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -1967,8 +1967,8 @@ registers:
- { id: 9, class: g8rc, preferred-register: '' }
- { id: 10, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -1991,21 +1991,21 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = COPY %1.sub_32
%3 = LI -3
%4 = CMPLWI %3, 87
- %6 = ISEL %zero, %3, %4.sub_gt
+ %6 = ISEL $zero, %3, %4.sub_gt
; CHECK: LI 0
%7 = ADD4 killed %6, killed %2
%9 = IMPLICIT_DEF
%8 = INSERT_SUBREG %9, killed %7, 1
%10 = RLDICL killed %8, 0, 32
- %x3 = COPY %10
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %10
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -2037,8 +2037,8 @@ registers:
- { id: 16, class: g8rc, preferred-register: '' }
- { id: 17, class: g8rc_and_g8rc_nox0, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -2061,10 +2061,10 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = COPY %1.sub_32
%3 = ADDI %2, 1
%5 = IMPLICIT_DEF
@@ -2082,8 +2082,8 @@ body: |
%15 = IMPLICIT_DEF
%14 = INSERT_SUBREG %15, killed %13, 1
%16 = RLWINM8 killed %14, 0, 24, 31
- %x3 = COPY %16
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %16
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -2114,8 +2114,8 @@ registers:
- { id: 15, class: g8rc, preferred-register: '' }
- { id: 16, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -2138,9 +2138,9 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
+ %1 = COPY $x4
%0 = LI8 45
%2 = COPY %1.sub_32
%3 = ADDI %2, 1
@@ -2161,8 +2161,8 @@ body: |
%15 = IMPLICIT_DEF
%14 = INSERT_SUBREG %15, killed %13, 1
%16 = RLWINM8 killed %14, 0, 24, 31
- %x3 = COPY %16
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %16
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -2194,8 +2194,8 @@ registers:
- { id: 16, class: g8rc, preferred-register: '' }
- { id: 17, class: g8rc_and_g8rc_nox0, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -2218,10 +2218,10 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = COPY %1.sub_32
%3 = ADDI %2, 1
%5 = IMPLICIT_DEF
@@ -2239,8 +2239,8 @@ body: |
%15 = IMPLICIT_DEF
%14 = INSERT_SUBREG %15, killed %13, 1
%16 = RLWINM8 killed %14, 0, 16, 31
- %x3 = COPY %16
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %16
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -2271,8 +2271,8 @@ registers:
- { id: 15, class: g8rc, preferred-register: '' }
- { id: 16, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -2295,10 +2295,10 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = COPY %1.sub_32
%3 = ADDI %2, 1
%5 = IMPLICIT_DEF
@@ -2316,8 +2316,8 @@ body: |
%15 = IMPLICIT_DEF
%14 = INSERT_SUBREG %15, killed %13, 1
%16 = RLWINM8 killed %14, 0, 16, 31
- %x3 = COPY %16
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %16
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -2349,8 +2349,8 @@ registers:
- { id: 16, class: g8rc, preferred-register: '' }
- { id: 17, class: g8rc_and_g8rc_nox0, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -2373,10 +2373,10 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = COPY %1.sub_32
%3 = ADDI %2, 1
%5 = IMPLICIT_DEF
@@ -2394,8 +2394,8 @@ body: |
%15 = IMPLICIT_DEF
%14 = INSERT_SUBREG %15, killed %13, 1
%16 = EXTSH8 killed %14
- %x3 = COPY %16
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %16
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -2426,8 +2426,8 @@ registers:
- { id: 15, class: g8rc, preferred-register: '' }
- { id: 16, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -2450,10 +2450,10 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = COPY %1.sub_32
%3 = ADDI %2, 1
%5 = IMPLICIT_DEF
@@ -2473,8 +2473,8 @@ body: |
%15 = IMPLICIT_DEF
%14 = INSERT_SUBREG %15, killed %13, 1
%16 = EXTSH8 killed %14
- %x3 = COPY %16
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %16
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -2507,8 +2507,8 @@ registers:
- { id: 17, class: g8rc_and_g8rc_nox0, preferred-register: '' }
- { id: 18, class: g8rc_and_g8rc_nox0, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -2531,10 +2531,10 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = COPY %1.sub_32
%3 = ADDI %2, 1
%5 = IMPLICIT_DEF
@@ -2554,8 +2554,8 @@ body: |
%15 = IMPLICIT_DEF
%14 = INSERT_SUBREG %15, killed %13, 1
%16 = RLDICL killed %14, 0, 32
- %x3 = COPY %16
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %16
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -2586,8 +2586,8 @@ registers:
- { id: 15, class: g8rc, preferred-register: '' }
- { id: 16, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -2610,9 +2610,9 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
+ %1 = COPY $x4
%0 = LI8 1000
%2 = COPY %1.sub_32
%3 = ADDI %2, 1
@@ -2633,8 +2633,8 @@ body: |
%15 = IMPLICIT_DEF
%14 = INSERT_SUBREG %15, killed %13, 1
%16 = RLDICL killed %14, 0, 32
- %x3 = COPY %16
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %16
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -2662,8 +2662,8 @@ registers:
- { id: 12, class: g8rc, preferred-register: '' }
- { id: 13, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -2686,9 +2686,9 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
+ %1 = COPY $x4
%0 = LI8 444
%2 = COPY %1.sub_32
%3 = ADDI %2, 1
@@ -2706,8 +2706,8 @@ body: |
; CHECK: LWA 444, killed %11
; CHECK-LATE: lwa 3, 444(4)
%13 = ADD8 killed %12, killed %7
- %x3 = COPY %13
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %13
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -2737,8 +2737,8 @@ registers:
- { id: 14, class: g8rc_and_g8rc_nox0, preferred-register: '' }
- { id: 15, class: g8rc_and_g8rc_nox0, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -2761,10 +2761,10 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = COPY %1.sub_32
%3 = ADDI %2, 1
%5 = IMPLICIT_DEF
@@ -2781,8 +2781,8 @@ body: |
; CHECK: LDU 200, %0
; CHECK-LATE: ldu 4, 200(3)
%13 = ADD8 killed %12, killed %7
- %x3 = COPY %13
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %13
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -2810,8 +2810,8 @@ registers:
- { id: 12, class: g8rc, preferred-register: '' }
- { id: 13, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -2834,10 +2834,10 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = COPY %1.sub_32
%3 = ADDI %2, 1
%5 = IMPLICIT_DEF
@@ -2854,8 +2854,8 @@ body: |
; CHECK: LD 280, %0
; CHECK-LATE: ld 12, 280(3)
%13 = ADD8 killed %12, killed %7
- %x3 = COPY %13
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %13
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -2885,8 +2885,8 @@ registers:
- { id: 14, class: g8rc_and_g8rc_nox0, preferred-register: '' }
- { id: 15, class: g8rc_and_g8rc_nox0, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -2909,10 +2909,10 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = COPY %1.sub_32
%3 = ADDI %2, 1
%5 = IMPLICIT_DEF
@@ -2928,9 +2928,9 @@ body: |
%12,%15 = LFDUX %0, killed %11 :: (load 8 from %ir.arrayidx3, !tbaa !12)
; CHECK: LFDU 16, %0
; CHECK-LATE: lfdu 1, 16(3)
- %13 = FADD killed %7, killed %12, implicit %rm
- %f1 = COPY %13
- BLR8 implicit %lr8, implicit %rm, implicit %f1
+ %13 = FADD killed %7, killed %12, implicit $rm
+ $f1 = COPY %13
+ BLR8 implicit $lr8, implicit $rm, implicit $f1
...
---
@@ -2958,8 +2958,8 @@ registers:
- { id: 12, class: f8rc, preferred-register: '' }
- { id: 13, class: f8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -2982,9 +2982,9 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
+ %1 = COPY $x4
%0 = LI8 -20
%2 = COPY %1.sub_32
%3 = ADDI %2, 1
@@ -3001,9 +3001,9 @@ body: |
%12 = LFDX %0, killed %11 :: (load 8 from %ir.arrayidx3, !tbaa !12)
; CHECK: LFD -20, killed %11
; CHECK-LATE: lfd 1, -20(4)
- %13 = FADD killed %7, killed %12, implicit %rm
- %f1 = COPY %13
- BLR8 implicit %lr8, implicit %rm, implicit %f1
+ %13 = FADD killed %7, killed %12, implicit $rm
+ $f1 = COPY %13
+ BLR8 implicit $lr8, implicit $rm, implicit $f1
...
---
@@ -3042,8 +3042,8 @@ registers:
- { id: 23, class: g8rc, preferred-register: '' }
- { id: 24, class: vrrc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -3081,41 +3081,41 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI8 72
%3, %4 = LFSUX %0, killed %2 :: (load 4 from %ir.arrayidx, !tbaa !14)
; CHECK: LFSU 72, %0
; CHECK-LATE: lfsu 0, 72(3)
- %5 = FCTIWUZ killed %3, implicit %rm
+ %5 = FCTIWUZ killed %3, implicit $rm
%6 = ADDI8 %stack.4, 0
- STFIWX killed %5, %zero8, killed %6
+ STFIWX killed %5, $zero8, killed %6
%7 = LWZ 0, %stack.4 :: (load 4 from %stack.4)
%8 = LFS 4, %4 :: (load 4 from %ir.3, !tbaa !14)
- %10 = FCTIWUZ %8, implicit %rm
+ %10 = FCTIWUZ %8, implicit $rm
%11 = ADDI8 %stack.1, 0
- STFIWX killed %10, %zero8, killed %11
+ STFIWX killed %10, $zero8, killed %11
%12 = LWZ 0, %stack.1 :: (load 4 from %stack.1)
%13 = LFS 8, %4 :: (load 4 from %ir.5, !tbaa !14)
- %15 = FCTIWUZ %13, implicit %rm
+ %15 = FCTIWUZ %13, implicit $rm
%16 = ADDI8 %stack.2, 0
- STFIWX killed %15, %zero8, killed %16
+ STFIWX killed %15, $zero8, killed %16
%17 = LWZ 0, %stack.2 :: (load 4 from %stack.2)
%18 = LFS 12, %4 :: (load 4 from %ir.7, !tbaa !14)
- %20 = FCTIWUZ %18, implicit %rm
+ %20 = FCTIWUZ %18, implicit $rm
%21 = ADDI8 %stack.3, 0
- STFIWX killed %20, %zero8, killed %21
+ STFIWX killed %20, $zero8, killed %21
%22 = LWZ 0, %stack.3 :: (load 4 from %stack.3)
STW killed %7, 0, %stack.0 :: (store 4 into %stack.0, align 16)
STW killed %22, 12, %stack.0 :: (store 4 into %stack.0 + 12)
STW killed %17, 8, %stack.0 :: (store 4 into %stack.0 + 8, align 8)
STW killed %12, 4, %stack.0 :: (store 4 into %stack.0 + 4)
%23 = ADDI8 %stack.0, 0
- %24 = LVX %zero8, killed %23 :: (load 16 from %stack.0)
- %v2 = COPY %24
- BLR8 implicit %lr8, implicit %rm, implicit %v2
+ %24 = LVX $zero8, killed %23 :: (load 16 from %stack.0)
+ $v2 = COPY %24
+ BLR8 implicit $lr8, implicit $rm, implicit $v2
...
---
@@ -3143,8 +3143,8 @@ registers:
- { id: 12, class: f4rc, preferred-register: '' }
- { id: 13, class: f4rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -3167,10 +3167,10 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = COPY %1.sub_32
%3 = ADDI %2, 1
%5 = IMPLICIT_DEF
@@ -3186,9 +3186,9 @@ body: |
%12 = LFSX %0, killed %11 :: (load 4 from %ir.arrayidx3, !tbaa !14)
; CHECK: LFS -88, %0
; CHECK-LATE: lfs 1, -88(3)
- %13 = FADDS killed %7, killed %12, implicit %rm
- %f1 = COPY %13
- BLR8 implicit %lr8, implicit %rm, implicit %f1
+ %13 = FADDS killed %7, killed %12, implicit $rm
+ $f1 = COPY %13
+ BLR8 implicit $lr8, implicit $rm, implicit $f1
...
---
@@ -3216,8 +3216,8 @@ registers:
- { id: 12, class: vsfrc, preferred-register: '' }
- { id: 13, class: vsfrc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -3240,28 +3240,28 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = COPY %1.sub_32
%3 = ADDI %2, 1
%5 = IMPLICIT_DEF
%4 = INSERT_SUBREG %5, killed %3, 1
%6 = LI8 100
- %7 = LXSDX %0, killed %6, implicit %rm :: (load 8 from %ir.arrayidx, !tbaa !12)
+ %7 = LXSDX %0, killed %6, implicit $rm :: (load 8 from %ir.arrayidx, !tbaa !12)
; CHECK: LXSD 100, %0
; CHECK-LATE: lxsd 0, 100(3)
%8 = ADDI %2, 2
%10 = IMPLICIT_DEF
%9 = INSERT_SUBREG %10, killed %8, 1
%11 = LI8 -120
- %12 = LXSDX %0, killed %11, implicit %rm :: (load 8 from %ir.arrayidx3, !tbaa !12)
+ %12 = LXSDX %0, killed %11, implicit $rm :: (load 8 from %ir.arrayidx3, !tbaa !12)
; CHECK: LXSD -120, %0
; CHECK-LATE: lxsd 1, -120(3)
- %13 = XSADDDP killed %7, killed %12, implicit %rm
- %f1 = COPY %13
- BLR8 implicit %lr8, implicit %rm, implicit %f1
+ %13 = XSADDDP killed %7, killed %12, implicit $rm
+ $f1 = COPY %13
+ BLR8 implicit $lr8, implicit $rm, implicit $f1
...
---
@@ -3289,8 +3289,8 @@ registers:
- { id: 12, class: vssrc, preferred-register: '' }
- { id: 13, class: vssrc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -3313,10 +3313,10 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = COPY %1.sub_32
%3 = ADDI %2, 1
%5 = IMPLICIT_DEF
@@ -3333,8 +3333,8 @@ body: |
; CHECK: LXSSP -92, %0
; CHECK-LATE: lxssp 1, -92(3)
%13 = XSADDSP killed %7, killed %12
- %f1 = COPY %13
- BLR8 implicit %lr8, implicit %rm, implicit %f1
+ $f1 = COPY %13
+ BLR8 implicit $lr8, implicit $rm, implicit $f1
...
---
@@ -3362,8 +3362,8 @@ registers:
- { id: 12, class: vrrc, preferred-register: '' }
- { id: 13, class: vrrc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -3386,10 +3386,10 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = COPY %1.sub_32
%3 = ADDI %2, 1
%5 = IMPLICIT_DEF
@@ -3406,8 +3406,8 @@ body: |
; CHECK: LXV -16, %0
; CHECK-LATE: lxv 35, -16(3)
%13 = VADDUWM killed %12, killed %7
- %v2 = COPY %13
- BLR8 implicit %lr8, implicit %rm, implicit %v2
+ $v2 = COPY %13
+ BLR8 implicit $lr8, implicit $rm, implicit $v2
...
---
@@ -3425,8 +3425,8 @@ registers:
- { id: 2, class: gprc, preferred-register: '' }
- { id: 3, class: gprc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -3449,16 +3449,16 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
+ %1 = COPY $x4
%0 = LI 99
%3 = COPY %1.sub_32
%2 = OR %0, %3
; CHECK: ORI %3, 99
; CHECK-LATE: ori 3, 4, 99
- %x3 = EXTSW_32_64 %2
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = EXTSW_32_64 %2
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -3475,8 +3475,8 @@ registers:
- { id: 1, class: g8rc, preferred-register: '' }
- { id: 2, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -3499,15 +3499,15 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
+ %1 = COPY $x4
%0 = LI8 777
%2 = OR8 %1, %0
; CHECK: ORI8 %1, 777
; CHECK-LATE: ori 3, 4, 777
- %x3 = COPY %2
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %2
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -3523,7 +3523,7 @@ registers:
- { id: 0, class: gprc, preferred-register: '' }
- { id: 1, class: gprc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
+ - { reg: '$x3', virtual-reg: '%0' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -3546,14 +3546,14 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3
+ liveins: $x3
%0 = LI 777
%1 = ORI %0, 88
; CHECK: LI 857
; CHECK-LATE: li 3, 857
- %x3 = EXTSW_32_64 %1
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = EXTSW_32_64 %1
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -3569,7 +3569,7 @@ registers:
- { id: 0, class: g8rc, preferred-register: '' }
- { id: 1, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
+ - { reg: '$x3', virtual-reg: '%0' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -3592,14 +3592,14 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3
+ liveins: $x3
%0 = LI8 8721
%1 = ORI8 %0, 99
; CHECK: LI8 8819
; CHECK-LATE: li 3, 8819
- %x3 = COPY %1
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %1
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
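The LI/ORI hunks just above (and the RLDICL, RLWINM, XORI and XORI8 hunks later in this file) check plain immediate folding: when both inputs of the reg-reg op are known constants, the pair collapses to a single LI/LI8. A quick sanity check of the folded values in the CHECK lines, assuming ordinary integer semantics:

    assert 777 | 88 == 857                 # LI 777;  ORI 88       -> li 3, 857
    assert 8721 | 99 == 8819               # LI8 8721; ORI8 99     -> li 3, 8819
    assert 871 ^ 17 == 886                 # LI 871;  XORI 17      -> li 3, 886
    assert 453 ^ 17 == 468                 # LI8 453; XORI8 17     -> li 3, 468
    assert (1 << (64 - 49)) - 1 == 32767   # LI8 -1; RLDICL 53, 49 -> li 3, 32767
    assert (234 << 4) & 0x00000FF0 == 3744 # LI8 234; RLWINM8 4, 20, 27 -> li 3, 3744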
---
@@ -3618,8 +3618,8 @@ registers:
- { id: 3, class: gprc, preferred-register: '' }
- { id: 4, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -3642,17 +3642,17 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = COPY %1.sub_32
%3 = LI 14
%4 = RLDCL %0, killed %3, 0
; CHECK: RLDICL %0, 14, 0
; CHECK-LATE: rotldi 3, 3, 14
- %x3 = COPY %4
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %4
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -3673,8 +3673,8 @@ registers:
- { id: 5, class: crrc, preferred-register: '' }
- { id: 6, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -3697,19 +3697,19 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = RLDICL %1, 0, 58
%3 = LI 37
- %4 = RLDCLo %0, killed %3, 0, implicit-def %cr0
- ; CHECK: RLDICLo %0, 37, 0, implicit-def %cr0
+ %4 = RLDCLo %0, killed %3, 0, implicit-def $cr0
+ ; CHECK: RLDICLo %0, 37, 0, implicit-def $cr0
; CHECK-LATE: rldicl. 5, 3, 37, 0
- %5 = COPY killed %cr0
+ %5 = COPY killed $cr0
%6 = ISEL8 %2, %0, %5.sub_eq
- %x3 = COPY %6
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %6
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -3728,8 +3728,8 @@ registers:
- { id: 3, class: gprc, preferred-register: '' }
- { id: 4, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -3752,17 +3752,17 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = COPY %1.sub_32
%3 = LI 0
%4 = RLDCR %0, killed %3, 0
; CHECK: RLDICR %0, 0, 0
; CHECK-LATE: rldicr 3, 3, 0, 0
- %x3 = COPY %4
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %4
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -3783,8 +3783,8 @@ registers:
- { id: 5, class: crrc, preferred-register: '' }
- { id: 6, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -3807,19 +3807,19 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = RLDICL %1, 0, 58
%3 = LI 18
- %4 = RLDCRo %0, killed %3, 0, implicit-def %cr0
- ; CHECK: RLDICRo %0, 18, 0, implicit-def %cr0
+ %4 = RLDCRo %0, killed %3, 0, implicit-def $cr0
+ ; CHECK: RLDICRo %0, 18, 0, implicit-def $cr0
; CHECK-LATE: rldicr. 5, 3, 18, 0
- %5 = COPY killed %cr0
+ %5 = COPY killed $cr0
%6 = ISEL8 %2, %0, %5.sub_eq
- %x3 = COPY %6
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %6
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -3835,7 +3835,7 @@ registers:
- { id: 0, class: g8rc, preferred-register: '' }
- { id: 1, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
+ - { reg: '$x3', virtual-reg: '%0' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -3858,14 +3858,14 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3
+ liveins: $x3
%0 = LI8 -1
%1 = RLDICL %0, 53, 49
; CHECK: LI8 32767
; CHECK-LATE: li 3, 32767
- %x3 = COPY %1
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %1
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -3884,8 +3884,8 @@ registers:
- { id: 3, class: crrc, preferred-register: '' }
- { id: 4, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -3908,18 +3908,18 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
+ %1 = COPY $x4
%0 = LI8 -1
- %2 = RLDICLo %0, 53, 48, implicit-def %cr0
+ %2 = RLDICLo %0, 53, 48, implicit-def $cr0
; CHECK: ANDIo8 %0, 65535
; CHECK-LATE: li 3, -1
; CHECK-LATE: andi. 3, 3, 65535
- %3 = COPY killed %cr0
+ %3 = COPY killed $cr0
%4 = ISEL8 %1, %2, %3.sub_eq
- %x3 = COPY %4
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %4
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -3938,8 +3938,8 @@ registers:
- { id: 3, class: crrc, preferred-register: '' }
- { id: 4, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -3962,17 +3962,17 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
+ %1 = COPY $x4
%0 = LI8 200
- %2 = RLDICLo %0, 61, 3, implicit-def %cr0
+ %2 = RLDICLo %0, 61, 3, implicit-def $cr0
; CHECK-NOT: ANDI
; CHECK-LATE-NOT: andi.
- %3 = COPY killed %cr0
+ %3 = COPY killed $cr0
%4 = ISEL8 %1, %2, %3.sub_eq
- %x3 = COPY %4
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %4
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -3991,7 +3991,7 @@ registers:
- { id: 3, class: g8rc, preferred-register: '' }
- { id: 4, class: gprc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
+ - { reg: '$x3', virtual-reg: '%0' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -4014,17 +4014,17 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3
+ liveins: $x3
- %0 = COPY %x3
+ %0 = COPY $x3
%1 = COPY %0.sub_32
%3 = IMPLICIT_DEF
%2 = LI 17
%4 = RLWINM killed %2, 4, 20, 27
; CHECK: LI 272
; CHECK-LATE: li 3, 272
- %x3 = EXTSW_32_64 %4
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = EXTSW_32_64 %4
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -4043,7 +4043,7 @@ registers:
- { id: 3, class: g8rc, preferred-register: '' }
- { id: 4, class: gprc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
+ - { reg: '$x3', virtual-reg: '%0' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -4066,17 +4066,17 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3
+ liveins: $x3
- %0 = COPY %x3
+ %0 = COPY $x3
%1 = COPY %0.sub_32
%3 = IMPLICIT_DEF
%2 = LI 2
%4 = RLWINM killed %2, 31, 0, 31
; CHECK: LI 1
; CHECK-LATE: li 3, 1
- %x3 = EXTSW_32_64 %4
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = EXTSW_32_64 %4
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -4095,7 +4095,7 @@ registers:
- { id: 3, class: g8rc, preferred-register: '' }
- { id: 4, class: gprc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
+ - { reg: '$x3', virtual-reg: '%0' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -4118,17 +4118,17 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3
+ liveins: $x3
- %0 = COPY %x3
+ %0 = COPY $x3
%1 = COPY %0.sub_32
%3 = IMPLICIT_DEF
%2 = LI 1
%4 = RLWINM killed %2, 31, 0, 31
; CHECK: RLWINM killed %2, 31, 0, 31
; CHECK-LATE: rotlwi 3, 3, 31
- %x3 = EXTSW_32_64 %4
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = EXTSW_32_64 %4
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -4144,7 +4144,7 @@ registers:
- { id: 0, class: g8rc, preferred-register: '' }
- { id: 1, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
+ - { reg: '$x3', virtual-reg: '%0' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -4167,14 +4167,14 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3
+ liveins: $x3
%0 = LI8 234
%1 = RLWINM8 %0, 4, 20, 27
; CHECK: LI8 3744
; CHECK-LATE: li 3, 3744
- %x3 = COPY %1
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %1
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -4198,8 +4198,8 @@ registers:
- { id: 8, class: g8rc, preferred-register: '' }
- { id: 9, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -4222,23 +4222,23 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = COPY %1.sub_32
%3 = LI -22
- %4 = RLWINMo %3, 0, 24, 31, implicit-def %cr0
+ %4 = RLWINMo %3, 0, 24, 31, implicit-def $cr0
; CHECK: ANDIo %3, 234
; CHECK-LATE: li 3, -22
; CHECK-LATE: andi. 5, 3, 234
- %5 = COPY killed %cr0
+ %5 = COPY killed $cr0
%6 = ISEL %2, %3, %5.sub_eq
%8 = IMPLICIT_DEF
%7 = INSERT_SUBREG %8, killed %6, 1
%9 = RLDICL killed %7, 0, 32
- %x3 = COPY %9
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %9
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -4262,8 +4262,8 @@ registers:
- { id: 8, class: g8rc, preferred-register: '' }
- { id: 9, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -4286,22 +4286,22 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = COPY %1.sub_32
%3 = LI -22
- %4 = RLWINMo %3, 5, 24, 31, implicit-def %cr0
+ %4 = RLWINMo %3, 5, 24, 31, implicit-def $cr0
; CHECK-NOT: ANDI
; CHECK-LATE-NOT: andi.
- %5 = COPY killed %cr0
+ %5 = COPY killed $cr0
%6 = ISEL %2, %3, %5.sub_eq
%8 = IMPLICIT_DEF
%7 = INSERT_SUBREG %8, killed %6, 1
%9 = RLDICL killed %7, 0, 32
- %x3 = COPY %9
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %9
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -4324,8 +4324,8 @@ registers:
- { id: 7, class: crrc, preferred-register: '' }
- { id: 8, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -4348,20 +4348,20 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI8 -18
- %3 = RLWINM8o %2, 4, 20, 27, implicit-def %cr0
+ %3 = RLWINM8o %2, 4, 20, 27, implicit-def $cr0
; CHECK: ANDIo8 %2, 3808
; CHECK-LATE: li 3, -18
; CHECK-LATE: andi. 3, 3, 3808
- %7 = COPY killed %cr0
+ %7 = COPY killed $cr0
%6 = RLDICL killed %3, 0, 32
%8 = ISEL8 %1, %6, %7.sub_eq
- %x3 = COPY %8
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %8
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -4379,8 +4379,8 @@ registers:
- { id: 2, class: gprc, preferred-register: '' }
- { id: 3, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -4403,16 +4403,16 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI 13
%3 = SLD %0, killed %2
; CHECK: RLDICR %0, 13, 50
; CHECK-LATE: sldi 3, 3, 13
- %x3 = COPY %3
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %3
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -4432,8 +4432,8 @@ registers:
- { id: 4, class: crrc, preferred-register: '' }
- { id: 5, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -4456,18 +4456,18 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI 17
- %3 = SLDo %0, killed %2, implicit-def %cr0
- ; CHECK: RLDICRo %0, 17, 46, implicit-def %cr0
+ %3 = SLDo %0, killed %2, implicit-def $cr0
+ ; CHECK: RLDICRo %0, 17, 46, implicit-def $cr0
; CHECK-LATE: rldicr. 5, 3, 17, 46
- %4 = COPY killed %cr0
+ %4 = COPY killed $cr0
%5 = ISEL8 %1, %0, %4.sub_eq
- %x3 = COPY %5
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %5
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -4485,8 +4485,8 @@ registers:
- { id: 2, class: gprc, preferred-register: '' }
- { id: 3, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -4509,16 +4509,16 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI 4
%3 = SRD %0, killed %2
; CHECK: RLDICL %0, 60, 4
; CHECK-LATE: rldicl 3, 3, 60, 4
- %x3 = COPY %3
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %3
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -4538,8 +4538,8 @@ registers:
- { id: 4, class: crrc, preferred-register: '' }
- { id: 5, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -4562,18 +4562,18 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI 17
- %3 = SRDo %0, killed %2, implicit-def %cr0
- ; CHECK: RLDICLo %0, 47, 17, implicit-def %cr0
+ %3 = SRDo %0, killed %2, implicit-def $cr0
+ ; CHECK: RLDICLo %0, 47, 17, implicit-def $cr0
; CHECK-LATE: rldicl. 5, 3, 47, 17
- %4 = COPY killed %cr0
+ %4 = COPY killed $cr0
%5 = ISEL8 %1, %0, %4.sub_eq
- %x3 = COPY %5
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %5
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
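The SLD/SRD hunks above are rewritten to rotate-and-clear forms, and the immediates in the CHECK lines fall straight out of the rotate encoding (the SLW/SRW hunks below do the same at 32-bit width). A sketch of the encoding, assuming the usual rldicr/rldicl semantics:

    # sld rD, rS, n  ->  rldicr rD, rS, n, 63-n    (rotate left n, clear right of bit 63-n)
    # srd rD, rS, n  ->  rldicl rD, rS, 64-n, n    (rotate left 64-n, clear left of bit n)
    def sld_as_rldicr(n): return (n, 63 - n)
    def srd_as_rldicl(n): return ((64 - n) % 64, n)

    assert sld_as_rldicr(13) == (13, 50)   # CHECK: RLDICR %0, 13, 50
    assert sld_as_rldicr(17) == (17, 46)   # CHECK: RLDICRo %0, 17, 46
    assert srd_as_rldicl(4)  == (60, 4)    # CHECK: RLDICL %0, 60, 4
    assert srd_as_rldicl(17) == (47, 17)   # CHECK: RLDICLo %0, 47, 17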
---
@@ -4596,8 +4596,8 @@ registers:
- { id: 7, class: g8rc, preferred-register: '' }
- { id: 8, class: gprc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -4620,17 +4620,17 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = COPY %1.sub_32
%5 = LI 21
%8 = SLW killed %2, killed %5
; CHECK: RLWINM killed %2, 21, 0, 10
; CHECK-LATE: slwi 3, 4, 21
- %x3 = EXTSW_32_64 %8
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = EXTSW_32_64 %8
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -4654,8 +4654,8 @@ registers:
- { id: 8, class: g8rc, preferred-register: '' }
- { id: 9, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -4678,22 +4678,22 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI 11
%3 = COPY %0.sub_32
- %4 = SLWo %3, %2, implicit-def %cr0
- ; CHECK: RLWINMo %3, 11, 0, 20, implicit-def %cr0
+ %4 = SLWo %3, %2, implicit-def $cr0
+ ; CHECK: RLWINMo %3, 11, 0, 20, implicit-def $cr0
; CHECK-LATE: rlwinm. 5, 3, 11, 0, 20
- %5 = COPY killed %cr0
+ %5 = COPY killed $cr0
%6 = ISEL %2, %3, %5.sub_eq
%8 = IMPLICIT_DEF
%7 = INSERT_SUBREG %8, killed %6, 1
%9 = RLDICL killed %7, 0, 32
- %x3 = COPY %9
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %9
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -4716,8 +4716,8 @@ registers:
- { id: 7, class: g8rc, preferred-register: '' }
- { id: 8, class: gprc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -4740,17 +4740,17 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI 8
%5 = COPY %0.sub_32
%8 = SRW killed %5, killed %2
; CHECK: RLWINM killed %5, 24, 8, 31
; CHECK-LATE: srwi 3, 3, 8
- %x3 = EXTSW_32_64 %8
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = EXTSW_32_64 %8
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -4774,8 +4774,8 @@ registers:
- { id: 8, class: g8rc, preferred-register: '' }
- { id: 9, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -4798,22 +4798,22 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI 7
%3 = COPY %0.sub_32
- %4 = SRWo %3, %2, implicit-def %cr0
+ %4 = SRWo %3, %2, implicit-def $cr0
; CHECK: RLWINMo %3, 25, 7, 31
; CHECK-LATE: rlwinm. 5, 3, 25, 7, 31
- %5 = COPY killed %cr0
+ %5 = COPY killed $cr0
%6 = ISEL %2, %3, %5.sub_eq
%8 = IMPLICIT_DEF
%7 = INSERT_SUBREG %8, killed %6, 1
%9 = RLDICL killed %7, 0, 32
- %x3 = COPY %9
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %9
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -4833,8 +4833,8 @@ registers:
- { id: 4, class: gprc, preferred-register: '' }
- { id: 5, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -4857,18 +4857,18 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI 15
%3 = COPY %0.sub_32
- %4 = SRAW killed %3, killed %2, implicit-def dead %carry
- ; CHECK: SRAWI killed %3, 15, implicit-def dead %carry
+ %4 = SRAW killed %3, killed %2, implicit-def dead $carry
+ ; CHECK: SRAWI killed %3, 15, implicit-def dead $carry
; CHECK-LATE: srawi 3, 3, 15
%5 = EXTSW_32_64 killed %4
- %x3 = COPY %5
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %5
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -4890,8 +4890,8 @@ registers:
- { id: 6, class: gprc, preferred-register: '' }
- { id: 7, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -4914,20 +4914,20 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI 8
%3 = COPY %0.sub_32
- %4 = SRAWo killed %3, %2, implicit-def dead %carry, implicit-def %cr0
- ; CHECK: SRAWIo killed %3, 8, implicit-def dead %carry, implicit-def %cr0
+ %4 = SRAWo killed %3, %2, implicit-def dead $carry, implicit-def $cr0
+ ; CHECK: SRAWIo killed %3, 8, implicit-def dead $carry, implicit-def $cr0
; CHECK-LATE: srawi. 3, 3, 8
- %5 = COPY killed %cr0
+ %5 = COPY killed $cr0
%6 = ISEL %2, %4, %5.sub_eq
%7 = EXTSW_32_64 killed %6
- %x3 = COPY %7
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %7
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -4945,8 +4945,8 @@ registers:
- { id: 2, class: gprc, preferred-register: '' }
- { id: 3, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -4969,16 +4969,16 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI 44
- %3 = SRAD %0, killed %2, implicit-def dead %carry
- ; CHECK: SRADI %0, 44, implicit-def dead %carry
+ %3 = SRAD %0, killed %2, implicit-def dead $carry
+ ; CHECK: SRADI %0, 44, implicit-def dead $carry
; CHECK-LATE: sradi 3, 3, 44
- %x3 = COPY %3
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %3
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -4998,8 +4998,8 @@ registers:
- { id: 4, class: crrc, preferred-register: '' }
- { id: 5, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -5022,18 +5022,18 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
- %0 = COPY %x3
+ %1 = COPY $x4
+ %0 = COPY $x3
%2 = LI 61
- %3 = SRADo %0, killed %2, implicit-def dead %carry, implicit-def %cr0
- ; CHECK: SRADIo %0, 61, implicit-def dead %carry, implicit-def %cr0
+ %3 = SRADo %0, killed %2, implicit-def dead $carry, implicit-def $cr0
+ ; CHECK: SRADIo %0, 61, implicit-def dead $carry, implicit-def $cr0
; CHECK-LATE: sradi. 3, 3, 61
- %4 = COPY killed %cr0
+ %4 = COPY killed $cr0
%5 = ISEL8 %1, %3, %4.sub_eq
- %x3 = COPY %5
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %5
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -5062,9 +5062,9 @@ registers:
- { id: 13, class: g8rc_and_g8rc_nox0, preferred-register: '' }
- { id: 14, class: g8rc_and_g8rc_nox0, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
- - { reg: '%x5', virtual-reg: '%2' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
+ - { reg: '$x5', virtual-reg: '%2' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -5087,11 +5087,11 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4, %x5
+ liveins: $x3, $x4, $x5
- %2 = COPY %x5
- %1 = COPY %x4
- %0 = COPY %x3
+ %2 = COPY $x5
+ %1 = COPY $x4
+ %0 = COPY $x3
%3 = COPY %1.sub_32
%4 = COPY %2.sub_32
%5 = ADDI %4, 1
@@ -5108,7 +5108,7 @@ body: |
%14 = STBUX %3, %0, killed %12 :: (store 1 into %ir.arrayidx3, !tbaa !3)
; CHECK: STBU %3, 777, %0
; CHECK-LATE: 4, 777(3)
- BLR8 implicit %lr8, implicit %rm
+ BLR8 implicit $lr8, implicit $rm
...
---
@@ -5135,9 +5135,9 @@ registers:
- { id: 11, class: g8rc, preferred-register: '' }
- { id: 12, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
- - { reg: '%x5', virtual-reg: '%2' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
+ - { reg: '$x5', virtual-reg: '%2' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -5160,10 +5160,10 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4, %x5
+ liveins: $x3, $x4, $x5
- %2 = COPY %x5
- %1 = COPY %x4
+ %2 = COPY $x5
+ %1 = COPY $x4
%0 = LI8 975
%3 = COPY %1.sub_32
%4 = COPY %2.sub_32
@@ -5181,7 +5181,7 @@ body: |
STBX %3, %0, killed %12 :: (store 1 into %ir.arrayidx3, !tbaa !3)
; CHECK: STB %3, 975, killed %12
; CHECK-LATE: stb 4, 975(5)
- BLR8 implicit %lr8, implicit %rm
+ BLR8 implicit $lr8, implicit $rm
...
---
@@ -5210,9 +5210,9 @@ registers:
- { id: 13, class: g8rc_and_g8rc_nox0, preferred-register: '' }
- { id: 14, class: g8rc_and_g8rc_nox0, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
- - { reg: '%x5', virtual-reg: '%2' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
+ - { reg: '$x5', virtual-reg: '%2' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -5235,11 +5235,11 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4, %x5
+ liveins: $x3, $x4, $x5
- %2 = COPY %x5
- %1 = COPY %x4
- %0 = COPY %x3
+ %2 = COPY $x5
+ %1 = COPY $x4
+ %0 = COPY $x3
%3 = COPY %1.sub_32
%4 = COPY %2.sub_32
%5 = ADDI %4, 1
@@ -5256,7 +5256,7 @@ body: |
%14 = STHUX %3, %0, killed %12 :: (store 2 into %ir.arrayidx3, !tbaa !6)
; CHECK: STHU %3, -761, %0
; CHECK-LATE: sthu 4, -761(3)
- BLR8 implicit %lr8, implicit %rm
+ BLR8 implicit $lr8, implicit $rm
...
---
@@ -5283,9 +5283,9 @@ registers:
- { id: 11, class: g8rc, preferred-register: '' }
- { id: 12, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
- - { reg: '%x5', virtual-reg: '%2' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
+ - { reg: '$x5', virtual-reg: '%2' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -5308,11 +5308,11 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4, %x5
+ liveins: $x3, $x4, $x5
- %2 = COPY %x5
- %1 = COPY %x4
- %0 = COPY %x3
+ %2 = COPY $x5
+ %1 = COPY $x4
+ %0 = COPY $x3
%3 = COPY %1.sub_32
%4 = COPY %2.sub_32
%5 = ADDI %4, 1
@@ -5329,7 +5329,7 @@ body: |
STHX %3, %0, killed %12 :: (store 1 into %ir.arrayidx3, !tbaa !3)
; CHECK: STH %3, -900, %0
; CHECK-LATE: sth 4, -900(3)
- BLR8 implicit %lr8, implicit %rm
+ BLR8 implicit $lr8, implicit $rm
...
---
@@ -5358,9 +5358,9 @@ registers:
- { id: 13, class: g8rc_and_g8rc_nox0, preferred-register: '' }
- { id: 14, class: g8rc_and_g8rc_nox0, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
- - { reg: '%x5', virtual-reg: '%2' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
+ - { reg: '$x5', virtual-reg: '%2' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -5383,11 +5383,11 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4, %x5
+ liveins: $x3, $x4, $x5
- %2 = COPY %x5
- %1 = COPY %x4
- %0 = COPY %x3
+ %2 = COPY $x5
+ %1 = COPY $x4
+ %0 = COPY $x3
%3 = COPY %1.sub_32
%4 = COPY %2.sub_32
%5 = ADDI %4, 1
@@ -5404,7 +5404,7 @@ body: |
%14 = STWUX %3, %0, killed %12 :: (store 4 into %ir.arrayidx3, !tbaa !8)
; CHECK: STWU %3, 0, %0
; CHECK-LATE: stwu 4, 0(3)
- BLR8 implicit %lr8, implicit %rm
+ BLR8 implicit $lr8, implicit $rm
...
---
@@ -5431,9 +5431,9 @@ registers:
- { id: 11, class: g8rc, preferred-register: '' }
- { id: 12, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
- - { reg: '%x5', virtual-reg: '%2' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
+ - { reg: '$x5', virtual-reg: '%2' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -5456,11 +5456,11 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4, %x5
+ liveins: $x3, $x4, $x5
- %2 = COPY %x5
- %1 = COPY %x4
- %0 = COPY %x3
+ %2 = COPY $x5
+ %1 = COPY $x4
+ %0 = COPY $x3
%3 = COPY %1.sub_32
%4 = COPY %2.sub_32
%5 = ADDI %4, 1
@@ -5477,7 +5477,7 @@ body: |
STWX %3, %0, killed %12 :: (store 4 into %ir.arrayidx3, !tbaa !8)
; CHECK: STW %3, 99, %0
; CHECK-LATE: stw 4, 99(3)
- BLR8 implicit %lr8, implicit %rm
+ BLR8 implicit $lr8, implicit $rm
...
---
@@ -5505,9 +5505,9 @@ registers:
- { id: 12, class: g8rc_and_g8rc_nox0, preferred-register: '' }
- { id: 13, class: g8rc_and_g8rc_nox0, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
- - { reg: '%x5', virtual-reg: '%2' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
+ - { reg: '$x5', virtual-reg: '%2' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -5530,11 +5530,11 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4, %x5
+ liveins: $x3, $x4, $x5
- %2 = COPY %x5
- %1 = COPY %x4
- %0 = COPY %x3
+ %2 = COPY $x5
+ %1 = COPY $x4
+ %0 = COPY $x3
%3 = COPY %2.sub_32
%4 = ADDI %3, 1
%6 = IMPLICIT_DEF
@@ -5550,7 +5550,7 @@ body: |
%13 = STDUX %1, %0, killed %11 :: (store 8 into %ir.arrayidx3, !tbaa !10)
; CHECK: STDU %1, -8, %0
; CHECK-LATE: stdu 4, -8(3)
- BLR8 implicit %lr8, implicit %rm
+ BLR8 implicit $lr8, implicit $rm
...
---
@@ -5576,9 +5576,9 @@ registers:
- { id: 10, class: g8rc, preferred-register: '' }
- { id: 11, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
- - { reg: '%x5', virtual-reg: '%2' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
+ - { reg: '$x5', virtual-reg: '%2' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -5601,10 +5601,10 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4, %x5
+ liveins: $x3, $x4, $x5
- %2 = COPY %x5
- %1 = COPY %x4
+ %2 = COPY $x5
+ %1 = COPY $x4
%0 = LI8 1000
%3 = COPY %2.sub_32
%4 = ADDI %3, 1
@@ -5621,7 +5621,7 @@ body: |
STDX %1, %0, killed %11 :: (store 8 into %ir.arrayidx3, !tbaa !10)
; CHECK: STD %1, 1000, killed %11
; CHECK-LATE: 4, 1000(6)
- BLR8 implicit %lr8, implicit %rm
+ BLR8 implicit $lr8, implicit $rm
...
---
@@ -5647,9 +5647,9 @@ registers:
- { id: 10, class: g8rc, preferred-register: '' }
- { id: 11, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%f1', virtual-reg: '%1' }
- - { reg: '%x5', virtual-reg: '%2' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$f1', virtual-reg: '%1' }
+ - { reg: '$x5', virtual-reg: '%2' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -5672,11 +5672,11 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %f1, %x5
+ liveins: $x3, $f1, $x5
- %2 = COPY %x5
- %1 = COPY %f1
- %0 = COPY %x3
+ %2 = COPY $x5
+ %1 = COPY $f1
+ %0 = COPY $x3
%3 = COPY %2.sub_32
%4 = ADDI %3, 1
%6 = IMPLICIT_DEF
@@ -5692,7 +5692,7 @@ body: |
STFSX %1, %0, killed %11 :: (store 4 into %ir.arrayidx3, !tbaa !14)
; CHECK: STFS %1, -401, %0
; CHECK-LATE: stfs 1, -401(3)
- BLR8 implicit %lr8, implicit %rm
+ BLR8 implicit $lr8, implicit $rm
...
---
@@ -5720,9 +5720,9 @@ registers:
- { id: 12, class: g8rc_and_g8rc_nox0, preferred-register: '' }
- { id: 13, class: g8rc_and_g8rc_nox0, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%f1', virtual-reg: '%1' }
- - { reg: '%x5', virtual-reg: '%2' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$f1', virtual-reg: '%1' }
+ - { reg: '$x5', virtual-reg: '%2' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -5745,11 +5745,11 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %f1, %x5
+ liveins: $x3, $f1, $x5
- %2 = COPY %x5
- %1 = COPY %f1
- %0 = COPY %x3
+ %2 = COPY $x5
+ %1 = COPY $f1
+ %0 = COPY $x3
%3 = COPY %2.sub_32
%4 = ADDI %3, 1
%6 = IMPLICIT_DEF
@@ -5765,7 +5765,7 @@ body: |
%13 = STFSUX %1, %0, killed %11 :: (store 4 into %ir.arrayidx3, !tbaa !14)
; CHECK: STFSU %1, 987, %0
; CHECK-LATE: stfsu 1, 987(3)
- BLR8 implicit %lr8, implicit %rm
+ BLR8 implicit $lr8, implicit $rm
...
---
@@ -5791,9 +5791,9 @@ registers:
- { id: 10, class: g8rc, preferred-register: '' }
- { id: 11, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%f1', virtual-reg: '%1' }
- - { reg: '%x5', virtual-reg: '%2' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$f1', virtual-reg: '%1' }
+ - { reg: '$x5', virtual-reg: '%2' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -5816,11 +5816,11 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %f1, %x5
+ liveins: $x3, $f1, $x5
- %2 = COPY %x5
- %1 = COPY %f1
- %0 = COPY %x3
+ %2 = COPY $x5
+ %1 = COPY $f1
+ %0 = COPY $x3
%3 = COPY %2.sub_32
%4 = ADDI %3, 1
%6 = IMPLICIT_DEF
@@ -5836,7 +5836,7 @@ body: |
STFDX %1, %0, killed %11 :: (store 8 into %ir.arrayidx3, !tbaa !12)
; CHECK: STFD %1, -873, %0
; CHECK-LATE: stfd 1, -873(3)
- BLR8 implicit %lr8, implicit %rm
+ BLR8 implicit $lr8, implicit $rm
...
---
@@ -5864,9 +5864,9 @@ registers:
- { id: 12, class: g8rc_and_g8rc_nox0, preferred-register: '' }
- { id: 13, class: g8rc_and_g8rc_nox0, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%f1', virtual-reg: '%1' }
- - { reg: '%x5', virtual-reg: '%2' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$f1', virtual-reg: '%1' }
+ - { reg: '$x5', virtual-reg: '%2' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -5889,11 +5889,11 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %f1, %x5
+ liveins: $x3, $f1, $x5
- %2 = COPY %x5
- %1 = COPY %f1
- %0 = COPY %x3
+ %2 = COPY $x5
+ %1 = COPY $f1
+ %0 = COPY $x3
%3 = COPY %2.sub_32
%4 = ADDI %3, 1
%6 = IMPLICIT_DEF
@@ -5909,7 +5909,7 @@ body: |
%13 = STFDUX %1, %0, killed %11 :: (store 8 into %ir.arrayidx3, !tbaa !12)
; CHECK: STFDU %1, 6477, %0
; CHECK-LATE: stfdu 1, 6477(3)
- BLR8 implicit %lr8, implicit %rm
+ BLR8 implicit $lr8, implicit $rm
...
---
@@ -5927,9 +5927,9 @@ registers:
- { id: 2, class: g8rc, preferred-register: '' }
- { id: 3, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%f1', virtual-reg: '%1' }
- - { reg: '%x5', virtual-reg: '%2' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$f1', virtual-reg: '%1' }
+ - { reg: '$x5', virtual-reg: '%2' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -5952,16 +5952,16 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %f1, %x5
+ liveins: $x3, $f1, $x5
- %2 = COPY %x5
- %1 = COPY %f1
- %0 = COPY %x3
+ %2 = COPY $x5
+ %1 = COPY $f1
+ %0 = COPY $x3
%3 = LI8 444
STXSSPX %1, %0, killed %3 :: (store 4 into %ir.arrayidx, !tbaa !14)
; CHECK: STXSSP %1, 444, %0
; CHECK-LATE: stxssp 1, 444(3)
- BLR8 implicit %lr8, implicit %rm
+ BLR8 implicit $lr8, implicit $rm
...
---
@@ -5979,9 +5979,9 @@ registers:
- { id: 2, class: g8rc, preferred-register: '' }
- { id: 3, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%f1', virtual-reg: '%1' }
- - { reg: '%x5', virtual-reg: '%2' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$f1', virtual-reg: '%1' }
+ - { reg: '$x5', virtual-reg: '%2' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -6004,16 +6004,16 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %f1, %x5
+ liveins: $x3, $f1, $x5
- %2 = COPY %x5
- %1 = COPY %f1
- %0 = COPY %x3
+ %2 = COPY $x5
+ %1 = COPY $f1
+ %0 = COPY $x3
%3 = LI8 4
- STXSDX %1, %0, killed %3, implicit %rm :: (store 8 into %ir.arrayidx, !tbaa !12)
+ STXSDX %1, %0, killed %3, implicit $rm :: (store 8 into %ir.arrayidx, !tbaa !12)
; CHECK: STXSD %1, 4, %0
; CHECK-LATE: stxsd 1, 4(3)
- BLR8 implicit %lr8, implicit %rm
+ BLR8 implicit $lr8, implicit $rm
...
---
@@ -6031,9 +6031,9 @@ registers:
- { id: 2, class: g8rc, preferred-register: '' }
- { id: 3, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%v2', virtual-reg: '%1' }
- - { reg: '%x7', virtual-reg: '%2' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$v2', virtual-reg: '%1' }
+ - { reg: '$x7', virtual-reg: '%2' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -6056,16 +6056,16 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %v2, %x7
+ liveins: $x3, $v2, $x7
- %2 = COPY %x7
- %1 = COPY %v2
+ %2 = COPY $x7
+ %1 = COPY $v2
%0 = LI8 16
%3 = RLDICR %2, 4, 59
STXVX %1, %0, killed %3 :: (store 16 into %ir.arrayidx, !tbaa !3)
; CHECK: STXV %1, 16, killed %3
; CHECK-LATE: stxv 34, 16(4)
- BLR8 implicit %lr8, implicit %rm
+ BLR8 implicit $lr8, implicit $rm
...
---
@@ -6088,10 +6088,10 @@ registers:
- { id: 7, class: gprc, preferred-register: '' }
- { id: 8, class: gprc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
- - { reg: '%x5', virtual-reg: '%2' }
- - { reg: '%x6', virtual-reg: '%3' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
+ - { reg: '$x5', virtual-reg: '%2' }
+ - { reg: '$x6', virtual-reg: '%3' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -6114,22 +6114,22 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4, %x5, %x6
+ liveins: $x3, $x4, $x5, $x6
- %3 = COPY %x6
- %2 = COPY %x5
- %1 = COPY %x4
+ %3 = COPY $x6
+ %2 = COPY $x5
+ %1 = COPY $x4
%6 = COPY %3.sub_32
%7 = COPY %2.sub_32
%8 = COPY %1.sub_32
%0 = LI 55
- %4 = SUBFC %7, %0, implicit-def %carry
+ %4 = SUBFC %7, %0, implicit-def $carry
; CHECK: SUBFIC %7, 55
; CHECK-LATE: subfic 3, 5, 55
- %5 = SUBFE %6, %8, implicit-def dead %carry, implicit %carry
- %x3 = EXTSW_32_64 %4
- %x4 = EXTSW_32_64 %5
- BLR8 implicit %lr8, implicit %rm, implicit %x3, implicit %x4
+ %5 = SUBFE %6, %8, implicit-def dead $carry, implicit $carry
+ $x3 = EXTSW_32_64 %4
+ $x4 = EXTSW_32_64 %5
+ BLR8 implicit $lr8, implicit $rm, implicit $x3, implicit $x4
...
---
@@ -6149,10 +6149,10 @@ registers:
- { id: 4, class: g8rc, preferred-register: '' }
- { id: 5, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
- - { reg: '%x5', virtual-reg: '%2' }
- - { reg: '%x6', virtual-reg: '%3' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
+ - { reg: '$x5', virtual-reg: '%2' }
+ - { reg: '$x6', virtual-reg: '%3' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -6175,19 +6175,19 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4, %x5, %x6
+ liveins: $x3, $x4, $x5, $x6
- %3 = COPY %x6
- %2 = COPY %x5
- %1 = COPY %x4
+ %3 = COPY $x6
+ %2 = COPY $x5
+ %1 = COPY $x4
%0 = LI8 7635
- %4 = SUBFC8 %2, %0, implicit-def %carry
+ %4 = SUBFC8 %2, %0, implicit-def $carry
; CHECK: SUBFIC8 %2, 7635
; CHECK-LATE: subfic 3, 5, 7635
- %5 = SUBFE8 %3, %1, implicit-def dead %carry, implicit %carry
- %x3 = COPY %4
- %x4 = COPY %5
- BLR8 implicit %lr8, implicit %rm, implicit %x3, implicit %x4
+ %5 = SUBFE8 %3, %1, implicit-def dead $carry, implicit $carry
+ $x3 = COPY %4
+ $x4 = COPY %5
+ BLR8 implicit $lr8, implicit $rm, implicit $x3, implicit $x4
...
---
@@ -6205,8 +6205,8 @@ registers:
- { id: 2, class: gprc, preferred-register: '' }
- { id: 3, class: gprc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -6229,16 +6229,16 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
%1 = LI 10101
- %0 = COPY %x3
+ %0 = COPY $x3
%3 = COPY %0.sub_32
%2 = XOR %1, %3
; CHECK: XORI %3, 10101
; CHECK-LATE: 3, 3, 10101
- %x3 = EXTSW_32_64 %2
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = EXTSW_32_64 %2
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -6255,8 +6255,8 @@ registers:
- { id: 1, class: g8rc, preferred-register: '' }
- { id: 2, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
- - { reg: '%x4', virtual-reg: '%1' }
+ - { reg: '$x3', virtual-reg: '%0' }
+ - { reg: '$x4', virtual-reg: '%1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -6279,15 +6279,15 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %1 = COPY %x4
+ %1 = COPY $x4
%0 = LI8 5535
%2 = XOR8 %1, %0
; CHECK: XORI8 %1, 5535
; CHECK-LATE: xori 3, 4, 5535
- %x3 = COPY %2
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %2
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -6303,7 +6303,7 @@ registers:
- { id: 0, class: gprc, preferred-register: '' }
- { id: 1, class: gprc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
+ - { reg: '$x3', virtual-reg: '%0' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -6326,14 +6326,14 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3
+ liveins: $x3
%0 = LI 871
%1 = XORI %0, 17
; CHECK: LI 886
; CHECK-LATE: li 3, 886
- %x3 = EXTSW_32_64 %1
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = EXTSW_32_64 %1
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
---
@@ -6349,7 +6349,7 @@ registers:
- { id: 0, class: g8rc, preferred-register: '' }
- { id: 1, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x3', virtual-reg: '%0' }
+ - { reg: '$x3', virtual-reg: '%0' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -6372,13 +6372,13 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x3
+ liveins: $x3
%0 = LI8 453
%1 = XORI8 %0, 17
; CHECK: LI8 468
; CHECK-LATE: li 3, 468
- %x3 = COPY %1
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %1
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
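Every hunk in this file follows the same one-line rule: '%' followed by a name is a physical register and moves to the '$' sigil, while '%' followed by a digit (a virtual register) and the %stack./%ir./%bb. references keep '%'. A minimal Python sketch of that rule, purely illustrative and not the tooling that produced these diffs:

    import re

    # %x3, %cr0gt, %carry, %zero8 ... -> $x3, $cr0gt, $carry, $zero8
    # %0, %13 (virtual regs) and %stack.0 / %ir.foo / %bb.1 stay as-is.
    PHYSREG = re.compile(r"%(?!\d|stack\.|ir\.|bb\.|subreg\.)([A-Za-z_][A-Za-z0-9_]*)")

    def to_dollar_sigil(line: str) -> str:
        return PHYSREG.sub(r"$\1", line)

    assert to_dollar_sigil("    liveins: %x3, %x4") == "    liveins: $x3, $x4"
    assert to_dollar_sigil("%1 = COPY %x4") == "%1 = COPY $x4"
    assert to_dollar_sigil("STW killed %7, 0, %stack.0") == "STW killed %7, 0, %stack.0"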
Modified: llvm/trunk/test/CodeGen/PowerPC/debuginfo-split-int.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/debuginfo-split-int.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/debuginfo-split-int.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/debuginfo-split-int.ll Wed Jan 31 14:04:26 2018
@@ -26,10 +26,10 @@ target triple = "ppc32"
; CHECK: [[DL:![0-9]+]] = !DILocalVariable(name: "result"
;
; High 32 bits in R3, low 32 bits in R4
-; CHECK: %0:gprc = COPY %r3
-; CHECK: DBG_VALUE debug-use %0, debug-use %noreg, [[DL]], !DIExpression(DW_OP_LLVM_fragment, 0, 32)
-; CHECK: %1:gprc = COPY %r4
-; CHECK: DBG_VALUE debug-use %1, debug-use %noreg, [[DL]], !DIExpression(DW_OP_LLVM_fragment, 32, 32)
+; CHECK: %0:gprc = COPY $r3
+; CHECK: DBG_VALUE debug-use %0, debug-use $noreg, [[DL]], !DIExpression(DW_OP_LLVM_fragment, 0, 32)
+; CHECK: %1:gprc = COPY $r4
+; CHECK: DBG_VALUE debug-use %1, debug-use $noreg, [[DL]], !DIExpression(DW_OP_LLVM_fragment, 32, 32)
define void @bar() local_unnamed_addr #0 !dbg !6 {
%1 = alloca i64, align 8
%2 = tail call i64 @foo()
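The two fragment CHECKs above describe one 64-bit value spread over two 32-bit GPRs on big-endian ppc32. A toy illustration of what the fragment comments claim, a sketch rather than LLVM's DWARF machinery:

    def split_i64(val: int) -> tuple[int, int]:
        r3 = (val >> 32) & 0xFFFFFFFF   # fragment (0, 32): high half
        r4 = val & 0xFFFFFFFF           # fragment (32, 32): low half
        return r3, r4

    assert split_i64(0x1122334455667788) == (0x11223344, 0x55667788)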
Modified: llvm/trunk/test/CodeGen/PowerPC/debuginfo-stackarg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/debuginfo-stackarg.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/debuginfo-stackarg.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/debuginfo-stackarg.ll Wed Jan 31 14:04:26 2018
@@ -33,7 +33,7 @@ define i64 @foo(i64 %bar1, i64 %bar2, i6
; We expect to find a DBG_VALUE referring to the metadata id for bar5, using the lowest
; of the two fixed stack offsets found earlier.
; CHECK-LABEL: body:
-; CHECK: DBG_VALUE %r1, 0, !17, !DIExpression(DW_OP_plus_uconst, 56)
+; CHECK: DBG_VALUE $r1, 0, !17, !DIExpression(DW_OP_plus_uconst, 56)
entry:
tail call void @llvm.dbg.value(metadata i64 %bar1, metadata !13, metadata !DIExpression()), !dbg !18
tail call void @llvm.dbg.value(metadata i64 %bar2, metadata !14, metadata !DIExpression()), !dbg !19
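The expression checked above is the simplest DWARF location arithmetic: bar5 lives in memory at $r1 plus a 56-byte offset, the lower of the two fixed-stack offsets the test scans for. Evaluated by hand (a toy sketch, not LLVM's expression evaluator):

    def bar5_address(r1: int) -> int:
        return r1 + 56      # DBG_VALUE $r1, 0, !17, ... DW_OP_plus_uconst, 56

    assert bar5_address(0x7FFF_0000) == 0x7FFF_0038   # 56 == 0x38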
Modified: llvm/trunk/test/CodeGen/PowerPC/expand-isel-1.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/expand-isel-1.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/expand-isel-1.mir (original)
+++ llvm/trunk/test/CodeGen/PowerPC/expand-isel-1.mir Wed Jan 31 14:04:26 2018
@@ -22,8 +22,8 @@ regBankSelected: false
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%x0' }
- - { reg: '%x3' }
+ - { reg: '$x0' }
+ - { reg: '$x3' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -40,18 +40,18 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.entry:
- liveins: %x0, %x3
+ liveins: $x0, $x3
- %r5 = ADDI %r3, 1
- %cr0 = CMPWI %r3, 0
- %r0 = ISEL %zero, %r0, %cr0gt
+ $r5 = ADDI $r3, 1
+ $cr0 = CMPWI $r3, 0
+ $r0 = ISEL $zero, $r0, $cr0gt
; CHECK-LABEL: testExpandISEL
- ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]]
+ ; CHECK: BC $cr0gt, %[[TRUE:bb.[0-9]+]]
; CHECK-NEXT: B %[[SUCCESSOR:bb.[0-9]+]]
; CHECK: [[TRUE]]
- ; CHECK: %r0 = ADDI %zero, 0
+ ; CHECK: $r0 = ADDI $zero, 0
- %x3 = EXTSW_32_64 %r0
+ $x3 = EXTSW_32_64 $r0
...
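This test and the expand-isel-2 through expand-isel-10 files below all exercise the same ppc-expand-isel rewrite: an ISEL (rD = cond ? rA : rB) becomes a branch diamond, with copies that would be self-moves dropped. A Python sketch of the shape the CHECK lines verify, illustrative pseudocode rather than the pass's actual C++:

    def expand_isel(rd, ra, rb, cond):
        if rd == ra == rb:                        # expand-isel-10: ISEL disappears
            return []
        seq = [f"BC {cond}, true_bb"]
        if rd != rb:
            seq.append(f"{rd} = ORI {rb}, 0")     # false arm: rd = rb
        seq += ["B succ_bb", "true_bb:"]
        if rd != ra:
            seq.append(f"{rd} = ADDI {ra}, 0")    # true arm: rd = ra
        seq.append("succ_bb:")
        return seq

    # expand-isel-1 above: $r0 = ISEL $zero, $r0, $cr0gt (no false-arm copy)
    assert expand_isel("$r0", "$zero", "$r0", "$cr0gt") == [
        "BC $cr0gt, true_bb", "B succ_bb", "true_bb:",
        "$r0 = ADDI $zero, 0", "succ_bb:"]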
Modified: llvm/trunk/test/CodeGen/PowerPC/expand-isel-10.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/expand-isel-10.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/expand-isel-10.mir (original)
+++ llvm/trunk/test/CodeGen/PowerPC/expand-isel-10.mir Wed Jan 31 14:04:26 2018
@@ -23,7 +23,7 @@ regBankSelected: false
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%x3' }
+ - { reg: '$x3' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -40,15 +40,15 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.entry:
- liveins: %x3
+ liveins: $x3
- %r5 = ADDI %r3, 1
- %cr0 = CMPWI %r3, 0
- %r3 = ISEL %r3, %r3, %cr0gt
- %x3 = EXTSW_32_64 %r3
- ; CHECK: %r5 = ADDI %r3, 1
- ; CHECK: %cr0 = CMPWI %r3, 0
- ; CHECK-NOT: %r3 = ISEL %r3, %r3, %cr0gt
- ; CHECK: %x3 = EXTSW_32_64 %r3
+ $r5 = ADDI $r3, 1
+ $cr0 = CMPWI $r3, 0
+ $r3 = ISEL $r3, $r3, $cr0gt
+ $x3 = EXTSW_32_64 $r3
+ ; CHECK: $r5 = ADDI $r3, 1
+ ; CHECK: $cr0 = CMPWI $r3, 0
+ ; CHECK-NOT: $r3 = ISEL $r3, $r3, $cr0gt
+ ; CHECK: $x3 = EXTSW_32_64 $r3
...
Modified: llvm/trunk/test/CodeGen/PowerPC/expand-isel-2.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/expand-isel-2.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/expand-isel-2.mir (original)
+++ llvm/trunk/test/CodeGen/PowerPC/expand-isel-2.mir Wed Jan 31 14:04:26 2018
@@ -22,9 +22,9 @@ regBankSelected: false
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%x0' }
- - { reg: '%x3' }
- - { reg: '%x4' }
+ - { reg: '$x0' }
+ - { reg: '$x3' }
+ - { reg: '$x4' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -41,17 +41,17 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.entry:
- liveins: %x0, %x3, %x4
+ liveins: $x0, $x3, $x4
- %r5 = ADDI %r3, 1
- %cr0 = CMPWI %r3, 0
- %r3 = ISEL %zero, %r4, %cr0gt
- ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]]
+ $r5 = ADDI $r3, 1
+ $cr0 = CMPWI $r3, 0
+ $r3 = ISEL $zero, $r4, $cr0gt
+ ; CHECK: BC $cr0gt, %[[TRUE:bb.[0-9]+]]
; CHECK: %[[FALSE:bb.[0-9]+]]
- ; CHECK: %r3 = ORI %r4, 0
+ ; CHECK: $r3 = ORI $r4, 0
; CHECK: B %[[SUCCESSOR:bb.[0-9]+]]
; CHECK: [[TRUE]]
- ; CHECK: %r3 = ADDI %zero, 0
+ ; CHECK: $r3 = ADDI $zero, 0
- %x3 = EXTSW_32_64 %r3
+ $x3 = EXTSW_32_64 $r3
...
Modified: llvm/trunk/test/CodeGen/PowerPC/expand-isel-3.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/expand-isel-3.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/expand-isel-3.mir (original)
+++ llvm/trunk/test/CodeGen/PowerPC/expand-isel-3.mir Wed Jan 31 14:04:26 2018
@@ -22,9 +22,9 @@ regBankSelected: false
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%x0' }
- - { reg: '%x3' }
- - { reg: '%x4' }
+ - { reg: '$x0' }
+ - { reg: '$x3' }
+ - { reg: '$x4' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -41,18 +41,18 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.entry:
- liveins: %x0, %x3, %x4
+ liveins: $x0, $x3, $x4
- %r5 = ADDI %r3, 1
- %cr0 = CMPWI %r3, 0
- %r3 = ISEL %r4, %r0, %cr0gt
- ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]]
+ $r5 = ADDI $r3, 1
+ $cr0 = CMPWI $r3, 0
+ $r3 = ISEL $r4, $r0, $cr0gt
+ ; CHECK: BC $cr0gt, %[[TRUE:bb.[0-9]+]]
; CHECK: %[[FALSE:bb.[0-9]+]]
- ; CHECK: %r3 = ORI %r0, 0
+ ; CHECK: $r3 = ORI $r0, 0
; CHECK: B %[[SUCCESSOR:bb.[0-9]+]]
; CHECK: [[TRUE]]
- ; CHECK: %r3 = ADDI %r4, 0
+ ; CHECK: $r3 = ADDI $r4, 0
- %x3 = EXTSW_32_64 %r3
+ $x3 = EXTSW_32_64 $r3
...
Modified: llvm/trunk/test/CodeGen/PowerPC/expand-isel-4.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/expand-isel-4.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/expand-isel-4.mir (original)
+++ llvm/trunk/test/CodeGen/PowerPC/expand-isel-4.mir Wed Jan 31 14:04:26 2018
@@ -1,5 +1,5 @@
# This file tests the scenario: ISEL R0, ZERO, RX, CR (X != 0)
-# It also tests redundant liveins (%x7) and killed registers.
+# It also tests redundant liveins ($x7) and killed registers.
# RUN: llc -ppc-gen-isel=false -run-pass ppc-expand-isel -o - %s | FileCheck %s
--- |
@@ -23,9 +23,9 @@ regBankSelected: false
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%x0' }
- - { reg: '%x3' }
- - { reg: '%x7' }
+ - { reg: '$x0' }
+ - { reg: '$x3' }
+ - { reg: '$x7' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -42,18 +42,18 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.entry:
- liveins: %x0, %x3, %x7
+ liveins: $x0, $x3, $x7
- %r5 = ADDI %r3, 1
- %cr0 = CMPWI %r3, 0
- %r0 = ISEL killed %zero, killed %r5, killed %cr0gt, implicit killed %cr0
- ; CHECK: BC killed %cr0gt, %[[TRUE:bb.[0-9]+]]
+ $r5 = ADDI $r3, 1
+ $cr0 = CMPWI $r3, 0
+ $r0 = ISEL killed $zero, killed $r5, killed $cr0gt, implicit killed $cr0
+ ; CHECK: BC killed $cr0gt, %[[TRUE:bb.[0-9]+]]
; CHECK: %[[FALSE:bb.[0-9]+]]
- ; CHECK: %r0 = ORI killed %r5, 0
+ ; CHECK: $r0 = ORI killed $r5, 0
; CHECK: B %[[SUCCESSOR:bb.[0-9]+]]
; CHECK: [[TRUE]]
- ; CHECK: %r0 = ADDI killed %zero, 0
+ ; CHECK: $r0 = ADDI killed $zero, 0
- %x0 = EXTSW_32_64 killed %r0
+ $x0 = EXTSW_32_64 killed $r0
...
Modified: llvm/trunk/test/CodeGen/PowerPC/expand-isel-5.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/expand-isel-5.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/expand-isel-5.mir (original)
+++ llvm/trunk/test/CodeGen/PowerPC/expand-isel-5.mir Wed Jan 31 14:04:26 2018
@@ -22,8 +22,8 @@ regBankSelected: false
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%x0' }
- - { reg: '%x3' }
+ - { reg: '$x0' }
+ - { reg: '$x3' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -40,15 +40,15 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.entry:
- liveins: %x0, %x3
+ liveins: $x0, $x3
- %r5 = ADDI %r3, 1
- %cr0 = CMPWI %r3, 0
- %r0 = ISEL %r5, %r0, %cr0gt
- ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]]
+ $r5 = ADDI $r3, 1
+ $cr0 = CMPWI $r3, 0
+ $r0 = ISEL $r5, $r0, $cr0gt
+ ; CHECK: BC $cr0gt, %[[TRUE:bb.[0-9]+]]
; CHECK: B %[[SUCCESSOR:bb.[0-9]+]]
; CHECK: [[TRUE]]
- ; CHECK: %r0 = ADDI %r5, 0
- %x3 = EXTSW_32_64 %r0
+ ; CHECK: $r0 = ADDI $r5, 0
+ $x3 = EXTSW_32_64 $r0
...
Modified: llvm/trunk/test/CodeGen/PowerPC/expand-isel-6.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/expand-isel-6.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/expand-isel-6.mir (original)
+++ llvm/trunk/test/CodeGen/PowerPC/expand-isel-6.mir Wed Jan 31 14:04:26 2018
@@ -23,8 +23,8 @@ regBankSelected: false
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%x0' }
- - { reg: '%x3' }
+ - { reg: '$x0' }
+ - { reg: '$x3' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -41,17 +41,17 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.entry:
- liveins: %x0, %x3
+ liveins: $x0, $x3
- %r5 = ADDI %r3, 1
- %cr0 = CMPWI %r3, 0
- %r3 = ISEL %zero, %r0, %cr0gt
- ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]]
+ $r5 = ADDI $r3, 1
+ $cr0 = CMPWI $r3, 0
+ $r3 = ISEL $zero, $r0, $cr0gt
+ ; CHECK: BC $cr0gt, %[[TRUE:bb.[0-9]+]]
; CHECK: %[[FALSE:bb.[0-9]+]]
- ; CHECK: %r3 = ORI %r0, 0
+ ; CHECK: $r3 = ORI $r0, 0
; CHECK: B %[[SUCCESSOR:bb.[0-9]+]]
; CHECK: [[TRUE]]
- ; CHECK: %r3 = ADDI %zero, 0
+ ; CHECK: $r3 = ADDI $zero, 0
...
Modified: llvm/trunk/test/CodeGen/PowerPC/expand-isel-7.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/expand-isel-7.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/expand-isel-7.mir (original)
+++ llvm/trunk/test/CodeGen/PowerPC/expand-isel-7.mir Wed Jan 31 14:04:26 2018
@@ -22,9 +22,9 @@ regBankSelected: false
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%x3' }
- - { reg: '%x4' }
- - { reg: '%x5' }
+ - { reg: '$x3' }
+ - { reg: '$x4' }
+ - { reg: '$x5' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -41,18 +41,18 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.entry:
- liveins: %x3, %x4, %x5
+ liveins: $x3, $x4, $x5
- %r4 = ADDI %r3, 1
- %cr0 = CMPWI %r3, 0
- %r5 = ISEL %r3, %r4, %cr0gt
- ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]]
+ $r4 = ADDI $r3, 1
+ $cr0 = CMPWI $r3, 0
+ $r5 = ISEL $r3, $r4, $cr0gt
+ ; CHECK: BC $cr0gt, %[[TRUE:bb.[0-9]+]]
; CHECK: %[[FALSE:bb.[0-9]+]]
- ; CHECK: %r5 = ORI %r4, 0
+ ; CHECK: $r5 = ORI $r4, 0
; CHECK: B %[[SUCCESSOR:bb.[0-9]+]]
; CHECK: [[TRUE]]
- ; CHECK: %r5 = ADDI %r3, 0
+ ; CHECK: $r5 = ADDI $r3, 0
- %x5 = EXTSW_32_64 %r5
+ $x5 = EXTSW_32_64 $r5
...
Modified: llvm/trunk/test/CodeGen/PowerPC/expand-isel-8.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/expand-isel-8.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/expand-isel-8.mir (original)
+++ llvm/trunk/test/CodeGen/PowerPC/expand-isel-8.mir Wed Jan 31 14:04:26 2018
@@ -22,9 +22,9 @@ regBankSelected: false
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%x3' }
- - { reg: '%x4' }
- - { reg: '%x5' }
+ - { reg: '$x3' }
+ - { reg: '$x4' }
+ - { reg: '$x5' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -41,25 +41,25 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.entry:
- liveins: %x3, %x4, %x5
+ liveins: $x3, $x4, $x5
- %r4 = ADDI %r3, 1
- %cr0 = CMPWI %r3, 0
- %r5 = ISEL %r3, %r4, %cr0gt
- %r3 = ISEL %r4, %r5, %cr0gt
- %r4 = ISEL %r3, %r5, %cr0gt
- ; CHECK: BC %cr0gt, %[[TRUE:bb.[0-9]+]]
+ $r4 = ADDI $r3, 1
+ $cr0 = CMPWI $r3, 0
+ $r5 = ISEL $r3, $r4, $cr0gt
+ $r3 = ISEL $r4, $r5, $cr0gt
+ $r4 = ISEL $r3, $r5, $cr0gt
+ ; CHECK: BC $cr0gt, %[[TRUE:bb.[0-9]+]]
; CHECK: %[[FALSE:bb.[0-9]+]]
- ; CHECK: %r5 = ORI %r4, 0
- ; CHECK: %r3 = ORI %r5, 0
- ; CHECK: %r4 = ORI %r5, 0
+ ; CHECK: $r5 = ORI $r4, 0
+ ; CHECK: $r3 = ORI $r5, 0
+ ; CHECK: $r4 = ORI $r5, 0
; CHECK: B %[[SUCCESSOR:bb.[0-9]+]]
; CHECK: [[TRUE]]
- ; CHECK: %r5 = ADDI %r3, 0
- ; CHECK: %r3 = ADDI %r4, 0
- ; CHECK: %r4 = ADDI %r3, 0
+ ; CHECK: $r5 = ADDI $r3, 0
+ ; CHECK: $r3 = ADDI $r4, 0
+ ; CHECK: $r4 = ADDI $r3, 0
- %x5 = EXTSW_32_64 %r5
- %x3 = EXTSW_32_64 %r3
+ $x5 = EXTSW_32_64 $r5
+ $x3 = EXTSW_32_64 $r3
...
Modified: llvm/trunk/test/CodeGen/PowerPC/expand-isel-9.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/expand-isel-9.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/expand-isel-9.mir (original)
+++ llvm/trunk/test/CodeGen/PowerPC/expand-isel-9.mir Wed Jan 31 14:04:26 2018
@@ -23,8 +23,8 @@ regBankSelected: false
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%x3' }
- - { reg: '%x4' }
+ - { reg: '$x3' }
+ - { reg: '$x4' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -41,14 +41,14 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.entry:
- liveins: %x3, %x4
+ liveins: $x3, $x4
- %r5 = ADDI %r3, 1
- %cr0 = CMPWI %r3, 0
- %r3 = ISEL %r4, %r4, %cr0gt
+ $r5 = ADDI $r3, 1
+ $cr0 = CMPWI $r3, 0
+ $r3 = ISEL $r4, $r4, $cr0gt
    ; Test folding ISEL into a copy
- ; CHECK: %r3 = OR %r4, %r4
+ ; CHECK: $r3 = OR $r4, $r4
- %x3 = EXTSW_32_64 %r3
+ $x3 = EXTSW_32_64 $r3
...
Modified: llvm/trunk/test/CodeGen/PowerPC/fp64-to-int16.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/fp64-to-int16.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/fp64-to-int16.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/fp64-to-int16.ll Wed Jan 31 14:04:26 2018
@@ -10,7 +10,7 @@ define i1 @Test(double %a) {
; CHECK-NEXT: xori 3, 3, 65534
; CHECK-NEXT: cntlzw 3, 3
; CHECK-NEXT: srwi 3, 3, 5
-; CHECK-NEXT: # implicit-def: %x4
+; CHECK-NEXT: # implicit-def: $x4
; CHECK-NEXT: mr 4, 3
; CHECK-NEXT: mr 3, 4
; CHECK-NEXT: blr
Modified: llvm/trunk/test/CodeGen/PowerPC/livephysregs.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/livephysregs.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/livephysregs.mir (original)
+++ llvm/trunk/test/CodeGen/PowerPC/livephysregs.mir Wed Jan 31 14:04:26 2018
@@ -4,49 +4,49 @@
# recalculated list is okay and contains all the non-saved and saved CSRs.
# CHECK-LABEL: name: func
# CHECK: bb.3:
-# CHECK-NEXT: liveins: %x30, %x29, %x3, %x6
-# CHECK: %x4 = RLDICR killed %x6, 16, 47
-# CHECK: %x3 = OR8 killed %x4, killed %x3
-# CHECK: BLR8 implicit %lr8, implicit %rm, implicit %x3
+# CHECK-NEXT: liveins: $x30, $x29, $x3, $x6
+# CHECK: $x4 = RLDICR killed $x6, 16, 47
+# CHECK: $x3 = OR8 killed $x4, killed $x3
+# CHECK: BLR8 implicit $lr8, implicit $rm, implicit $x3
---
name: func
tracksRegLiveness: true
fixedStack:
- - { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16, callee-saved-register: '%x30' }
- - { id: 1, type: spill-slot, offset: -24, size: 8, alignment: 8, callee-saved-register: '%x29' }
+ - { id: 0, type: spill-slot, offset: -16, size: 8, alignment: 16, callee-saved-register: '$x30' }
+ - { id: 1, type: spill-slot, offset: -24, size: 8, alignment: 8, callee-saved-register: '$x29' }
- { id: 2, offset: -8, size: 8, alignment: 8, isImmutable: true, isAliased: false }
body: |
bb.0:
- liveins: %x3, %x5, %x29, %x30
+ liveins: $x3, $x5, $x29, $x30
- %x6 = RLWINM8 %x3, 16, 16, 31
- %x3 = RLDICL killed %x3, 0, 48
- BC undef %cr5lt, %bb.3
+ $x6 = RLWINM8 $x3, 16, 16, 31
+ $x3 = RLDICL killed $x3, 0, 48
+ BC undef $cr5lt, %bb.3
bb.1:
- liveins: %x3, %x6, %x29, %x30
+ liveins: $x3, $x6, $x29, $x30
- %x4 = RLDICR killed %x6, 16, 47
- %x3 = OR8 killed %x4, killed %x3
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x4 = RLDICR killed $x6, 16, 47
+ $x3 = OR8 killed $x4, killed $x3
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
bb.3:
- liveins: %x3, %x5, %x6, %x29, %x30
+ liveins: $x3, $x5, $x6, $x29, $x30
- dead %x5 = ADD8 %x5, %x6
- BC undef %cr5lt, %bb.1
+ dead $x5 = ADD8 $x5, $x6
+ BC undef $cr5lt, %bb.1
bb.6:
- liveins: %x3, %x6, %x29, %x30
- STD killed %x29, -24, %x1 :: (store 8 into %fixed-stack.1)
- STD killed %x30, -16, %x1 :: (store 8 into %fixed-stack.0, align 16)
- NOP implicit-def dead %x29
- NOP implicit-def dead %x30
+ liveins: $x3, $x6, $x29, $x30
+ STD killed $x29, -24, $x1 :: (store 8 into %fixed-stack.1)
+ STD killed $x30, -16, $x1 :: (store 8 into %fixed-stack.0, align 16)
+ NOP implicit-def dead $x29
+ NOP implicit-def dead $x30
- %x30 = LD -16, %x1 :: (load 8 from %fixed-stack.0, align 16)
- %x29 = LD -24, %x1 :: (load 8 from %fixed-stack.1)
+ $x30 = LD -16, $x1 :: (load 8 from %fixed-stack.0, align 16)
+ $x29 = LD -24, $x1 :: (load 8 from %fixed-stack.1)
- %x4 = RLDICR killed %x6, 16, 47
- %x3 = OR8 killed %x4, killed %x3
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x4 = RLDICR killed $x6, 16, 47
+ $x3 = OR8 killed $x4, killed $x3
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
...
Modified: llvm/trunk/test/CodeGen/PowerPC/no-rlwimi-trivial-commute.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/no-rlwimi-trivial-commute.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/no-rlwimi-trivial-commute.mir (original)
+++ llvm/trunk/test/CodeGen/PowerPC/no-rlwimi-trivial-commute.mir Wed Jan 31 14:04:26 2018
@@ -68,9 +68,9 @@ frameInfo:
hasMustTailInVarArgFunc: false
body: |
bb.0.entry:
- liveins: %x2
+ liveins: $x2
- %0 = ADDIStocHA %x2, @b
+ %0 = ADDIStocHA $x2, @b
%1 = LD target-flags(ppc-toc-lo) @b, killed %0 :: (load 8 from @b)
%2 = LWZ 0, %1 :: (load 4 from %ir.0)
%3 = LI 0
@@ -83,7 +83,7 @@ body: |
STW %4, 0, %1 :: (store 4 into %ir.0)
%10 = EXTSW_32_64 %8
STW %8, 0, %1 :: (store 4 into %ir.0)
- %x3 = COPY %10
- BLR8 implicit %x3, implicit %lr8, implicit %rm
+ $x3 = COPY %10
+ BLR8 implicit $x3, implicit $lr8, implicit $rm
...
Modified: llvm/trunk/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/opt-cmp-inst-cr0-live.ll Wed Jan 31 14:04:26 2018
@@ -7,12 +7,12 @@ define signext i32 @fn1(i32 %baz) {
%2 = zext i32 %1 to i64
%3 = shl i64 %2, 48
%4 = ashr exact i64 %3, 48
-; CHECK: ANDIo8 killed {{[^,]+}}, 65520, implicit-def dead %cr0
+; CHECK: ANDIo8 killed {{[^,]+}}, 65520, implicit-def dead $cr0
; CHECK: CMPLDI
; CHECK: BCC
-; CHECK: ANDIo8 {{[^,]+}}, 65520, implicit-def %cr0
-; CHECK: COPY %cr0
+; CHECK: ANDIo8 {{[^,]+}}, 65520, implicit-def $cr0
+; CHECK: COPY $cr0
; CHECK: BCC
%5 = icmp eq i64 %4, 0
br i1 %5, label %foo, label %bar
@@ -26,8 +26,8 @@ bar:
; CHECK-LABEL: fn2
define signext i32 @fn2(i64 %a, i64 %b) {
-; CHECK: OR8o {{[^, ]+}}, {{[^, ]+}}, implicit-def %cr0
-; CHECK: [[CREG:[^, ]+]]:crrc = COPY killed %cr
+; CHECK: OR8o {{[^, ]+}}, {{[^, ]+}}, implicit-def $cr0
+; CHECK: [[CREG:[^, ]+]]:crrc = COPY killed $cr
; CHECK: BCC 12, killed [[CREG]]
%1 = or i64 %b, %a
%2 = icmp sgt i64 %1, -1
@@ -42,8 +42,8 @@ bar:
; CHECK-LABEL: fn3
define signext i32 @fn3(i32 %a) {
-; CHECK: ANDIo killed {{[%0-9]+}}, 10, implicit-def %cr0
-; CHECK: [[CREG:[^, ]+]]:crrc = COPY %cr0
+; CHECK: ANDIo killed {{[%0-9]+}}, 10, implicit-def $cr0
+; CHECK: [[CREG:[^, ]+]]:crrc = COPY $cr0
; CHECK: BCC 76, killed [[CREG]]
%1 = and i32 %a, 10
%2 = icmp ne i32 %1, 0
Modified: llvm/trunk/test/CodeGen/PowerPC/opt-sub-inst-cr0-live.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/opt-sub-inst-cr0-live.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/opt-sub-inst-cr0-live.mir (original)
+++ llvm/trunk/test/CodeGen/PowerPC/opt-sub-inst-cr0-live.mir Wed Jan 31 14:04:26 2018
@@ -63,10 +63,10 @@ registers:
- { id: 26, class: g8rc_and_g8rc_nox0 }
- { id: 27, class: g8rc_and_g8rc_nox0 }
liveins:
- - { reg: '%x3', virtual-reg: '%6' }
- - { reg: '%x4', virtual-reg: '%7' }
- - { reg: '%x5', virtual-reg: '%8' }
- - { reg: '%x6', virtual-reg: '%9' }
+ - { reg: '$x3', virtual-reg: '%6' }
+ - { reg: '$x4', virtual-reg: '%7' }
+ - { reg: '$x5', virtual-reg: '%8' }
+ - { reg: '$x6', virtual-reg: '%9' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -84,12 +84,12 @@ frameInfo:
body: |
bb.0.top:
successors: %bb.1.loop
- liveins: %x3, %x4, %x5, %x6
+ liveins: $x3, $x4, $x5, $x6
- %9 = COPY %x6
- %8 = COPY %x5
- %7 = COPY %x4
- %6 = COPY %x3
+ %9 = COPY $x6
+ %8 = COPY $x5
+ %7 = COPY $x4
+ %6 = COPY $x3
%14 = COPY %9
%13 = COPY %8
%12 = COPY %7
@@ -104,8 +104,8 @@ body: |
%1 = PHI %12, %bb.0.top, %5, %bb.3.loop
%2 = PHI %13, %bb.0.top, %4, %bb.3.loop
%3 = PHI %14, %bb.0.top, %5, %bb.3.loop
- %15 = SUBFC8 %3, %1, implicit-def %carry
- %16 = SUBFE8 %2, %0, implicit-def dead %carry, implicit %carry
+ %15 = SUBFC8 %3, %1, implicit-def $carry
+ %16 = SUBFE8 %2, %0, implicit-def dead $carry, implicit $carry
%17 = ADDI8 %16, -1
%18 = ADDI8 %15, -1
%19 = ANDC8 killed %17, %16
@@ -114,8 +114,8 @@ body: |
%24 = CNTLZD killed %20
%25 = CMPLDI %15, 0
BCC 76, %25, %bb.2.loop
- ; CHECK: SUBFC8o %3, %1, implicit-def %carry, implicit-def %cr0
- ; CHECK: COPY killed %cr0
+ ; CHECK: SUBFC8o %3, %1, implicit-def $carry, implicit-def $cr0
+ ; CHECK: COPY killed $cr0
; CHECK: BCC
bb.4:
Modified: llvm/trunk/test/CodeGen/PowerPC/quadint-return.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/quadint-return.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/quadint-return.ll (original)
+++ llvm/trunk/test/CodeGen/PowerPC/quadint-return.ll Wed Jan 31 14:04:26 2018
@@ -14,6 +14,6 @@ entry:
; CHECK: ********** Function: foo
; CHECK: ********** FAST REGISTER ALLOCATION **********
-; CHECK: %x3 = COPY %{{[0-9]+}}
-; CHECK-NEXT: %x4 = COPY %{{[0-9]+}}
+; CHECK: $x3 = COPY %{{[0-9]+}}
+; CHECK-NEXT: $x4 = COPY %{{[0-9]+}}
; CHECK-NEXT: BLR
Modified: llvm/trunk/test/CodeGen/PowerPC/scavenging.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/scavenging.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/scavenging.mir (original)
+++ llvm/trunk/test/CodeGen/PowerPC/scavenging.mir Wed Jan 31 14:04:26 2018
@@ -5,12 +5,12 @@ name: noscav0
tracksRegLiveness: true
body: |
bb.0:
- ; CHECK: [[REG0:%r[0-9]+]] = LI 42
+ ; CHECK: [[REG0:\$r[0-9]+]] = LI 42
; CHECK-NEXT: NOP implicit killed [[REG0]]
%0 : gprc = LI 42
NOP implicit %0
- ; CHECK: [[REG1:%r[0-9]+]] = LI 42
+ ; CHECK: [[REG1:\$r[0-9]+]] = LI 42
; CHECK-NEXT: NOP
; CHECK-NEXT: NOP implicit [[REG1]]
; CHECK-NEXT: NOP
@@ -21,50 +21,50 @@ body: |
NOP
NOP implicit %1
- ; CHECK: [[REG2:%r[0-9]+]] = LI 42
+ ; CHECK: [[REG2:\$r[0-9]+]] = LI 42
; CHECK-NEXT: NOP implicit [[REG2]]
%2 : gprc = LI 42
NOP implicit %2
- %x0 = IMPLICIT_DEF
- %x1 = IMPLICIT_DEF
- %x2 = IMPLICIT_DEF
- %x3 = IMPLICIT_DEF
- %x4 = IMPLICIT_DEF
- %x27 = IMPLICIT_DEF
- %x28 = IMPLICIT_DEF
- %x29 = IMPLICIT_DEF
- %x30 = IMPLICIT_DEF
-
- ; CHECK-NOT: %x0 = LI 42
- ; CHECK-NOT: %x1 = LI 42
- ; CHECK-NOT: %x2 = LI 42
- ; CHECK-NOT: %x3 = LI 42
- ; CHECK-NOT: %x4 = LI 42
- ; CHECK-NOT: %x5 = LI 42
- ; CHECK-NOT: %x27 = LI 42
- ; CHECK-NOT: %x28 = LI 42
- ; CHECK-NOT: %x29 = LI 42
- ; CHECK-NOT: %x30 = LI 42
- ; CHECK: [[REG3:%r[0-9]+]] = LI 42
- ; CHECK-NEXT: %x5 = IMPLICIT_DEF
+ $x0 = IMPLICIT_DEF
+ $x1 = IMPLICIT_DEF
+ $x2 = IMPLICIT_DEF
+ $x3 = IMPLICIT_DEF
+ $x4 = IMPLICIT_DEF
+ $x27 = IMPLICIT_DEF
+ $x28 = IMPLICIT_DEF
+ $x29 = IMPLICIT_DEF
+ $x30 = IMPLICIT_DEF
+
+ ; CHECK-NOT: $x0 = LI 42
+ ; CHECK-NOT: $x1 = LI 42
+ ; CHECK-NOT: $x2 = LI 42
+ ; CHECK-NOT: $x3 = LI 42
+ ; CHECK-NOT: $x4 = LI 42
+ ; CHECK-NOT: $x5 = LI 42
+ ; CHECK-NOT: $x27 = LI 42
+ ; CHECK-NOT: $x28 = LI 42
+ ; CHECK-NOT: $x29 = LI 42
+ ; CHECK-NOT: $x30 = LI 42
+ ; CHECK: [[REG3:\$r[0-9]+]] = LI 42
+ ; CHECK-NEXT: $x5 = IMPLICIT_DEF
; CHECK-NEXT: NOP implicit killed [[REG2]]
; CHECK-NEXT: NOP implicit killed [[REG3]]
%3 : gprc = LI 42
- %x5 = IMPLICIT_DEF
+ $x5 = IMPLICIT_DEF
NOP implicit %2
NOP implicit %3
- NOP implicit %x0
- NOP implicit %x1
- NOP implicit %x2
- NOP implicit %x3
- NOP implicit %x4
- NOP implicit %x5
- NOP implicit %x27
- NOP implicit %x28
- NOP implicit %x29
- NOP implicit %x30
+ NOP implicit $x0
+ NOP implicit $x1
+ NOP implicit $x2
+ NOP implicit $x3
+ NOP implicit $x4
+ NOP implicit $x5
+ NOP implicit $x27
+ NOP implicit $x28
+ NOP implicit $x29
+ NOP implicit $x30
...
---
# CHECK-LABEL: name: scav0
@@ -76,76 +76,76 @@ stack:
- { id: 0, type: variable-sized, offset: -32, alignment: 1 }
body: |
bb.0:
- %x0 = IMPLICIT_DEF
- %x1 = IMPLICIT_DEF
- %x2 = IMPLICIT_DEF
- %x3 = IMPLICIT_DEF
- %x4 = IMPLICIT_DEF
- %x5 = IMPLICIT_DEF
- %x6 = IMPLICIT_DEF
- %x7 = IMPLICIT_DEF
- %x8 = IMPLICIT_DEF
- %x9 = IMPLICIT_DEF
- %x10 = IMPLICIT_DEF
- %x11 = IMPLICIT_DEF
- %x12 = IMPLICIT_DEF
- %x13 = IMPLICIT_DEF
- %x14 = IMPLICIT_DEF
- %x15 = IMPLICIT_DEF
- %x16 = IMPLICIT_DEF
- %x17 = IMPLICIT_DEF
- %x18 = IMPLICIT_DEF
- %x19 = IMPLICIT_DEF
- %x20 = IMPLICIT_DEF
- %x21 = IMPLICIT_DEF
- %x22 = IMPLICIT_DEF
- %x23 = IMPLICIT_DEF
- %x24 = IMPLICIT_DEF
- %x25 = IMPLICIT_DEF
- %x26 = IMPLICIT_DEF
- %x27 = IMPLICIT_DEF
- %x28 = IMPLICIT_DEF
- %x29 = IMPLICIT_DEF
- %x30 = IMPLICIT_DEF
+ $x0 = IMPLICIT_DEF
+ $x1 = IMPLICIT_DEF
+ $x2 = IMPLICIT_DEF
+ $x3 = IMPLICIT_DEF
+ $x4 = IMPLICIT_DEF
+ $x5 = IMPLICIT_DEF
+ $x6 = IMPLICIT_DEF
+ $x7 = IMPLICIT_DEF
+ $x8 = IMPLICIT_DEF
+ $x9 = IMPLICIT_DEF
+ $x10 = IMPLICIT_DEF
+ $x11 = IMPLICIT_DEF
+ $x12 = IMPLICIT_DEF
+ $x13 = IMPLICIT_DEF
+ $x14 = IMPLICIT_DEF
+ $x15 = IMPLICIT_DEF
+ $x16 = IMPLICIT_DEF
+ $x17 = IMPLICIT_DEF
+ $x18 = IMPLICIT_DEF
+ $x19 = IMPLICIT_DEF
+ $x20 = IMPLICIT_DEF
+ $x21 = IMPLICIT_DEF
+ $x22 = IMPLICIT_DEF
+ $x23 = IMPLICIT_DEF
+ $x24 = IMPLICIT_DEF
+ $x25 = IMPLICIT_DEF
+ $x26 = IMPLICIT_DEF
+ $x27 = IMPLICIT_DEF
+ $x28 = IMPLICIT_DEF
+ $x29 = IMPLICIT_DEF
+ $x30 = IMPLICIT_DEF
- ; CHECK: STD killed [[SPILLEDREG:%x[0-9]+]]
+ ; CHECK: STD killed [[SPILLEDREG:\$x[0-9]+]]
; CHECK: [[SPILLEDREG]] = LI8 42
; CHECK: NOP implicit killed [[SPILLEDREG]]
; CHECK: [[SPILLEDREG]] = LD
%0 : g8rc = LI8 42
NOP implicit %0
- NOP implicit %x0
- NOP implicit %x1
- NOP implicit %x2
- NOP implicit %x3
- NOP implicit %x4
- NOP implicit %x5
- NOP implicit %x6
- NOP implicit %x7
- NOP implicit %x8
- NOP implicit %x9
- NOP implicit %x10
- NOP implicit %x11
- NOP implicit %x12
- NOP implicit %x13
- NOP implicit %x14
- NOP implicit %x15
- NOP implicit %x16
- NOP implicit %x17
- NOP implicit %x18
- NOP implicit %x19
- NOP implicit %x20
- NOP implicit %x21
- NOP implicit %x22
- NOP implicit %x23
- NOP implicit %x24
- NOP implicit %x25
- NOP implicit %x26
- NOP implicit %x27
- NOP implicit %x28
- NOP implicit %x29
- NOP implicit %x30
+ NOP implicit $x0
+ NOP implicit $x1
+ NOP implicit $x2
+ NOP implicit $x3
+ NOP implicit $x4
+ NOP implicit $x5
+ NOP implicit $x6
+ NOP implicit $x7
+ NOP implicit $x8
+ NOP implicit $x9
+ NOP implicit $x10
+ NOP implicit $x11
+ NOP implicit $x12
+ NOP implicit $x13
+ NOP implicit $x14
+ NOP implicit $x15
+ NOP implicit $x16
+ NOP implicit $x17
+ NOP implicit $x18
+ NOP implicit $x19
+ NOP implicit $x20
+ NOP implicit $x21
+ NOP implicit $x22
+ NOP implicit $x23
+ NOP implicit $x24
+ NOP implicit $x25
+ NOP implicit $x26
+ NOP implicit $x27
+ NOP implicit $x28
+ NOP implicit $x29
+ NOP implicit $x30
...
---
# Check for bug where we would refuse to spill before the first instruction in a
@@ -153,7 +153,7 @@ body: |
# CHECK-LABEL: name: spill_at_begin
# CHECK: bb.0:
# CHECK: liveins:
-# CHECK: STD killed [[REG:%x[0-9]+]]{{.*}}(store 8 into %stack.{{[0-9]+}})
+# CHECK: STD killed [[REG:\$x[0-9]+]]{{.*}}(store 8 into %stack.{{[0-9]+}})
# CHECK: [[REG]] = LIS8 0
# CHECK: [[REG]] = ORI8 killed [[REG]], 48
# CHECK: NOP implicit killed [[REG]]
@@ -166,41 +166,41 @@ stack:
- { id: 0, type: variable-sized, offset: -32, alignment: 1 }
body: |
bb.0:
- liveins: %x0, %x1, %x2, %x3, %x4, %x5, %x6, %x7, %x8, %x9, %x10, %x11, %x12, %x13, %x14, %x15, %x16, %x17, %x18, %x19, %x20, %x21, %x22, %x23, %x24, %x25, %x26, %x27, %x28, %x29, %x30, %x31
+ liveins: $x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7, $x8, $x9, $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17, $x18, $x19, $x20, $x21, $x22, $x23, $x24, $x25, $x26, $x27, $x28, $x29, $x30, $x31
%0 : g8rc = LIS8 0
%1 : g8rc = ORI8 %0, 48
NOP implicit %1
- NOP implicit %x0
- NOP implicit %x1
- NOP implicit %x2
- NOP implicit %x3
- NOP implicit %x4
- NOP implicit %x5
- NOP implicit %x6
- NOP implicit %x7
- NOP implicit %x8
- NOP implicit %x9
- NOP implicit %x10
- NOP implicit %x11
- NOP implicit %x12
- NOP implicit %x13
- NOP implicit %x14
- NOP implicit %x15
- NOP implicit %x16
- NOP implicit %x17
- NOP implicit %x18
- NOP implicit %x19
- NOP implicit %x20
- NOP implicit %x21
- NOP implicit %x22
- NOP implicit %x23
- NOP implicit %x24
- NOP implicit %x25
- NOP implicit %x26
- NOP implicit %x27
- NOP implicit %x28
- NOP implicit %x29
- NOP implicit %x30
- NOP implicit %x31
+ NOP implicit $x0
+ NOP implicit $x1
+ NOP implicit $x2
+ NOP implicit $x3
+ NOP implicit $x4
+ NOP implicit $x5
+ NOP implicit $x6
+ NOP implicit $x7
+ NOP implicit $x8
+ NOP implicit $x9
+ NOP implicit $x10
+ NOP implicit $x11
+ NOP implicit $x12
+ NOP implicit $x13
+ NOP implicit $x14
+ NOP implicit $x15
+ NOP implicit $x16
+ NOP implicit $x17
+ NOP implicit $x18
+ NOP implicit $x19
+ NOP implicit $x20
+ NOP implicit $x21
+ NOP implicit $x22
+ NOP implicit $x23
+ NOP implicit $x24
+ NOP implicit $x25
+ NOP implicit $x26
+ NOP implicit $x27
+ NOP implicit $x28
+ NOP implicit $x29
+ NOP implicit $x30
+ NOP implicit $x31
...
Modified: llvm/trunk/test/CodeGen/PowerPC/tls_get_addr_fence1.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/tls_get_addr_fence1.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/tls_get_addr_fence1.mir (original)
+++ llvm/trunk/test/CodeGen/PowerPC/tls_get_addr_fence1.mir Wed Jan 31 14:04:26 2018
@@ -27,7 +27,7 @@ registers:
- { id: 1, class: g8rc_and_g8rc_nox0, preferred-register: '' }
- { id: 2, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x2' }
+ - { reg: '$x2' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -50,17 +50,17 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x2
- %0 = ADDIStlsgdHA %x2, @tls_var
- %1 = ADDItlsgdLADDR killed %0, @tls_var, @tls_var, implicit-def dead %x0, implicit-def dead %x3, implicit-def dead %x4, implicit-def dead %x5, implicit-def dead %x6, implicit-def dead %x7, implicit-def dead %x8, implicit-def dead %x9, implicit-def dead %x10, implicit-def dead %x11, implicit-def dead %x12, implicit-def dead %lr8, implicit-def dead %ctr8, implicit-def dead %cr0, implicit-def dead %cr1, implicit-def dead %cr5, implicit-def dead %cr6, implicit-def dead %cr7
+ liveins: $x2
+ %0 = ADDIStlsgdHA $x2, @tls_var
+ %1 = ADDItlsgdLADDR killed %0, @tls_var, @tls_var, implicit-def dead $x0, implicit-def dead $x3, implicit-def dead $x4, implicit-def dead $x5, implicit-def dead $x6, implicit-def dead $x7, implicit-def dead $x8, implicit-def dead $x9, implicit-def dead $x10, implicit-def dead $x11, implicit-def dead $x12, implicit-def dead $lr8, implicit-def dead $ctr8, implicit-def dead $cr0, implicit-def dead $cr1, implicit-def dead $cr5, implicit-def dead $cr6, implicit-def dead $cr7
%2 = LWZ8 0, killed %1 :: (dereferenceable load 4 from @tls_var)
- %x3 = COPY %2
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %2
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
; CHECK-LABEL: bb.0.entry
- ; CHECK: %[[reg1:[0-9]+]]:g8rc_and_g8rc_nox0 = ADDIStlsgdHA %x2, @tls_var
+ ; CHECK: %[[reg1:[0-9]+]]:g8rc_and_g8rc_nox0 = ADDIStlsgdHA $x2, @tls_var
; CHECK: ADJCALLSTACKDOWN 0, 0
- ; CHECK: %x3 = ADDItlsgdL %[[reg1]], @tls_var
- ; CHECK: %x3 = GETtlsADDR %x3, @tls_var
+ ; CHECK: $x3 = ADDItlsgdL %[[reg1]], @tls_var
+ ; CHECK: $x3 = GETtlsADDR $x3, @tls_var
; CHECK: ADJCALLSTACKUP 0, 0
; CHECK: BLR8
...
Modified: llvm/trunk/test/CodeGen/PowerPC/tls_get_addr_fence2.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/tls_get_addr_fence2.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/tls_get_addr_fence2.mir (original)
+++ llvm/trunk/test/CodeGen/PowerPC/tls_get_addr_fence2.mir Wed Jan 31 14:04:26 2018
@@ -27,7 +27,7 @@ registers:
- { id: 1, class: g8rc_and_g8rc_nox0, preferred-register: '' }
- { id: 2, class: g8rc, preferred-register: '' }
liveins:
- - { reg: '%x2' }
+ - { reg: '$x2' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -50,14 +50,14 @@ stack:
constants:
body: |
bb.0.entry:
- liveins: %x2
- ADJCALLSTACKDOWN 32, 0, implicit-def %r1, implicit %r1
- %0 = ADDIStlsgdHA %x2, @tls_var
- %1 = ADDItlsgdLADDR killed %0, @tls_var, @tls_var, implicit-def dead %x0, implicit-def dead %x3, implicit-def dead %x4, implicit-def dead %x5, implicit-def dead %x6, implicit-def dead %x7, implicit-def dead %x8, implicit-def dead %x9, implicit-def dead %x10, implicit-def dead %x11, implicit-def dead %x12, implicit-def dead %lr8, implicit-def dead %ctr8, implicit-def dead %cr0, implicit-def dead %cr1, implicit-def dead %cr5, implicit-def dead %cr6, implicit-def dead %cr7
+ liveins: $x2
+ ADJCALLSTACKDOWN 32, 0, implicit-def $r1, implicit $r1
+ %0 = ADDIStlsgdHA $x2, @tls_var
+ %1 = ADDItlsgdLADDR killed %0, @tls_var, @tls_var, implicit-def dead $x0, implicit-def dead $x3, implicit-def dead $x4, implicit-def dead $x5, implicit-def dead $x6, implicit-def dead $x7, implicit-def dead $x8, implicit-def dead $x9, implicit-def dead $x10, implicit-def dead $x11, implicit-def dead $x12, implicit-def dead $lr8, implicit-def dead $ctr8, implicit-def dead $cr0, implicit-def dead $cr1, implicit-def dead $cr5, implicit-def dead $cr6, implicit-def dead $cr7
%2 = LWZ8 0, killed %1 :: (dereferenceable load 4 from @tls_var)
- %x3 = COPY %2
- ADJCALLSTACKUP 32, 0, implicit-def %r1, implicit %r1
- BLR8 implicit %lr8, implicit %rm, implicit %x3
+ $x3 = COPY %2
+ ADJCALLSTACKUP 32, 0, implicit-def $r1, implicit $r1
+ BLR8 implicit $lr8, implicit $rm, implicit $x3
; CHECK-LABEL: bb.0.entry
; CHECK-NOT: ADJCALLSTACKDOWN 0, 0
; CHECK-NOT: ADJCALLSTACKUP 0, 0
Modified: llvm/trunk/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir (original)
+++ llvm/trunk/test/CodeGen/SystemZ/RAbasic-invalid-LR-update.mir Wed Jan 31 14:04:26 2018
@@ -149,52 +149,52 @@ body: |
%11 = VGBM 0
%43 = LHIMux 0
%44 = LARL %const.0
- %45 = VL64 %44, 0, %noreg :: (load 8 from constant-pool)
+ %45 = VL64 %44, 0, $noreg :: (load 8 from constant-pool)
bb.1:
ADJCALLSTACKDOWN 0, 0
%12 = LZDR
- %f0d = COPY %12
- CallBRASL &fmod, killed %f0d, undef %f2d, csr_systemz, implicit-def dead %r14d, implicit-def dead %cc, implicit-def %f0d
+ $f0d = COPY %12
+ CallBRASL &fmod, killed $f0d, undef $f2d, csr_systemz, implicit-def dead $r14d, implicit-def dead $cc, implicit-def $f0d
ADJCALLSTACKUP 0, 0
- KILL killed %f0d
+ KILL killed $f0d
bb.2:
- %17 = VLGVH %11, %noreg, 0
+ %17 = VLGVH %11, $noreg, 0
%19 = LHR %17.subreg_l32
undef %20.subreg_l64 = LGHI 0
%20 = DSGFR %20, %19
- %22 = VLGVH %11, %noreg, 3
+ %22 = VLGVH %11, $noreg, 3
%24 = LHR %22.subreg_l32
undef %25.subreg_l64 = LGHI 0
%25 = DSGFR %25, %24
- %31 = VLGVH %11, %noreg, 1
+ %31 = VLGVH %11, $noreg, 1
%33 = LHR %31.subreg_l32
undef %34.subreg_l64 = LGHI 0
%34 = DSGFR %34, %33
- %37 = VLGVH %11, %noreg, 2
+ %37 = VLGVH %11, $noreg, 2
%39 = LHR %37.subreg_l32
undef %40.subreg_l64 = LGHI 0
%40 = DSGFR %40, %39
- CHIMux %43, 0, implicit-def %cc
- BRC 14, 6, %bb.2, implicit killed %cc
+ CHIMux %43, 0, implicit-def $cc
+ BRC 14, 6, %bb.2, implicit killed $cc
J %bb.3
bb.3:
- WFCDB undef %46, %45, implicit-def %cc
- %48 = IPM implicit killed %cc
- %48 = AFIMux %48, 268435456, implicit-def dead %cc
+ WFCDB undef %46, %45, implicit-def $cc
+ %48 = IPM implicit killed $cc
+ %48 = AFIMux %48, 268435456, implicit-def dead $cc
%6 = RISBMux undef %6, %48, 31, 159, 35
- WFCDB undef %50, %45, implicit-def %cc
- BRC 15, 6, %bb.1, implicit killed %cc
+ WFCDB undef %50, %45, implicit-def $cc
+ BRC 15, 6, %bb.1, implicit killed $cc
J %bb.4
bb.4:
%36 = VLVGP %25.subreg_l64, %25.subreg_l64
- %36 = VLVGH %36, %20.subreg_l32, %noreg, 0
- %36 = VLVGH %36, %34.subreg_l32, %noreg, 1
- dead %36 = VLVGH %36, %40.subreg_l32, %noreg, 2
- %4 = LG undef %42, 0, %noreg :: (load 8 from `i64* undef`)
+ %36 = VLVGH %36, %20.subreg_l32, $noreg, 0
+ %36 = VLVGH %36, %34.subreg_l32, $noreg, 1
+ dead %36 = VLVGH %36, %40.subreg_l32, $noreg, 2
+ %4 = LG undef %42, 0, $noreg :: (load 8 from `i64* undef`)
undef %57.subreg_h64 = LLILL 0
undef %66.subreg_h64 = LLILL 0
undef %79.subreg_h64 = LLILL 0
@@ -204,61 +204,61 @@ body: |
bb.5:
bb.6:
- %51 = VLGVH undef %7, %noreg, 0
+ %51 = VLGVH undef %7, $noreg, 0
%53 = LLHRMux %51.subreg_l32
- %54 = VLGVH undef %1, %noreg, 0
+ %54 = VLGVH undef %1, $noreg, 0
%57.subreg_l32 = LLHRMux %54.subreg_l32
%58 = COPY %57
%58 = DLR %58, %53
- %60 = VLGVH undef %7, %noreg, 3
+ %60 = VLGVH undef %7, $noreg, 3
%62 = LLHRMux %60.subreg_l32
- %63 = VLGVH undef %1, %noreg, 3
+ %63 = VLGVH undef %1, $noreg, 3
%66.subreg_l32 = LLHRMux %63.subreg_l32
%67 = COPY %66
%67 = DLR %67, %62
- %73 = VLGVH undef %7, %noreg, 1
+ %73 = VLGVH undef %7, $noreg, 1
%75 = LLHRMux %73.subreg_l32
- %76 = VLGVH undef %1, %noreg, 1
+ %76 = VLGVH undef %1, $noreg, 1
%79.subreg_l32 = LLHRMux %76.subreg_l32
%80 = COPY %79
%80 = DLR %80, %75
- %83 = VLGVH undef %7, %noreg, 2
+ %83 = VLGVH undef %7, $noreg, 2
%85 = LLHRMux %83.subreg_l32
- %86 = VLGVH undef %1, %noreg, 2
+ %86 = VLGVH undef %1, $noreg, 2
%89.subreg_l32 = LLHRMux %86.subreg_l32
%90 = COPY %89
%90 = DLR %90, %85
- CHIMux %92, 0, implicit-def %cc
- BRC 14, 6, %bb.7, implicit killed %cc
+ CHIMux %92, 0, implicit-def $cc
+ BRC 14, 6, %bb.7, implicit killed $cc
J %bb.6
bb.7:
- CGHI undef %93, 0, implicit-def %cc
- %96 = IPM implicit killed %cc
- CGHI undef %97, 0, implicit-def %cc
- BRC 14, 6, %bb.6, implicit killed %cc
+ CGHI undef %93, 0, implicit-def $cc
+ %96 = IPM implicit killed $cc
+ CGHI undef %97, 0, implicit-def $cc
+ BRC 14, 6, %bb.6, implicit killed $cc
bb.8:
- CHIMux %6, 0, implicit-def %cc
+ CHIMux %6, 0, implicit-def $cc
%10 = LLILL 41639
- dead %10 = LOCGR %10, %4, 14, 6, implicit killed %cc
- CHIMux %92, 0, implicit-def %cc
- BRC 14, 6, %bb.5, implicit killed %cc
+ dead %10 = LOCGR %10, %4, 14, 6, implicit killed $cc
+ CHIMux %92, 0, implicit-def $cc
+ BRC 14, 6, %bb.5, implicit killed $cc
J %bb.9
bb.9:
%82 = VLVGP %67.subreg_h64, %67.subreg_h64
- %82 = VLVGH %82, %58.subreg_hl32, %noreg, 0
- %82 = VLVGH %82, %80.subreg_hl32, %noreg, 1
- dead %82 = VLVGH %82, %90.subreg_hl32, %noreg, 2
- %96 = AFIMux %96, 1879048192, implicit-def dead %cc
- %96 = SRL %96, %noreg, 31
- dead %11 = VLVGF %11, %96, %noreg, 1
+ %82 = VLVGH %82, %58.subreg_hl32, $noreg, 0
+ %82 = VLVGH %82, %80.subreg_hl32, $noreg, 1
+ dead %82 = VLVGH %82, %90.subreg_hl32, $noreg, 2
+ %96 = AFIMux %96, 1879048192, implicit-def dead $cc
+ %96 = SRL %96, $noreg, 31
+ dead %11 = VLVGF %11, %96, $noreg, 1
%100 = LHIMux 0
bb.10:
- CHIMux %100, 0, implicit-def %cc
- BRC 14, 6, %bb.10, implicit killed %cc
+ CHIMux %100, 0, implicit-def $cc
+ BRC 14, 6, %bb.10, implicit killed $cc
J %bb.11
bb.11:
Modified: llvm/trunk/test/CodeGen/SystemZ/clear-liverange-spillreg.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/clear-liverange-spillreg.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/clear-liverange-spillreg.mir (original)
+++ llvm/trunk/test/CodeGen/SystemZ/clear-liverange-spillreg.mir Wed Jan 31 14:04:26 2018
@@ -162,15 +162,15 @@ body: |
bb.0:
successors: %bb.2(0x00000001), %bb.1(0x7fffffff)
- CHIMux undef %20, 3, implicit-def %cc
- BRC 14, 8, %bb.2, implicit killed %cc
+ CHIMux undef %20, 3, implicit-def $cc
+ BRC 14, 8, %bb.2, implicit killed $cc
J %bb.1
bb.1:
successors: %bb.2(0x00000001), %bb.3(0x7fffffff)
- CHIMux undef %21, 0, implicit-def %cc
- BRC 14, 6, %bb.3, implicit killed %cc
+ CHIMux undef %21, 0, implicit-def $cc
+ BRC 14, 6, %bb.3, implicit killed $cc
J %bb.2
bb.2:
@@ -178,15 +178,15 @@ body: |
bb.3:
successors: %bb.6(0x00000001), %bb.4(0x7fffffff)
- CHIMux undef %23, 2, implicit-def %cc
- BRC 14, 8, %bb.6, implicit killed %cc
+ CHIMux undef %23, 2, implicit-def $cc
+ BRC 14, 8, %bb.6, implicit killed $cc
J %bb.4
bb.4:
successors: %bb.5(0x00000001), %bb.7(0x7fffffff)
- CHIMux undef %24, 1, implicit-def %cc
- BRC 14, 6, %bb.7, implicit killed %cc
+ CHIMux undef %24, 1, implicit-def $cc
+ BRC 14, 6, %bb.7, implicit killed $cc
J %bb.5
bb.5:
@@ -196,48 +196,48 @@ body: |
bb.7:
successors: %bb.47(0x00000001), %bb.8(0x7fffffff)
- CHIMux undef %25, 1, implicit-def %cc
- BRC 14, 8, %bb.47, implicit killed %cc
+ CHIMux undef %25, 1, implicit-def $cc
+ BRC 14, 8, %bb.47, implicit killed $cc
J %bb.8
bb.8:
successors: %bb.46(0x00000001), %bb.48(0x7fffffff)
- CHIMux undef %26, 2, implicit-def %cc
- BRC 14, 8, %bb.46, implicit killed %cc
+ CHIMux undef %26, 2, implicit-def $cc
+ BRC 14, 8, %bb.46, implicit killed $cc
J %bb.48
bb.9:
successors: %bb.36(0x00000001), %bb.10(0x7fffffff)
- CHIMux undef %31, 1, implicit-def %cc
- BRC 14, 8, %bb.36, implicit killed %cc
+ CHIMux undef %31, 1, implicit-def $cc
+ BRC 14, 8, %bb.36, implicit killed $cc
J %bb.10
bb.10:
successors: %bb.35(0x00000001), %bb.37(0x7fffffff)
- CHIMux undef %32, 2, implicit-def %cc
- BRC 14, 8, %bb.35, implicit killed %cc
+ CHIMux undef %32, 2, implicit-def $cc
+ BRC 14, 8, %bb.35, implicit killed $cc
J %bb.37
bb.11:
%4 = COPY %60
- %6 = SLLG %120, %noreg, 1
+ %6 = SLLG %120, $noreg, 1
%7 = LA %6, 64, %41
- %6 = AGR %6, %42, implicit-def dead %cc
- %45 = SRLK %120.subreg_l32, %noreg, 31
- %45 = AR %45, %120.subreg_l32, implicit-def dead %cc
- %45 = NIFMux %45, 536870910, implicit-def dead %cc
- %47 = SRK %120.subreg_l32, %45, implicit-def dead %cc
- %47 = SLL %47, %noreg, 3
+ %6 = AGR %6, %42, implicit-def dead $cc
+ %45 = SRLK %120.subreg_l32, $noreg, 31
+ %45 = AR %45, %120.subreg_l32, implicit-def dead $cc
+ %45 = NIFMux %45, 536870910, implicit-def dead $cc
+ %47 = SRK %120.subreg_l32, %45, implicit-def dead $cc
+ %47 = SLL %47, $noreg, 3
%81 = LGFR %47
bb.12:
successors: %bb.56, %bb.13
- CHIMux %38, 0, implicit-def %cc
- BRC 14, 8, %bb.13, implicit killed %cc
+ CHIMux %38, 0, implicit-def $cc
+ BRC 14, 8, %bb.13, implicit killed $cc
bb.56:
J %bb.16
@@ -247,24 +247,24 @@ body: |
ADJCALLSTACKDOWN 0, 0
%49 = LGFR %120.subreg_l32
- %r2d = COPY %49
- CallBRASL @Get_Direct_Cost8x8, killed %r2d, undef %r3d, csr_systemz, implicit-def dead %r14d, implicit-def dead %cc, implicit-def %r2d
+ $r2d = COPY %49
+ CallBRASL @Get_Direct_Cost8x8, killed $r2d, undef $r3d, csr_systemz, implicit-def dead $r14d, implicit-def dead $cc, implicit-def $r2d
ADJCALLSTACKUP 0, 0
- %51 = COPY killed %r2d
+ %51 = COPY killed $r2d
MVHHI %7, 0, 0 :: (store 2)
- %12 = ARK %51.subreg_l32, %125, implicit-def dead %cc
- CFIMux %51.subreg_l32, 2147483647, implicit-def %cc
- %12 = LOCRMux %12, %126, 14, 8, implicit killed %cc
- CFIMux %125, 2147483647, implicit-def %cc
- %12 = LOCRMux %12, %126, 14, 8, implicit killed %cc
- CHIMux undef %56, 0, implicit-def %cc
- BRC 14, 6, %bb.15, implicit killed %cc
+ %12 = ARK %51.subreg_l32, %125, implicit-def dead $cc
+ CFIMux %51.subreg_l32, 2147483647, implicit-def $cc
+ %12 = LOCRMux %12, %126, 14, 8, implicit killed $cc
+ CFIMux %125, 2147483647, implicit-def $cc
+ %12 = LOCRMux %12, %126, 14, 8, implicit killed $cc
+ CHIMux undef %56, 0, implicit-def $cc
+ BRC 14, 6, %bb.15, implicit killed $cc
J %bb.14
bb.14:
- %124 = AHIMux %124, 1, implicit-def dead %cc
+ %124 = AHIMux %124, 1, implicit-def dead $cc
ADJCALLSTACKDOWN 0, 0
- CallBRASL @store_coding_state, undef %r2d, csr_systemz, implicit-def dead %r14d, implicit-def dead %cc
+ CallBRASL @store_coding_state, undef $r2d, csr_systemz, implicit-def dead $r14d, implicit-def dead $cc
ADJCALLSTACKUP 0, 0
%125 = COPY %12
J %bb.16
@@ -274,8 +274,8 @@ body: |
bb.16:
successors: %bb.12(0x7c000000), %bb.17(0x04000000)
- CLGFI undef %59, 4, implicit-def %cc
- BRC 14, 4, %bb.12, implicit killed %cc
+ CLGFI undef %59, 4, implicit-def $cc
+ BRC 14, 4, %bb.12, implicit killed $cc
J %bb.17
bb.17:
@@ -283,44 +283,44 @@ body: |
MVHI %0, 332, 2 :: (store 4)
%60 = COPY %126
- %60 = AR %60, %4, implicit-def dead %cc
- %18 = LHMux %6, 0, %noreg :: (load 2)
- CHIMux %38, 0, implicit-def %cc
- BRC 14, 6, %bb.19, implicit killed %cc
+ %60 = AR %60, %4, implicit-def dead $cc
+ %18 = LHMux %6, 0, $noreg :: (load 2)
+ CHIMux %38, 0, implicit-def $cc
+ BRC 14, 6, %bb.19, implicit killed $cc
J %bb.18
bb.18:
- %62 = SLLG %81, %noreg, 1
+ %62 = SLLG %81, $noreg, 1
%64 = LA %62, 0, %63
- %65 = LG undef %66, 0, %noreg :: (load 8)
- %67 = LGF undef %68, 0, %noreg :: (load 4)
+ %65 = LG undef %66, 0, $noreg :: (load 8)
+ %67 = LGF undef %68, 0, $noreg :: (load 4)
MVC undef %69, 0, 2, %64, 0 :: (store 2), (load 2)
%70 = COPY %81
- %70 = OILL64 %70, 3, implicit-def dead %cc
- %71 = LA %70, 2, %noreg
- %72 = SLLG %71, %noreg, 1
+ %70 = OILL64 %70, 3, implicit-def dead $cc
+ %71 = LA %70, 2, $noreg
+ %72 = SLLG %71, $noreg, 1
%73 = LHMux %72, 0, %63 :: (load 2)
%74 = LA %70, 2, %67
- %75 = SLLG %74, %noreg, 1
- %76 = LG %65, 0, %noreg :: (load 8)
+ %75 = SLLG %74, $noreg, 1
+ %76 = LG %65, 0, $noreg :: (load 8)
STHMux %73, %76, 0, %75 :: (store 2)
- %77 = LG undef %78, 0, %noreg :: (load 8)
+ %77 = LG undef %78, 0, $noreg :: (load 8)
%79 = LHRL @rec_mbY8x8 :: (load 2)
- STHMux %79, %77, 0, %noreg :: (store 2)
+ STHMux %79, %77, 0, $noreg :: (store 2)
%80 = LHMux %72, 0, %63 :: (load 2)
STHMux %80, %77, 0, %75 :: (store 2)
- %81 = OILL64 %81, 7, implicit-def dead %cc
- %82 = SLLG %81, %noreg, 1
+ %81 = OILL64 %81, 7, implicit-def dead $cc
+ %82 = SLLG %81, $noreg, 1
%83 = LHMux %82, 0, %63 :: (load 2)
- STHMux %83, %77, 0, %noreg :: (store 2)
+ STHMux %83, %77, 0, $noreg :: (store 2)
%84 = LA %62, 64, %63
MVC undef %85, 0, 2, %84, 0 :: (store 2), (load 2)
- %86 = SLLG %70, %noreg, 1
+ %86 = SLLG %70, $noreg, 1
%87 = LHMux %86, 64, %63 :: (load 2)
- %88 = SLLG %67, %noreg, 3
+ %88 = SLLG %67, $noreg, 3
%89 = LG %65, 16, %88 :: (load 8)
%90 = LA %70, 0, %67
- %91 = SLLG %90, %noreg, 1
+ %91 = SLLG %90, $noreg, 1
STHMux %87, %89, 0, %91 :: (store 2)
%92 = LA %72, 64, %63
MVC undef %93, 0, 2, %92, 0 :: (store 2), (load 2)
@@ -332,39 +332,39 @@ body: |
bb.19:
successors: %bb.20(0x04000000), %bb.11(0x7c000000)
- %98 = LGH %7, 0, %noreg :: (load 2)
- %99 = LGH undef %100, 0, %noreg :: (load 2)
+ %98 = LGH %7, 0, $noreg :: (load 2)
+ %99 = LGH undef %100, 0, $noreg :: (load 2)
ADJCALLSTACKDOWN 0, 0
%101 = LGFR %120.subreg_l32
%102 = LGFR %18
- %r2d = COPY %101
- %r3d = COPY %102
- %r4d = LGHI 0
- %r5d = COPY %98
- %r6d = COPY %99
- CallBRASL @SetRefAndMotionVectors, killed %r2d, killed %r3d, killed %r4d, killed %r5d, killed %r6d, csr_systemz, implicit-def dead %r14d, implicit-def dead %cc
+ $r2d = COPY %101
+ $r3d = COPY %102
+ $r4d = LGHI 0
+ $r5d = COPY %98
+ $r6d = COPY %99
+ CallBRASL @SetRefAndMotionVectors, killed $r2d, killed $r3d, killed $r4d, killed $r5d, killed $r6d, csr_systemz, implicit-def dead $r14d, implicit-def dead $cc
ADJCALLSTACKUP 0, 0
ADJCALLSTACKDOWN 0, 0
- CallBRASL @reset_coding_state, undef %r2d, csr_systemz, implicit-def dead %r14d, implicit-def dead %cc
+ CallBRASL @reset_coding_state, undef $r2d, csr_systemz, implicit-def dead $r14d, implicit-def dead $cc
ADJCALLSTACKUP 0, 0
- %120 = LA %120, 1, %noreg
- CGHI %120, 4, implicit-def %cc
- BRC 14, 6, %bb.11, implicit killed %cc
+ %120 = LA %120, 1, $noreg
+ CGHI %120, 4, implicit-def $cc
+ BRC 14, 6, %bb.11, implicit killed $cc
J %bb.20
bb.20:
successors: %bb.22(0x00000001), %bb.21(0x7fffffff)
MVHI undef %105, 0, 0 :: (store 4)
- CHIMux undef %106, 3, implicit-def %cc
- BRC 14, 8, %bb.22, implicit killed %cc
+ CHIMux undef %106, 3, implicit-def $cc
+ BRC 14, 8, %bb.22, implicit killed $cc
J %bb.21
bb.21:
successors: %bb.22(0x00000001), %bb.23(0x7fffffff)
- CHIMux undef %107, 0, implicit-def %cc
- BRC 14, 6, %bb.23, implicit killed %cc
+ CHIMux undef %107, 0, implicit-def $cc
+ BRC 14, 6, %bb.23, implicit killed $cc
J %bb.22
bb.22:
@@ -373,21 +373,21 @@ body: |
successors: %bb.26(0x00000001), %bb.24(0x7fffffff)
ADJCALLSTACKDOWN 0, 0
- CallBRASL @Get_Direct_CostMB, undef %f0d, csr_systemz, implicit-def dead %r14d, implicit-def dead %cc, implicit-def dead %r2d
+ CallBRASL @Get_Direct_CostMB, undef $f0d, csr_systemz, implicit-def dead $r14d, implicit-def dead $cc, implicit-def dead $r2d
ADJCALLSTACKUP 0, 0
ADJCALLSTACKDOWN 0, 0
- %r2d = LGHI 0
- CallBRASL @SetModesAndRefframeForBlocks, killed %r2d, csr_systemz, implicit-def dead %r14d, implicit-def dead %cc
+ $r2d = LGHI 0
+ CallBRASL @SetModesAndRefframeForBlocks, killed $r2d, csr_systemz, implicit-def dead $r14d, implicit-def dead $cc
ADJCALLSTACKUP 0, 0
- CHIMux undef %111, 13, implicit-def %cc
- BRC 14, 8, %bb.26, implicit killed %cc
+ CHIMux undef %111, 13, implicit-def $cc
+ BRC 14, 8, %bb.26, implicit killed $cc
J %bb.24
bb.24:
successors: %bb.25(0x00000001), %bb.27(0x7fffffff)
- CHIMux undef %112, 8, implicit-def %cc
- BRC 14, 6, %bb.27, implicit killed %cc
+ CHIMux undef %112, 8, implicit-def $cc
+ BRC 14, 6, %bb.27, implicit killed $cc
J %bb.25
bb.25:
@@ -397,8 +397,8 @@ body: |
bb.27:
successors: %bb.28, %bb.29
- CHIMux undef %114, 0, implicit-def %cc
- BRC 14, 6, %bb.29, implicit killed %cc
+ CHIMux undef %114, 0, implicit-def $cc
+ BRC 14, 6, %bb.29, implicit killed $cc
bb.28:
%130 = CDFBR %60
@@ -410,16 +410,16 @@ body: |
bb.30:
successors: %bb.33(0x00000001), %bb.31(0x7fffffff)
- VST64 %130, undef %117, 0, %noreg :: (store 8)
- CHIMux undef %118, 2, implicit-def %cc
- BRC 14, 8, %bb.33, implicit killed %cc
+ VST64 %130, undef %117, 0, $noreg :: (store 8)
+ CHIMux undef %118, 2, implicit-def $cc
+ BRC 14, 8, %bb.33, implicit killed $cc
J %bb.31
bb.31:
successors: %bb.32(0x00000001), %bb.34(0x7fffffff)
- CHIMux undef %119, 1, implicit-def %cc
- BRC 14, 6, %bb.34, implicit killed %cc
+ CHIMux undef %119, 1, implicit-def $cc
+ BRC 14, 6, %bb.34, implicit killed $cc
J %bb.32
bb.32:
@@ -436,15 +436,15 @@ body: |
bb.37:
successors: %bb.40(0x00000001), %bb.38(0x7fffffff)
- CHIMux undef %33, 1, implicit-def %cc
- BRC 14, 8, %bb.40, implicit killed %cc
+ CHIMux undef %33, 1, implicit-def $cc
+ BRC 14, 8, %bb.40, implicit killed $cc
J %bb.38
bb.38:
successors: %bb.39(0x00000001), %bb.41(0x7fffffff)
- CHIMux undef %34, 2, implicit-def %cc
- BRC 14, 6, %bb.41, implicit killed %cc
+ CHIMux undef %34, 2, implicit-def $cc
+ BRC 14, 6, %bb.41, implicit killed $cc
J %bb.39
bb.39:
@@ -454,15 +454,15 @@ body: |
bb.41:
successors: %bb.44(0x00000001), %bb.42(0x7fffffff)
- CHIMux undef %35, 1, implicit-def %cc
- BRC 14, 8, %bb.44, implicit killed %cc
+ CHIMux undef %35, 1, implicit-def $cc
+ BRC 14, 8, %bb.44, implicit killed $cc
J %bb.42
bb.42:
successors: %bb.43(0x00000001), %bb.45(0x7fffffff)
- CHIMux undef %36, 2, implicit-def %cc
- BRC 14, 6, %bb.45, implicit killed %cc
+ CHIMux undef %36, 2, implicit-def $cc
+ BRC 14, 6, %bb.45, implicit killed $cc
J %bb.43
bb.43:
@@ -470,7 +470,7 @@ body: |
bb.44:
bb.45:
- %0 = LG undef %22, 0, %noreg :: (load 8)
+ %0 = LG undef %22, 0, $noreg :: (load 8)
%38 = LHIMux 0
STRL %38, @bi_pred_me :: (store 4)
%120 = LGHI 0
@@ -490,15 +490,15 @@ body: |
bb.48:
successors: %bb.51(0x00000001), %bb.49(0x7fffffff)
- CHIMux undef %27, 1, implicit-def %cc
- BRC 14, 8, %bb.51, implicit killed %cc
+ CHIMux undef %27, 1, implicit-def $cc
+ BRC 14, 8, %bb.51, implicit killed $cc
J %bb.49
bb.49:
successors: %bb.50(0x00000001), %bb.52(0x7fffffff)
- CHIMux undef %28, 2, implicit-def %cc
- BRC 14, 6, %bb.52, implicit killed %cc
+ CHIMux undef %28, 2, implicit-def $cc
+ BRC 14, 6, %bb.52, implicit killed $cc
J %bb.50
bb.50:
@@ -508,15 +508,15 @@ body: |
bb.52:
successors: %bb.55(0x00000001), %bb.53(0x7fffffff)
- CHIMux undef %29, 1, implicit-def %cc
- BRC 14, 8, %bb.55, implicit killed %cc
+ CHIMux undef %29, 1, implicit-def $cc
+ BRC 14, 8, %bb.55, implicit killed $cc
J %bb.53
bb.53:
successors: %bb.54(0x00000001), %bb.9(0x7fffffff)
- CHIMux undef %30, 2, implicit-def %cc
- BRC 14, 6, %bb.9, implicit killed %cc
+ CHIMux undef %30, 2, implicit-def $cc
+ BRC 14, 6, %bb.9, implicit killed $cc
J %bb.54
bb.54:
Modified: llvm/trunk/test/CodeGen/SystemZ/cond-move-04.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/cond-move-04.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/cond-move-04.mir (original)
+++ llvm/trunk/test/CodeGen/SystemZ/cond-move-04.mir Wed Jan 31 14:04:26 2018
@@ -61,14 +61,14 @@ body: |
%5 = LHIMux 10
bb.1 (%ir-block.2):
- CHIMux %3, 0, implicit-def %cc
- %0 = LOCRMux undef %0, %5, 14, 6, implicit %cc
- %0 = LOCRMux %0, %2, 14, 6, implicit killed %cc
+ CHIMux %3, 0, implicit-def $cc
+ %0 = LOCRMux undef %0, %5, 14, 6, implicit $cc
+ %0 = LOCRMux %0, %2, 14, 6, implicit killed $cc
ADJCALLSTACKDOWN 0, 0
%7 = LGFR %0
- %r3d = LGHI 0
- %r4d = COPY %7
- CallBRASL @foo, undef %r2d, killed %r3d, killed %r4d, csr_systemz, implicit-def dead %r14d, implicit-def dead %cc, implicit-def dead %r2d
+ $r3d = LGHI 0
+ $r4d = COPY %7
+ CallBRASL @foo, undef $r2d, killed $r3d, killed $r4d, csr_systemz, implicit-def dead $r14d, implicit-def dead $cc, implicit-def dead $r2d
ADJCALLSTACKUP 0, 0
J %bb.1
Modified: llvm/trunk/test/CodeGen/SystemZ/cond-move-05.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/cond-move-05.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/cond-move-05.mir (original)
+++ llvm/trunk/test/CodeGen/SystemZ/cond-move-05.mir Wed Jan 31 14:04:26 2018
@@ -67,10 +67,10 @@ body: |
undef %3.subreg_l64:gr128bit = LGHI 1
%3.subreg_h64:gr128bit = LLILL 0
%3:gr128bit = DLGR %3, %0
- CLFIMux %3.subreg_hl32, 3631842929, implicit-def %cc
- %6:grx32bit = LOCRMux undef %6, %3.subreg_hl32, 14, 4, implicit killed %cc
- CHIMux %6, 0, implicit-def %cc
- BRC 14, 8, %bb.2.for.inc591.1.i.i, implicit killed %cc
+ CLFIMux %3.subreg_hl32, 3631842929, implicit-def $cc
+ %6:grx32bit = LOCRMux undef %6, %3.subreg_hl32, 14, 4, implicit killed $cc
+ CHIMux %6, 0, implicit-def $cc
+ BRC 14, 8, %bb.2.for.inc591.1.i.i, implicit killed $cc
J %bb.1.cleanup584.i.i
bb.1.cleanup584.i.i:
Modified: llvm/trunk/test/CodeGen/SystemZ/fp-cmp-07.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/fp-cmp-07.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/fp-cmp-07.mir (original)
+++ llvm/trunk/test/CodeGen/SystemZ/fp-cmp-07.mir Wed Jan 31 14:04:26 2018
@@ -24,21 +24,21 @@
name: f15
tracksRegLiveness: true
liveins:
- - { reg: '%f0s', virtual-reg: '' }
- - { reg: '%r2d', virtual-reg: '' }
+ - { reg: '$f0s', virtual-reg: '' }
+ - { reg: '$r2d', virtual-reg: '' }
body: |
bb.0.entry:
- liveins: %f0s, %r2d
+ liveins: $f0s, $r2d
- LTEBRCompare %f0s, %f0s, implicit-def %cc
- %f2s = LER %f0s
- INLINEASM &"blah $0", 1, 9, %f2s
- CondReturn 15, 4, implicit %f0s, implicit %cc
+ LTEBRCompare $f0s, $f0s, implicit-def $cc
+ $f2s = LER $f0s
+ INLINEASM &"blah $0", 1, 9, $f2s
+ CondReturn 15, 4, implicit $f0s, implicit $cc
bb.1.store:
- liveins: %f0s, %r2d
+ liveins: $f0s, $r2d
- STE %f0s, killed %r2d, 0, %noreg :: (store 4 into %ir.dest)
- Return implicit %f0s
+ STE $f0s, killed $r2d, 0, $noreg :: (store 4 into %ir.dest)
+ Return implicit $f0s
...
Modified: llvm/trunk/test/CodeGen/SystemZ/fp-conv-17.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/fp-conv-17.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/fp-conv-17.mir (original)
+++ llvm/trunk/test/CodeGen/SystemZ/fp-conv-17.mir Wed Jan 31 14:04:26 2018
@@ -121,82 +121,82 @@ registers:
- { id: 34, class: fp64bit }
- { id: 35, class: fp64bit }
liveins:
- - { reg: '%r2d', virtual-reg: '%0' }
- - { reg: '%r3d', virtual-reg: '%1' }
+ - { reg: '$r2d', virtual-reg: '%0' }
+ - { reg: '$r3d', virtual-reg: '%1' }
body: |
bb.0 (%ir-block.0):
- liveins: %r2d, %r3d
+ liveins: $r2d, $r3d
- %1 = COPY %r3d
- %0 = COPY %r2d
- %2 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
- %3 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
- %4 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
- %5 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
- %6 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
- %7 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
- %8 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
- %9 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
- %10 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
- %11 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
- %12 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
- %13 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
- %14 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
- %15 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
- %16 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
- %17 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
- %18 = LE %1, 0, %noreg :: (volatile load 4 from %ir.ptr2)
- STE %2, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
- STE %3, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
- STE %4, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
- STE %5, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
- STE %6, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
- STE %7, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
- STE %8, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
- STE %9, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
- STE %10, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
- STE %11, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
- STE %12, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
- STE %13, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
- STE %14, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
- STE %15, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
- STE %16, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
- STE %17, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
- STE %18, %1, 0, %noreg :: (volatile store 4 into %ir.ptr2)
+ %1 = COPY $r3d
+ %0 = COPY $r2d
+ %2 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
+ %3 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
+ %4 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
+ %5 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
+ %6 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
+ %7 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
+ %8 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
+ %9 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
+ %10 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
+ %11 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
+ %12 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
+ %13 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
+ %14 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
+ %15 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
+ %16 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
+ %17 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
+ %18 = LE %1, 0, $noreg :: (volatile load 4 from %ir.ptr2)
+ STE %2, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
+ STE %3, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
+ STE %4, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
+ STE %5, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
+ STE %6, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
+ STE %7, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
+ STE %8, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
+ STE %9, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
+ STE %10, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
+ STE %11, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
+ STE %12, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
+ STE %13, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
+ STE %14, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
+ STE %15, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
+ STE %16, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
+ STE %17, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
+ STE %18, %1, 0, $noreg :: (volatile store 4 into %ir.ptr2)
%19 = LDEBR %2
- STD %19, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
+ STD %19, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
%20 = LDEBR %3
- STD %20, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
+ STD %20, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
%21 = LDEBR %4
- STD %21, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
+ STD %21, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
%22 = LDEBR %5
- STD %22, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
+ STD %22, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
%23 = LDEBR %6
- STD %23, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
+ STD %23, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
%24 = LDEBR %7
- STD %24, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
+ STD %24, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
%25 = LDEBR %8
- STD %25, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
+ STD %25, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
%26 = LDEBR %9
- STD %26, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
+ STD %26, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
%27 = LDEBR %10
- STD %27, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
+ STD %27, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
%28 = LDEBR %11
- STD %28, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
+ STD %28, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
%29 = LDEBR %12
- STD %29, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
+ STD %29, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
%30 = LDEBR %13
- STD %30, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
+ STD %30, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
%31 = LDEBR %14
- STD %31, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
+ STD %31, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
%32 = LDEBR %15
- STD %32, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
+ STD %32, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
%33 = LDEBR %16
- STD %33, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
+ STD %33, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
%34 = LDEBR %17
- STD %34, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
+ STD %34, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
%35 = LDEBR %18
- STD %35, %0, 0, %noreg :: (volatile store 8 into %ir.ptr1)
+ STD %35, %0, 0, $noreg :: (volatile store 8 into %ir.ptr1)
Return
...
Modified: llvm/trunk/test/CodeGen/SystemZ/load-and-test.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/load-and-test.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/load-and-test.mir (original)
+++ llvm/trunk/test/CodeGen/SystemZ/load-and-test.mir Wed Jan 31 14:04:26 2018
@@ -13,18 +13,18 @@ name: fun0
tracksRegLiveness: true
body: |
bb.0 ():
- liveins: %r1d
- renamable %r0l = L %r1d, 0, %noreg
- CLFIMux killed renamable %r0l, 0, implicit-def %cc
- BRC 14, 10, %bb.2, implicit %cc
+ liveins: $r1d
+ renamable $r0l = L $r1d, 0, $noreg
+ CLFIMux killed renamable $r0l, 0, implicit-def $cc
+ BRC 14, 10, %bb.2, implicit $cc
bb.1 ():
- liveins: %r0l
- ST killed renamable %r0l, %r15d, 164, %noreg
+ liveins: $r0l
+ ST killed renamable $r0l, $r15d, 164, $noreg
bb.2 ():
- liveins: %r0l
- ST killed renamable %r0l, %r15d, 164, %noreg
+ liveins: $r0l
+ ST killed renamable $r0l, $r15d, 164, $noreg
Return
...
@@ -36,17 +36,17 @@ name: fun1
tracksRegLiveness: true
body: |
bb.0 ():
- liveins: %r1d
- renamable %r0l = L %r1d, 0, %noreg
- CLFIMux killed renamable %r0l, 0, implicit-def %cc
- BRC 14, 8, %bb.2, implicit %cc
+ liveins: $r1d
+ renamable $r0l = L $r1d, 0, $noreg
+ CLFIMux killed renamable $r0l, 0, implicit-def $cc
+ BRC 14, 8, %bb.2, implicit $cc
bb.1 ():
- liveins: %r0l
- ST killed renamable %r0l, %r15d, 164, %noreg
+ liveins: $r0l
+ ST killed renamable $r0l, $r15d, 164, $noreg
bb.2 ():
- liveins: %r0l
- ST killed renamable %r0l, %r15d, 164, %noreg
+ liveins: $r0l
+ ST killed renamable $r0l, $r15d, 164, $noreg
Return
...
Modified: llvm/trunk/test/CodeGen/SystemZ/lower-copy-undef-src.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/lower-copy-undef-src.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/lower-copy-undef-src.mir (original)
+++ llvm/trunk/test/CodeGen/SystemZ/lower-copy-undef-src.mir Wed Jan 31 14:04:26 2018
@@ -5,10 +5,10 @@
# dropped.
---
# CHECK-LABEL: name: undef_copy
-# CHECK: %r13d = KILL undef %r0d, implicit killed %r12q, implicit-def %r12q
+# CHECK: $r13d = KILL undef $r0d, implicit killed $r12q, implicit-def $r12q
name: undef_copy
tracksRegLiveness: true
body: |
bb.0:
- liveins: %r12q
- %r13d = COPY undef %r0d, implicit killed %r12q, implicit-def %r12q
+ liveins: $r12q
+ $r13d = COPY undef $r0d, implicit killed $r12q, implicit-def $r12q
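The CHECK above pins the lowering this test guards: a COPY whose source is undef must not simply be deleted, because its implicit operands still matter; it is rewritten to a KILL instead, so no move is emitted but the implicit kill/def of $r12q survives. A toy sketch of that rule (an assumption-level model, not the actual in-tree lowering code):

def lower_copy(inst):
    # COPY from an undef source: emit no move, but keep the instruction as
    # a KILL so implicit operands (here on $r12q) are not dropped.
    if inst['op'] == 'COPY' and inst.get('src_undef'):
        return dict(inst, op='KILL')
    return inst

assert lower_copy({'op': 'COPY', 'src_undef': True})['op'] == 'KILL'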
Modified: llvm/trunk/test/CodeGen/SystemZ/pr32505.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/pr32505.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/pr32505.ll (original)
+++ llvm/trunk/test/CodeGen/SystemZ/pr32505.ll Wed Jan 31 14:04:26 2018
@@ -10,8 +10,8 @@ define <2 x float> @pr32505(<2 x i8> * %
; CHECK-NEXT: lbh %r1, 0(%r2)
; CHECK-NEXT: ldgr %f0, %r1
; CHECK-NEXT: ldgr %f2, %r0
-; CHECK-NEXT: # kill: def %f0s killed %f0s killed %f0d
-; CHECK-NEXT: # kill: def %f2s killed %f2s killed %f2d
+; CHECK-NEXT: # kill: def $f0s killed $f0s killed $f0d
+; CHECK-NEXT: # kill: def $f2s killed $f2s killed $f2d
; CHECK-NEXT: br %r14
%L17 = load <2 x i8>, <2 x i8>* %a
%Se21 = sext <2 x i8> %L17 to <2 x i32>
Modified: llvm/trunk/test/CodeGen/SystemZ/regalloc-fast-invalid-kill-flag.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/regalloc-fast-invalid-kill-flag.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/regalloc-fast-invalid-kill-flag.mir (original)
+++ llvm/trunk/test/CodeGen/SystemZ/regalloc-fast-invalid-kill-flag.mir Wed Jan 31 14:04:26 2018
@@ -17,18 +17,18 @@ registers:
- { id: 0, class: gr128bit }
- { id: 1, class: gr64bit }
- { id: 2, class: addr64bit }
-# CHECK: %r0q = L128
-# CHECK-NEXT: %r0l = COPY renamable %r1l
+# CHECK: $r0q = L128
+# CHECK-NEXT: $r0l = COPY renamable $r1l
# Although R0L partially redefines R0Q, it must not mark R0Q as kill
# because R1D is still live through that instruction.
-# CHECK-NOT: implicit killed %r0q
-# CHECK-NEXT: %r2d = COPY renamable %r1d
+# CHECK-NOT: implicit killed $r0q
+# CHECK-NEXT: $r2d = COPY renamable $r1d
# CHECK-NEXT: LARL
body: |
bb.0:
%0.subreg_hl32 = COPY %0.subreg_l32
%1 = COPY %0.subreg_l64
%2 = LARL @g_167
- STC %1.subreg_l32, %2, 8, %noreg
+ STC %1.subreg_l32, %2, 8, $noreg
...
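The CHECK-NOT above encodes a subtle liveness rule: writing R0L partially redefines R0Q, but R0Q may only be marked killed if nothing else keeps the rest of it alive, and here R1D does. A simplified model (the register composition is an assumption for illustration, treating r0q as the r0d/r1d pair with r0l inside r0d):

UNITS = {
    '$r0q': {'r0d', 'r1d'},
    '$r0l': {'r0d'},
    '$r1d': {'r1d'},
}

def may_mark_killed(super_reg, partial_def, live):
    # A partial def may kill the super-register only if no live register
    # overlaps the part it does not cover.
    leftover = UNITS[super_reg] - UNITS[partial_def]
    return not any(UNITS[r] & leftover for r in live)

# r1d is live across the def of r0l, so r0q must not be marked killed:
assert may_mark_killed('$r0q', '$r0l', live={'$r1d'}) is False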
Modified: llvm/trunk/test/CodeGen/SystemZ/store_nonbytesized_vecs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/SystemZ/store_nonbytesized_vecs.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/SystemZ/store_nonbytesized_vecs.ll (original)
+++ llvm/trunk/test/CodeGen/SystemZ/store_nonbytesized_vecs.ll Wed Jan 31 14:04:26 2018
@@ -80,7 +80,7 @@ define void @fun2(<8 x i32> %src, <8 x i
; CHECK-NEXT: vlgvf %r1, %v24, 0
; CHECK-NEXT: stc %r1, 30(%r2)
; CHECK-NEXT: llgtr %r0, %r1
-; CHECK-NEXT: # kill: def %r1l killed %r1l killed %r1d def %r1d
+; CHECK-NEXT: # kill: def $r1l killed $r1l killed $r1d def $r1d
; CHECK-NEXT: srl %r1, 8
; CHECK-NEXT: sth %r1, 28(%r2)
; CHECK-NEXT: vlgvf %r1, %v24, 1
Modified: llvm/trunk/test/CodeGen/Thumb/machine-cse-physreg.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb/machine-cse-physreg.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb/machine-cse-physreg.mir (original)
+++ llvm/trunk/test/CodeGen/Thumb/machine-cse-physreg.mir Wed Jan 31 14:04:26 2018
@@ -3,9 +3,9 @@
# This is a contrived example made to expose a bug in
# MachineCSE, see PR32538.
-# MachineCSE must not remove this def of %cpsr:
+# MachineCSE must not remove this def of $cpsr:
# CHECK-LABEL: bb.1:
-# CHECK: , %cpsr = tLSLri
+# CHECK: , $cpsr = tLSLri
...
---
@@ -16,20 +16,20 @@ registers:
- { id: 2, class: tgpr }
- { id: 3, class: tgpr }
liveins:
- - { reg: '%r0', virtual-reg: '%0' }
+ - { reg: '$r0', virtual-reg: '%0' }
body: |
bb.0:
- liveins: %r0
- %0 = COPY %r0
- %1, %cpsr = tLSLri %0, 2, 14, %noreg
- tCMPi8 %0, 5, 14, %noreg, implicit-def %cpsr
- tBcc %bb.8, 8, %cpsr
+ liveins: $r0
+ %0 = COPY $r0
+ %1, $cpsr = tLSLri %0, 2, 14, $noreg
+ tCMPi8 %0, 5, 14, $noreg, implicit-def $cpsr
+ tBcc %bb.8, 8, $cpsr
bb.1:
- %2, %cpsr = tLSLri %0, 2, 14, %noreg
+ %2, $cpsr = tLSLri %0, 2, 14, $noreg
bb.8:
- liveins: %cpsr
- %3 = COPY %cpsr
- tSTRi killed %3, %0, 0, 14, %noreg
+ liveins: $cpsr
+ %3 = COPY $cpsr
+ tSTRi killed %3, %0, 0, 14, $noreg
...
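The point of the CHECK above: the tLSLri in bb.1 looks like a common subexpression of the one in bb.0, but its $cpsr def is live into bb.8, and the bb.0 value of $cpsr was clobbered by tCMPi8 in between, so MachineCSE must keep the duplicate. A toy model of that constraint (an assumption, not the actual PR32538 fix):

PHYS_REGS = {'$cpsr'}

def can_cse(earlier, later, insts):
    # An earlier instruction can stand in for a later identical one only if
    # every physical register it defines reaches that point unclobbered.
    phys_defs = set(insts[earlier]['defs']) & PHYS_REGS
    for inst in insts[earlier + 1:later]:
        if phys_defs & set(inst['defs']):
            return False
    return True

insts = [
    {'op': 'tLSLri', 'defs': ['%1', '$cpsr']},
    {'op': 'tCMPi8', 'defs': ['$cpsr']},   # clobbers $cpsr
    {'op': 'tLSLri', 'defs': ['%2', '$cpsr']},
]
assert can_cse(0, 2, insts) is False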
Modified: llvm/trunk/test/CodeGen/Thumb/tbb-reuse.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb/tbb-reuse.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb/tbb-reuse.mir (original)
+++ llvm/trunk/test/CodeGen/Thumb/tbb-reuse.mir Wed Jan 31 14:04:26 2018
@@ -63,21 +63,21 @@ regBankSelected: false
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%r0' }
-calleeSavedRegisters: [ '%lr', '%d8', '%d9', '%d10', '%d11', '%d12', '%d13',
- '%d14', '%d15', '%q4', '%q5', '%q6', '%q7', '%r4',
- '%r5', '%r6', '%r7', '%r8', '%r9', '%r10', '%r11',
- '%s16', '%s17', '%s18', '%s19', '%s20', '%s21',
- '%s22', '%s23', '%s24', '%s25', '%s26', '%s27',
- '%s28', '%s29', '%s30', '%s31', '%d8_d10', '%d9_d11',
- '%d10_d12', '%d11_d13', '%d12_d14', '%d13_d15',
- '%q4_q5', '%q5_q6', '%q6_q7', '%q4_q5_q6_q7', '%r4_r5',
- '%r6_r7', '%r8_r9', '%r10_r11', '%d8_d9_d10', '%d9_d10_d11',
- '%d10_d11_d12', '%d11_d12_d13', '%d12_d13_d14',
- '%d13_d14_d15', '%d8_d10_d12', '%d9_d11_d13', '%d10_d12_d14',
- '%d11_d13_d15', '%d8_d10_d12_d14', '%d9_d11_d13_d15',
- '%d9_d10', '%d11_d12', '%d13_d14', '%d9_d10_d11_d12',
- '%d11_d12_d13_d14' ]
+ - { reg: '$r0' }
+calleeSavedRegisters: [ '$lr', '$d8', '$d9', '$d10', '$d11', '$d12', '$d13',
+ '$d14', '$d15', '$q4', '$q5', '$q6', '$q7', '$r4',
+ '$r5', '$r6', '$r7', '$r8', '$r9', '$r10', '$r11',
+ '$s16', '$s17', '$s18', '$s19', '$s20', '$s21',
+ '$s22', '$s23', '$s24', '$s25', '$s26', '$s27',
+ '$s28', '$s29', '$s30', '$s31', '$d8_d10', '$d9_d11',
+ '$d10_d12', '$d11_d13', '$d12_d14', '$d13_d15',
+ '$q4_q5', '$q5_q6', '$q6_q7', '$q4_q5_q6_q7', '$r4_r5',
+ '$r6_r7', '$r8_r9', '$r10_r11', '$d8_d9_d10', '$d9_d10_d11',
+ '$d10_d11_d12', '$d11_d12_d13', '$d12_d13_d14',
+ '$d13_d14_d15', '$d8_d10_d12', '$d9_d11_d13', '$d10_d12_d14',
+ '$d11_d13_d15', '$d8_d10_d12_d14', '$d9_d11_d13_d15',
+ '$d9_d10', '$d11_d12', '$d13_d14', '$d9_d10_d11_d12',
+ '$d11_d12_d13_d14' ]
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -93,8 +93,8 @@ frameInfo:
hasVAStart: false
hasMustTailInVarArgFunc: false
stack:
- - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4, callee-saved-register: '%lr', callee-saved-restored: false }
- - { id: 1, type: spill-slot, offset: -8, size: 4, alignment: 4, callee-saved-register: '%r7' }
+ - { id: 0, type: spill-slot, offset: -4, size: 4, alignment: 4, callee-saved-register: '$lr', callee-saved-restored: false }
+ - { id: 1, type: spill-slot, offset: -8, size: 4, alignment: 4, callee-saved-register: '$r7' }
jumpTable:
kind: inline
entries:
@@ -106,46 +106,46 @@ jumpTable:
body: |
bb.0.entry:
successors: %bb.2.default(0x19999998), %bb.1.entry(0x66666668)
- liveins: %r0, %r7, %lr
+ liveins: $r0, $r7, $lr
- frame-setup tPUSH 14, %noreg, killed %r7, killed %lr, implicit-def %sp, implicit %sp
+ frame-setup tPUSH 14, $noreg, killed $r7, killed $lr, implicit-def $sp, implicit $sp
frame-setup CFI_INSTRUCTION def_cfa_offset 8
- frame-setup CFI_INSTRUCTION offset %lr, -4
- frame-setup CFI_INSTRUCTION offset %r7, -8
- %r1, dead %cpsr = tSUBi3 %r0, 1, 14, %noreg
- tCMPi8 %r1, 3, 14, %noreg, implicit-def %cpsr
- tBcc %bb.2.default, 8, killed %cpsr
+ frame-setup CFI_INSTRUCTION offset $lr, -4
+ frame-setup CFI_INSTRUCTION offset $r7, -8
+ $r1, dead $cpsr = tSUBi3 $r0, 1, 14, $noreg
+ tCMPi8 $r1, 3, 14, $noreg, implicit-def $cpsr
+ tBcc %bb.2.default, 8, killed $cpsr
bb.1.entry:
successors: %bb.3.lab1(0x20000000), %bb.4.lab2(0x20000000), %bb.5.lab3(0x20000000), %bb.6.lab4(0x20000000)
- liveins: %r0, %r1
+ liveins: $r0, $r1
- %r1, dead %cpsr = tLSLri killed %r1, 2, 14, %noreg
- %r2 = tLEApcrelJT %jump-table.0, 14, %noreg
- %r2 = tLDRr killed %r1, killed %r2, 14, %noreg :: (load 4 from jump-table)
- %r1, dead %cpsr = tLSLri %r2, 2, 14, %noreg
- tBR_JTr killed %r2, %jump-table.0
+ $r1, dead $cpsr = tLSLri killed $r1, 2, 14, $noreg
+ $r2 = tLEApcrelJT %jump-table.0, 14, $noreg
+ $r2 = tLDRr killed $r1, killed $r2, 14, $noreg :: (load 4 from jump-table)
+ $r1, dead $cpsr = tLSLri $r2, 2, 14, $noreg
+ tBR_JTr killed $r2, %jump-table.0
bb.2.default:
- tBL 14, %noreg, @exit0, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp
- tPOP_RET 14, %noreg, def %r7, def %pc, implicit-def %sp, implicit %sp
+ tBL 14, $noreg, @exit0, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+ tPOP_RET 14, $noreg, def $r7, def $pc, implicit-def $sp, implicit $sp
bb.3.lab1:
- liveins: %r0,%r1
+ liveins: $r0,$r1
- tBL 14, %noreg, @exit1, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit %r0, implicit-def %sp
- tPOP_RET 14, %noreg, def %r7, def %pc, implicit-def %sp, implicit %sp
+ tBL 14, $noreg, @exit1, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit $r0, implicit-def $sp
+ tPOP_RET 14, $noreg, def $r7, def $pc, implicit-def $sp, implicit $sp
bb.4.lab2:
- tBL 14, %noreg, @exit2, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp
- tPOP_RET 14, %noreg, def %r7, def %pc, implicit-def %sp, implicit %sp
+ tBL 14, $noreg, @exit2, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+ tPOP_RET 14, $noreg, def $r7, def $pc, implicit-def $sp, implicit $sp
bb.5.lab3:
- tBL 14, %noreg, @exit3, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp
- tPOP_RET 14, %noreg, def %r7, def %pc, implicit-def %sp, implicit %sp
+ tBL 14, $noreg, @exit3, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+ tPOP_RET 14, $noreg, def $r7, def $pc, implicit-def $sp, implicit $sp
bb.6.lab4:
- tBL 14, %noreg, @exit4, csr_aapcs, implicit-def dead %lr, implicit %sp, implicit-def %sp
- tPOP_RET 14, %noreg, def %r7, def %pc, implicit-def %sp, implicit %sp
+ tBL 14, $noreg, @exit4, csr_aapcs, implicit-def dead $lr, implicit $sp, implicit-def $sp
+ tPOP_RET 14, $noreg, def $r7, def $pc, implicit-def $sp, implicit $sp
...
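Worth noting how bb.0 above guards the tBR_JTr dispatch: the selector is biased by the smallest case value (tSUBi3 %r0, 1), compared against the span (tCMPi8 %r1, 3), and condition code 8 ('hi', unsigned higher) sends out-of-range values, including the wrapped negatives, to the default block. The same shape in plain code (a generic sketch of the lowering, not the Thumb opcodes):

def dispatch(x, table, default):
    idx = x - 1                  # tSUBi3: bias by the smallest case value
    if not 0 <= idx <= 3:        # tCMPi8 + tBcc 'hi': one unsigned compare
        return default           # covers both ends after the bias
    return table[idx]

assert dispatch(0, ['lab1', 'lab2', 'lab3', 'lab4'], 'default') == 'default'
assert dispatch(2, ['lab1', 'lab2', 'lab3', 'lab4'], 'default') == 'lab2'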
Modified: llvm/trunk/test/CodeGen/Thumb2/ifcvt-neon-deprecated.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb2/ifcvt-neon-deprecated.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb2/ifcvt-neon-deprecated.mir (original)
+++ llvm/trunk/test/CodeGen/Thumb2/ifcvt-neon-deprecated.mir Wed Jan 31 14:04:26 2018
@@ -4,25 +4,25 @@ name: NeonVdupMul
body: |
bb.0:
successors: %bb.2, %bb.1
- liveins: %d0, %r0, %r1
+ liveins: $d0, $r0, $r1
- t2CMPri killed %r1, 0, 14, %noreg, implicit-def %cpsr
- t2Bcc %bb.2, 0, killed %cpsr
+ t2CMPri killed $r1, 0, 14, $noreg, implicit-def $cpsr
+ t2Bcc %bb.2, 0, killed $cpsr
bb.1:
- liveins: %d0, %r0
+ liveins: $d0, $r0
- %d16 = VDUP32d killed %r0, 14, %noreg
+ $d16 = VDUP32d killed $r0, 14, $noreg
; Verify that the neon instructions haven't been conditionalized:
; CHECK-LABEL: NeonVdupMul
; CHECK: vdup.32
; CHECK: vmul.i32
- %d0 = VMULv2i32 killed %d16, killed %d0, 14, %noreg
+ $d0 = VMULv2i32 killed $d16, killed $d0, 14, $noreg
bb.2:
- liveins: %d0
+ liveins: $d0
- tBX_RET 14, %noreg, implicit %d0
+ tBX_RET 14, $noreg, implicit $d0
...
---
@@ -30,25 +30,25 @@ name: NeonVmovVfpLdr
body: |
bb.0.entry:
successors: %bb.1, %bb.2
- liveins: %r0, %r1
+ liveins: $r0, $r1
- t2CMPri killed %r1, 0, 14, %noreg, implicit-def %cpsr
- t2Bcc %bb.2, 1, killed %cpsr
+ t2CMPri killed $r1, 0, 14, $noreg, implicit-def $cpsr
+ t2Bcc %bb.2, 1, killed $cpsr
bb.1:
- %d0 = VMOVv2i32 0, 14, %noreg
- tBX_RET 14, %noreg, implicit %d0
+ $d0 = VMOVv2i32 0, 14, $noreg
+ tBX_RET 14, $noreg, implicit $d0
bb.2:
- liveins: %r0
+ liveins: $r0
- %d0 = VLDRD killed %r0, 0, 14, %noreg
+ $d0 = VLDRD killed $r0, 0, 14, $noreg
; Verify that the neon instruction VMOVv2i32 hasn't been conditionalized,
; but the VLDR instruction that is available both in the VFP and Advanced
; SIMD extensions has.
; CHECK-LABEL: NeonVmovVfpLdr
; CHECK-DAG: vmov.i32 d0, #0x0
; CHECK-DAG: vldr{{ne|eq}} d0, [r0]
- tBX_RET 14, %noreg, implicit %d0
+ tBX_RET 14, $noreg, implicit $d0
...
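Both functions above test the same legality split: if-conversion may predicate VLDRD, which exists in both VFP and Advanced SIMD encodings, but must not predicate NEON-only instructions such as VDUP32d, VMULv2i32, or VMOVv2i32, whose conditional execution is deprecated. Reduced to a table for clarity (hand-written here for illustration; the pass queries target hooks rather than a literal set):

NEON_ONLY = {'VDUP32d', 'VMULv2i32', 'VMOVv2i32'}   # transcribed from this test

def can_predicate(opcode):
    return opcode not in NEON_ONLY

assert can_predicate('VLDRD')
assert not can_predicate('VMOVv2i32')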
Modified: llvm/trunk/test/CodeGen/Thumb2/t2sizereduction.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb2/t2sizereduction.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb2/t2sizereduction.mir (original)
+++ llvm/trunk/test/CodeGen/Thumb2/t2sizereduction.mir Wed Jan 31 14:04:26 2018
@@ -36,48 +36,48 @@
name: test
tracksRegLiveness: true
liveins:
- - { reg: '%r0', virtual-reg: '' }
- - { reg: '%r1', virtual-reg: '' }
+ - { reg: '$r0', virtual-reg: '' }
+ - { reg: '$r1', virtual-reg: '' }
body: |
; CHECK-LABEL: name: test
; CHECK: bb.0.entry:
; CHECK: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; CHECK: liveins: %r0, %r1
- ; CHECK: %r2 = tMOVr %r0, 14, %noreg
- ; CHECK: %r0, dead %cpsr = tMOVi8 1, 14, %noreg
- ; CHECK: tCMPi8 %r1, 1, 14, %noreg, implicit-def %cpsr
- ; CHECK: t2Bcc %bb.2, 11, killed %cpsr
+ ; CHECK: liveins: $r0, $r1
+ ; CHECK: $r2 = tMOVr $r0, 14, $noreg
+ ; CHECK: $r0, dead $cpsr = tMOVi8 1, 14, $noreg
+ ; CHECK: tCMPi8 $r1, 1, 14, $noreg, implicit-def $cpsr
+ ; CHECK: t2Bcc %bb.2, 11, killed $cpsr
; CHECK: bb.1.for.body:
; CHECK: successors: %bb.2(0x40000000), %bb.1(0x40000000)
- ; CHECK: liveins: %r0, %r1, %r2
- ; CHECK: %r0, dead %cpsr = tMUL %r2, killed %r0, 14, %noreg
- ; CHECK: %r2, dead %cpsr = tADDi8 killed %r2, 1, 14, %noreg
- ; CHECK: %r1, %cpsr = tSUBi8 killed %r1, 1, 14, %noreg
- ; CHECK: t2Bcc %bb.1, 1, killed %cpsr
+ ; CHECK: liveins: $r0, $r1, $r2
+ ; CHECK: $r0, dead $cpsr = tMUL $r2, killed $r0, 14, $noreg
+ ; CHECK: $r2, dead $cpsr = tADDi8 killed $r2, 1, 14, $noreg
+ ; CHECK: $r1, $cpsr = tSUBi8 killed $r1, 1, 14, $noreg
+ ; CHECK: t2Bcc %bb.1, 1, killed $cpsr
; CHECK: bb.2.for.cond.cleanup:
- ; CHECK: liveins: %r0
- ; CHECK: tBX_RET 14, %noreg, implicit %r0
+ ; CHECK: liveins: $r0
+ ; CHECK: tBX_RET 14, $noreg, implicit $r0
bb.0.entry:
successors: %bb.1.for.body, %bb.2.for.cond.cleanup
- liveins: %r0, %r1
+ liveins: $r0, $r1
- %r2 = tMOVr %r0, 14, _
- %r0 = t2MOVi 1, 14, _, _
- t2CMPri %r1, 1, 14, _, implicit-def %cpsr
- t2Bcc %bb.2.for.cond.cleanup, 11, killed %cpsr
+ $r2 = tMOVr $r0, 14, _
+ $r0 = t2MOVi 1, 14, _, _
+ t2CMPri $r1, 1, 14, _, implicit-def $cpsr
+ t2Bcc %bb.2.for.cond.cleanup, 11, killed $cpsr
bb.1.for.body:
successors: %bb.2.for.cond.cleanup, %bb.1.for.body
- liveins: %r0, %r1, %r2
+ liveins: $r0, $r1, $r2
- %r0 = t2MUL %r2, killed %r0, 14, _
- %r2 = t2ADDri killed %r2, 1, 14, _, _
- %r1 = t2SUBri killed %r1, 1, 14, _, def %cpsr
- t2Bcc %bb.1.for.body, 1, killed %cpsr
+ $r0 = t2MUL $r2, killed $r0, 14, _
+ $r2 = t2ADDri killed $r2, 1, 14, _, _
+ $r1 = t2SUBri killed $r1, 1, 14, _, def $cpsr
+ t2Bcc %bb.1.for.body, 1, killed $cpsr
bb.2.for.cond.cleanup:
- liveins: %r0
+ liveins: $r0
- tBX_RET 14, _, implicit %r0
+ tBX_RET 14, _, implicit $r0
...
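The CHECK lines above double as a map of the Thumb2-to-Thumb1 narrowing this test performs; collected here for reference (transcribed from the diff, the pass itself derives these from instruction properties):

NARROW = {
    't2MOVi':  'tMOVi8',    # also makes the dead $cpsr def explicit
    't2ADDri': 'tADDi8',
    't2SUBri': 'tSUBi8',    # the trailing 'def $cpsr' becomes a real result
    't2CMPri': 'tCMPi8',
    't2MUL':   'tMUL',
}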
Modified: llvm/trunk/test/CodeGen/Thumb2/tbb-removeadd.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb2/tbb-removeadd.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb2/tbb-removeadd.mir (original)
+++ llvm/trunk/test/CodeGen/Thumb2/tbb-removeadd.mir Wed Jan 31 14:04:26 2018
@@ -44,8 +44,8 @@ regBankSelected: false
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%r0' }
- - { reg: '%r1' }
+ - { reg: '$r0' }
+ - { reg: '$r1' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -75,49 +75,49 @@ jumpTable:
body: |
bb.0.entry:
successors: %bb.6.sw.epilog(0x0ccccccb), %bb.1.entry(0x73333335)
- liveins: %r0, %r1
+ liveins: $r0, $r1
- tCMPi8 %r0, 4, 14, %noreg, implicit-def %cpsr
- t2Bcc %bb.6.sw.epilog, 8, killed %cpsr
+ tCMPi8 $r0, 4, 14, $noreg, implicit-def $cpsr
+ t2Bcc %bb.6.sw.epilog, 8, killed $cpsr
bb.1.entry:
successors: %bb.2.sw.bb(0x1c71c71c), %bb.3.sw.bb1(0x1c71c71c), %bb.5.sw.epilog.sink.split(0x1c71c71c), %bb.6.sw.epilog(0x0e38e38e), %bb.4.sw.bb3(0x1c71c71c)
- liveins: %r0, %r1
+ liveins: $r0, $r1
- %r2 = t2LEApcrelJT %jump-table.0, 14, %noreg
- %r3 = t2ADDrs killed %r2, %r0, 18, 14, %noreg, %noreg
- %r2, dead %cpsr = tMOVi8 1, 14, %noreg
- t2BR_JT killed %r3, killed %r0, %jump-table.0
+ $r2 = t2LEApcrelJT %jump-table.0, 14, $noreg
+ $r3 = t2ADDrs killed $r2, $r0, 18, 14, $noreg, $noreg
+ $r2, dead $cpsr = tMOVi8 1, 14, $noreg
+ t2BR_JT killed $r3, killed $r0, %jump-table.0
bb.2.sw.bb:
successors: %bb.5.sw.epilog.sink.split(0x80000000)
- liveins: %r1
+ liveins: $r1
- %r2, dead %cpsr = tMOVi8 0, 14, %noreg
- t2B %bb.5.sw.epilog.sink.split, 14, %noreg
+ $r2, dead $cpsr = tMOVi8 0, 14, $noreg
+ t2B %bb.5.sw.epilog.sink.split, 14, $noreg
bb.3.sw.bb1:
successors: %bb.5.sw.epilog.sink.split(0x80000000)
- liveins: %r1
+ liveins: $r1
- %r0, dead %cpsr = tMOVi8 0, 14, %noreg
- %r2, dead %cpsr = tMOVi8 1, 14, %noreg
- tSTRi killed %r0, %r1, 0, 14, %noreg :: (store 4 into %ir.p)
- t2B %bb.5.sw.epilog.sink.split, 14, %noreg
+ $r0, dead $cpsr = tMOVi8 0, 14, $noreg
+ $r2, dead $cpsr = tMOVi8 1, 14, $noreg
+ tSTRi killed $r0, $r1, 0, 14, $noreg :: (store 4 into %ir.p)
+ t2B %bb.5.sw.epilog.sink.split, 14, $noreg
bb.4.sw.bb3:
successors: %bb.5.sw.epilog.sink.split(0x80000000)
- liveins: %r1
+ liveins: $r1
- %r2, dead %cpsr = tMOVi8 2, 14, %noreg
+ $r2, dead $cpsr = tMOVi8 2, 14, $noreg
bb.5.sw.epilog.sink.split:
successors: %bb.6.sw.epilog(0x80000000)
- liveins: %r1, %r2
+ liveins: $r1, $r2
- tSTRi killed %r2, killed %r1, 0, 14, %noreg :: (store 4 into %ir.p)
+ tSTRi killed $r2, killed $r1, 0, 14, $noreg :: (store 4 into %ir.p)
bb.6.sw.epilog:
- tBX_RET 14, %noreg
+ tBX_RET 14, $noreg
...
Modified: llvm/trunk/test/CodeGen/X86/2006-11-17-IllegalMove.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2006-11-17-IllegalMove.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2006-11-17-IllegalMove.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2006-11-17-IllegalMove.ll Wed Jan 31 14:04:26 2018
@@ -11,7 +11,7 @@ define void @handle_vector_size_attribut
; CHECK-NEXT: # %bb.1: # %cond_next129
; CHECK-NEXT: movb 0, %al
; CHECK-NEXT: movzbl %al, %eax
-; CHECK-NEXT: # kill: def %eax killed %eax def %ax
+; CHECK-NEXT: # kill: def $eax killed $eax def $ax
; CHECK-NEXT: divb %al
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: cmpq %rax, %rax
Modified: llvm/trunk/test/CodeGen/X86/2010-05-28-Crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-05-28-Crash.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-05-28-Crash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-05-28-Crash.ll Wed Jan 31 14:04:26 2018
@@ -45,7 +45,7 @@ entry:
!18 = !DIFile(filename: "f.c", directory: "/tmp")
!19 = !{}
-;CHECK: DEBUG_VALUE: bar:x <- %e
+;CHECK: DEBUG_VALUE: bar:x <- $e
;CHECK: Ltmp
;CHECK: DEBUG_VALUE: foo:y <- 1{{$}}
!20 = !{i32 1, !"Debug Info Version", i32 3}
Modified: llvm/trunk/test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll (original)
+++ llvm/trunk/test/CodeGen/X86/2010-06-01-DeadArg-DbgInfo.ll Wed Jan 31 14:04:26 2018
@@ -11,7 +11,7 @@ target triple = "x86_64-apple-darwin10.2
; Function Attrs: noinline nounwind optsize readnone ssp
define i32 @_ZN3foo3bazEi(%struct.foo* nocapture %this, i32 %x) #0 align 2 !dbg !4 {
entry:
- ; CHECK: DEBUG_VALUE: baz:this <- %rdi{{$}}
+ ; CHECK: DEBUG_VALUE: baz:this <- $rdi{{$}}
tail call void @llvm.dbg.value(metadata %struct.foo* %this, i64 0, metadata !13, metadata !16), !dbg !17
tail call void @llvm.dbg.value(metadata i32 %x, i64 0, metadata !18, metadata !16), !dbg !17
%0 = mul nsw i32 %x, 7, !dbg !19
Modified: llvm/trunk/test/CodeGen/X86/3addr-or.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/3addr-or.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/3addr-or.ll (original)
+++ llvm/trunk/test/CodeGen/X86/3addr-or.ll Wed Jan 31 14:04:26 2018
@@ -5,7 +5,7 @@
define i32 @test1(i32 %x) nounwind ssp {
; CHECK-LABEL: test1:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: shll $5, %edi
; CHECK-NEXT: leal 3(%rdi), %eax
; CHECK-NEXT: retq
@@ -20,7 +20,7 @@ define i32 @test1(i32 %x) nounwind ssp {
define i64 @test2(i8 %A, i8 %B) nounwind {
; CHECK-LABEL: test2:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: shll $4, %edi
; CHECK-NEXT: andl $48, %edi
; CHECK-NEXT: movzbl %sil, %eax
@@ -55,8 +55,8 @@ define void @test3(i32 %x, i32* %P) noun
define i32 @test4(i32 %a, i32 %b) nounwind readnone ssp {
; CHECK-LABEL: test4:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %esi killed %esi def %rsi
-; CHECK-NEXT: # kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: # kill: def $esi killed $esi def $rsi
+; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
; CHECK-NEXT: andl $6, %edi
; CHECK-NEXT: andl $16, %esi
; CHECK-NEXT: leal (%rsi,%rdi), %eax
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/add-scalar.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/add-scalar.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/add-scalar.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/add-scalar.ll Wed Jan 31 14:04:26 2018
@@ -28,8 +28,8 @@ define i64 @test_add_i64(i64 %arg1, i64
define i32 @test_add_i32(i32 %arg1, i32 %arg2) {
; X64-LABEL: test_add_i32:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
; X64-NEXT: leal (%rsi,%rdi), %eax
; X64-NEXT: retq
;
@@ -45,10 +45,10 @@ define i32 @test_add_i32(i32 %arg1, i32
define i16 @test_add_i16(i16 %arg1, i16 %arg2) {
; X64-LABEL: test_add_i16:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
; X64-NEXT: leal (%rsi,%rdi), %eax
-; X64-NEXT: # kill: def %ax killed %ax killed %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
; X64-NEXT: retq
;
; X32-LABEL: test_add_i16:
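The '# kill: def $edi killed $edi def $rdi' comments that recur in these x86 tests mark a 32-bit value being reused as the low half of a 64-bit register (here so leal can address through %rdi), which is sound because writing a 32-bit GPR on x86-64 zeroes bits 63:32 rather than preserving them. A quick model of that rule:

def write_reg32(old_rdi, value32):
    # x86-64 semantics: a 32-bit write zero-extends into the full register;
    # the previous upper half of old_rdi is discarded, not merged.
    return value32 & 0xFFFFFFFF

assert write_reg32(0xDEADBEEF00000000, 0x123) == 0x123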
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/ext-x86-64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/ext-x86-64.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/ext-x86-64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/ext-x86-64.ll Wed Jan 31 14:04:26 2018
@@ -6,7 +6,7 @@
define i64 @test_zext_i1(i8 %a) {
; X64-LABEL: test_zext_i1:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %edi killed %edi def %rdi
+; X64-NEXT: # kill: def $edi killed $edi def $rdi
; X64-NEXT: andq $1, %rdi
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/ext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/ext.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/ext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/ext.ll Wed Jan 31 14:04:26 2018
@@ -13,7 +13,7 @@ define i8 @test_zext_i1toi8(i32 %a) {
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andb $1, %al
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
%val = trunc i32 %a to i1
%r = zext i1 %val to i8
@@ -31,7 +31,7 @@ define i16 @test_zext_i1toi16(i32 %a) {
; X32: # %bb.0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andw $1, %ax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
%val = trunc i32 %a to i1
%r = zext i1 %val to i16
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/gep.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/gep.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/gep.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/gep.ll Wed Jan 31 14:04:26 2018
@@ -13,7 +13,7 @@ define i32* @test_gep_i8(i32 *%arr, i8 %
;
; X64-LABEL: test_gep_i8:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
; X64-NEXT: movsbq %sil, %rax
; X64-NEXT: leaq (%rdi,%rax,4), %rax
; X64-NEXT: retq
@@ -47,7 +47,7 @@ define i32* @test_gep_i16(i32 *%arr, i16
;
; X64-LABEL: test_gep_i16:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-NEXT: # kill: def $esi killed $esi def $rsi
; X64-NEXT: movswq %si, %rax
; X64-NEXT: leaq (%rdi,%rax,4), %rax
; X64-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/irtranslator-callingconv.ll Wed Jan 31 14:04:26 2018
@@ -31,22 +31,22 @@ define i8 @test_i8_args_8(i8 %arg1, i8 %
; X32: G_STORE [[LOAD]](s8), [[GV]](p0) :: (store 1 into @a1_8bit)
; X32: G_STORE [[LOAD6]](s8), [[GV1]](p0) :: (store 1 into @a7_8bit)
; X32: G_STORE [[LOAD7]](s8), [[GV2]](p0) :: (store 1 into @a8_8bit)
- ; X32: %al = COPY [[LOAD]](s8)
- ; X32: RET 0, implicit %al
+ ; X32: $al = COPY [[LOAD]](s8)
+ ; X32: RET 0, implicit $al
; X64-LABEL: name: test_i8_args_8
; X64: bb.1.entry:
- ; X64: liveins: %ecx, %edi, %edx, %esi, %r8d, %r9d
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
+ ; X64: liveins: $ecx, $edi, $edx, $esi, $r8d, $r9d
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; X64: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
- ; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
+ ; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; X64: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
- ; X64: [[COPY2:%[0-9]+]]:_(s32) = COPY %edx
+ ; X64: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
; X64: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[COPY2]](s32)
- ; X64: [[COPY3:%[0-9]+]]:_(s32) = COPY %ecx
+ ; X64: [[COPY3:%[0-9]+]]:_(s32) = COPY $ecx
; X64: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[COPY3]](s32)
- ; X64: [[COPY4:%[0-9]+]]:_(s32) = COPY %r8d
+ ; X64: [[COPY4:%[0-9]+]]:_(s32) = COPY $r8d
; X64: [[TRUNC4:%[0-9]+]]:_(s8) = G_TRUNC [[COPY4]](s32)
- ; X64: [[COPY5:%[0-9]+]]:_(s32) = COPY %r9d
+ ; X64: [[COPY5:%[0-9]+]]:_(s32) = COPY $r9d
; X64: [[TRUNC5:%[0-9]+]]:_(s8) = G_TRUNC [[COPY5]](s32)
; X64: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; X64: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.1, align 0)
@@ -58,8 +58,8 @@ define i8 @test_i8_args_8(i8 %arg1, i8 %
; X64: G_STORE [[TRUNC]](s8), [[GV]](p0) :: (store 1 into @a1_8bit)
; X64: G_STORE [[LOAD]](s8), [[GV1]](p0) :: (store 1 into @a7_8bit)
; X64: G_STORE [[LOAD1]](s8), [[GV2]](p0) :: (store 1 into @a8_8bit)
- ; X64: %al = COPY [[TRUNC]](s8)
- ; X64: RET 0, implicit %al
+ ; X64: $al = COPY [[TRUNC]](s8)
+ ; X64: RET 0, implicit $al
i8 %arg5, i8 %arg6, i8 %arg7, i8 %arg8) {
entry:
store i8 %arg1, i8* @a1_8bit
@@ -97,17 +97,17 @@ define i32 @test_i32_args_8(i32 %arg1, i
; X32: G_STORE [[LOAD]](s32), [[GV]](p0) :: (store 4 into @a1_32bit)
; X32: G_STORE [[LOAD6]](s32), [[GV1]](p0) :: (store 4 into @a7_32bit)
; X32: G_STORE [[LOAD7]](s32), [[GV2]](p0) :: (store 4 into @a8_32bit)
- ; X32: %eax = COPY [[LOAD]](s32)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[LOAD]](s32)
+ ; X32: RET 0, implicit $eax
; X64-LABEL: name: test_i32_args_8
; X64: bb.1.entry:
- ; X64: liveins: %ecx, %edi, %edx, %esi, %r8d, %r9d
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
- ; X64: [[COPY2:%[0-9]+]]:_(s32) = COPY %edx
- ; X64: [[COPY3:%[0-9]+]]:_(s32) = COPY %ecx
- ; X64: [[COPY4:%[0-9]+]]:_(s32) = COPY %r8d
- ; X64: [[COPY5:%[0-9]+]]:_(s32) = COPY %r9d
+ ; X64: liveins: $ecx, $edi, $edx, $esi, $r8d, $r9d
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
+ ; X64: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
+ ; X64: [[COPY3:%[0-9]+]]:_(s32) = COPY $ecx
+ ; X64: [[COPY4:%[0-9]+]]:_(s32) = COPY $r8d
+ ; X64: [[COPY5:%[0-9]+]]:_(s32) = COPY $r9d
; X64: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; X64: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 0)
; X64: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
@@ -118,8 +118,8 @@ define i32 @test_i32_args_8(i32 %arg1, i
; X64: G_STORE [[COPY]](s32), [[GV]](p0) :: (store 4 into @a1_32bit)
; X64: G_STORE [[LOAD]](s32), [[GV1]](p0) :: (store 4 into @a7_32bit)
; X64: G_STORE [[LOAD1]](s32), [[GV2]](p0) :: (store 4 into @a8_32bit)
- ; X64: %eax = COPY [[COPY]](s32)
- ; X64: RET 0, implicit %eax
+ ; X64: $eax = COPY [[COPY]](s32)
+ ; X64: RET 0, implicit $eax
i32 %arg5, i32 %arg6, i32 %arg7, i32 %arg8) {
entry:
store i32 %arg1, i32* @a1_32bit
@@ -182,18 +182,18 @@ define i64 @test_i64_args_8(i64 %arg1, i
; X32: G_STORE [[MV6]](s64), [[GV1]](p0) :: (store 8 into @a7_64bit, align 4)
; X32: G_STORE [[MV7]](s64), [[GV2]](p0) :: (store 8 into @a8_64bit, align 4)
; X32: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[MV]](s64)
- ; X32: %eax = COPY [[UV]](s32)
- ; X32: %edx = COPY [[UV1]](s32)
- ; X32: RET 0, implicit %eax, implicit %edx
+ ; X32: $eax = COPY [[UV]](s32)
+ ; X32: $edx = COPY [[UV1]](s32)
+ ; X32: RET 0, implicit $eax, implicit $edx
; X64-LABEL: name: test_i64_args_8
; X64: bb.1.entry:
- ; X64: liveins: %rcx, %rdi, %rdx, %rsi, %r8, %r9
- ; X64: [[COPY:%[0-9]+]]:_(s64) = COPY %rdi
- ; X64: [[COPY1:%[0-9]+]]:_(s64) = COPY %rsi
- ; X64: [[COPY2:%[0-9]+]]:_(s64) = COPY %rdx
- ; X64: [[COPY3:%[0-9]+]]:_(s64) = COPY %rcx
- ; X64: [[COPY4:%[0-9]+]]:_(s64) = COPY %r8
- ; X64: [[COPY5:%[0-9]+]]:_(s64) = COPY %r9
+ ; X64: liveins: $rcx, $rdi, $rdx, $rsi, $r8, $r9
+ ; X64: [[COPY:%[0-9]+]]:_(s64) = COPY $rdi
+ ; X64: [[COPY1:%[0-9]+]]:_(s64) = COPY $rsi
+ ; X64: [[COPY2:%[0-9]+]]:_(s64) = COPY $rdx
+ ; X64: [[COPY3:%[0-9]+]]:_(s64) = COPY $rcx
+ ; X64: [[COPY4:%[0-9]+]]:_(s64) = COPY $r8
+ ; X64: [[COPY5:%[0-9]+]]:_(s64) = COPY $r9
; X64: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
; X64: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.1, align 0)
; X64: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
@@ -204,8 +204,8 @@ define i64 @test_i64_args_8(i64 %arg1, i
; X64: G_STORE [[COPY]](s64), [[GV]](p0) :: (store 8 into @a1_64bit)
; X64: G_STORE [[LOAD]](s64), [[GV1]](p0) :: (store 8 into @a7_64bit)
; X64: G_STORE [[LOAD1]](s64), [[GV2]](p0) :: (store 8 into @a8_64bit)
- ; X64: %rax = COPY [[COPY]](s64)
- ; X64: RET 0, implicit %rax
+ ; X64: $rax = COPY [[COPY]](s64)
+ ; X64: RET 0, implicit $rax
i64 %arg5, i64 %arg6, i64 %arg7, i64 %arg8) {
; ... a bunch more that we don't track ...
entry:
@@ -222,15 +222,15 @@ define float @test_float_args(float %arg
; X32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 0)
; X32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
- ; X32: %fp0 = COPY [[LOAD1]](s32)
- ; X32: RET 0, implicit %fp0
+ ; X32: $fp0 = COPY [[LOAD1]](s32)
+ ; X32: RET 0, implicit $fp0
; X64-LABEL: name: test_float_args
; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %xmm0, %xmm1
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %xmm0
- ; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY %xmm1
- ; X64: %xmm0 = COPY [[COPY1]](s32)
- ; X64: RET 0, implicit %xmm0
+ ; X64: liveins: $xmm0, $xmm1
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0
+ ; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY $xmm1
+ ; X64: $xmm0 = COPY [[COPY1]](s32)
+ ; X64: RET 0, implicit $xmm0
ret float %arg2
}
@@ -241,57 +241,57 @@ define double @test_double_args(double %
; X32: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 8 from %fixed-stack.1, align 0)
; X32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 8 from %fixed-stack.0, align 0)
- ; X32: %fp0 = COPY [[LOAD1]](s64)
- ; X32: RET 0, implicit %fp0
+ ; X32: $fp0 = COPY [[LOAD1]](s64)
+ ; X32: RET 0, implicit $fp0
; X64-LABEL: name: test_double_args
; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %xmm0, %xmm1
- ; X64: [[COPY:%[0-9]+]]:_(s64) = COPY %xmm0
- ; X64: [[COPY1:%[0-9]+]]:_(s64) = COPY %xmm1
- ; X64: %xmm0 = COPY [[COPY1]](s64)
- ; X64: RET 0, implicit %xmm0
+ ; X64: liveins: $xmm0, $xmm1
+ ; X64: [[COPY:%[0-9]+]]:_(s64) = COPY $xmm0
+ ; X64: [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm1
+ ; X64: $xmm0 = COPY [[COPY1]](s64)
+ ; X64: RET 0, implicit $xmm0
ret double %arg2
}
define <4 x i32> @test_v4i32_args(<4 x i32> %arg1, <4 x i32> %arg2) {
; X32-LABEL: name: test_v4i32_args
; X32: bb.1 (%ir-block.0):
- ; X32: liveins: %xmm0, %xmm1
- ; X32: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY %xmm0
- ; X32: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
- ; X32: %xmm0 = COPY [[COPY1]](<4 x s32>)
- ; X32: RET 0, implicit %xmm0
+ ; X32: liveins: $xmm0, $xmm1
+ ; X32: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+ ; X32: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
+ ; X32: $xmm0 = COPY [[COPY1]](<4 x s32>)
+ ; X32: RET 0, implicit $xmm0
; X64-LABEL: name: test_v4i32_args
; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %xmm0, %xmm1
- ; X64: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY %xmm0
- ; X64: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
- ; X64: %xmm0 = COPY [[COPY1]](<4 x s32>)
- ; X64: RET 0, implicit %xmm0
+ ; X64: liveins: $xmm0, $xmm1
+ ; X64: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+ ; X64: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
+ ; X64: $xmm0 = COPY [[COPY1]](<4 x s32>)
+ ; X64: RET 0, implicit $xmm0
ret <4 x i32> %arg2
}
define <8 x i32> @test_v8i32_args(<8 x i32> %arg1) {
; X32-LABEL: name: test_v8i32_args
; X32: bb.1 (%ir-block.0):
- ; X32: liveins: %xmm0, %xmm1
- ; X32: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY %xmm0
- ; X32: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
+ ; X32: liveins: $xmm0, $xmm1
+ ; X32: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+ ; X32: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
; X32: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>)
; X32: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[MV]](<8 x s32>)
- ; X32: %xmm0 = COPY [[UV]](<4 x s32>)
- ; X32: %xmm1 = COPY [[UV1]](<4 x s32>)
- ; X32: RET 0, implicit %xmm0, implicit %xmm1
+ ; X32: $xmm0 = COPY [[UV]](<4 x s32>)
+ ; X32: $xmm1 = COPY [[UV1]](<4 x s32>)
+ ; X32: RET 0, implicit $xmm0, implicit $xmm1
; X64-LABEL: name: test_v8i32_args
; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %xmm0, %xmm1
- ; X64: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY %xmm0
- ; X64: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
+ ; X64: liveins: $xmm0, $xmm1
+ ; X64: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+ ; X64: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
; X64: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>)
; X64: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[MV]](<8 x s32>)
- ; X64: %xmm0 = COPY [[UV]](<4 x s32>)
- ; X64: %xmm1 = COPY [[UV1]](<4 x s32>)
- ; X64: RET 0, implicit %xmm0, implicit %xmm1
+ ; X64: $xmm0 = COPY [[UV]](<4 x s32>)
+ ; X64: $xmm1 = COPY [[UV1]](<4 x s32>)
+ ; X64: RET 0, implicit $xmm0, implicit $xmm1
ret <8 x i32> %arg1
}
@@ -307,19 +307,19 @@ entry:
}
define i32 * @test_memop_i32(i32 * %p1) {
-;X64 liveins: %rdi
+;X64 liveins: $rdi
; X32-LABEL: name: test_memop_i32
; X32: bb.1 (%ir-block.0):
; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
- ; X32: %eax = COPY [[LOAD]](p0)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[LOAD]](p0)
+ ; X32: RET 0, implicit $eax
; X64-LABEL: name: test_memop_i32
; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %rdi
- ; X64: [[COPY:%[0-9]+]]:_(p0) = COPY %rdi
- ; X64: %rax = COPY [[COPY]](p0)
- ; X64: RET 0, implicit %rax
+ ; X64: liveins: $rdi
+ ; X64: [[COPY:%[0-9]+]]:_(p0) = COPY $rdi
+ ; X64: $rax = COPY [[COPY]](p0)
+ ; X64: RET 0, implicit $rax
ret i32 * %p1;
}
@@ -327,15 +327,15 @@ declare void @trivial_callee()
define void @test_trivial_call() {
; X32-LABEL: name: test_trivial_call
; X32: bb.1 (%ir-block.0):
- ; X32: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: CALLpcrel32 @trivial_callee, csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: CALLpcrel32 @trivial_callee, csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: RET 0
; X64-LABEL: name: test_trivial_call
; X64: bb.1 (%ir-block.0):
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: CALL64pcrel32 @trivial_callee, csr_64, implicit %rsp, implicit %ssp
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: CALL64pcrel32 @trivial_callee, csr_64, implicit $rsp, implicit $ssp
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: RET 0
call void @trivial_callee()
ret void
@@ -349,28 +349,28 @@ define void @test_simple_arg(i32 %in0, i
; X32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.1, align 0)
; X32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
- ; X32: ADJCALLSTACKDOWN32 8, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: ADJCALLSTACKDOWN32 8, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s32)
; X32: G_STORE [[LOAD1]](s32), [[GEP]](p0) :: (store 4 into stack, align 0)
- ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; X32: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP1]](p0) :: (store 4 into stack + 4, align 0)
- ; X32: CALLpcrel32 @simple_arg_callee, csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 8, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: CALLpcrel32 @simple_arg_callee, csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 8, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: RET 0
; X64-LABEL: name: test_simple_arg
; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %edi, %esi
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: %edi = COPY [[COPY1]](s32)
- ; X64: %esi = COPY [[COPY]](s32)
- ; X64: CALL64pcrel32 @simple_arg_callee, csr_64, implicit %rsp, implicit %ssp, implicit %edi, implicit %esi
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: liveins: $edi, $esi
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; X64: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: $edi = COPY [[COPY1]](s32)
+ ; X64: $esi = COPY [[COPY]](s32)
+ ; X64: CALL64pcrel32 @simple_arg_callee, csr_64, implicit $rsp, implicit $ssp, implicit $edi, implicit $esi
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: RET 0
call void @simple_arg_callee(i32 %in1, i32 %in0)
ret void
@@ -382,63 +382,63 @@ define void @test_simple_arg8_call(i32 %
; X32: bb.1 (%ir-block.0):
; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
- ; X32: ADJCALLSTACKDOWN32 32, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: ADJCALLSTACKDOWN32 32, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP]](p0) :: (store 4 into stack, align 0)
- ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; X32: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP1]](p0) :: (store 4 into stack + 4, align 0)
- ; X32: [[COPY2:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY2:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
; X32: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[COPY2]], [[C2]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP2]](p0) :: (store 4 into stack + 8, align 0)
- ; X32: [[COPY3:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY3:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 12
; X32: [[GEP3:%[0-9]+]]:_(p0) = G_GEP [[COPY3]], [[C3]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP3]](p0) :: (store 4 into stack + 12, align 0)
- ; X32: [[COPY4:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY4:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; X32: [[GEP4:%[0-9]+]]:_(p0) = G_GEP [[COPY4]], [[C4]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP4]](p0) :: (store 4 into stack + 16, align 0)
- ; X32: [[COPY5:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY5:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 20
; X32: [[GEP5:%[0-9]+]]:_(p0) = G_GEP [[COPY5]], [[C5]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP5]](p0) :: (store 4 into stack + 20, align 0)
- ; X32: [[COPY6:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY6:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; X32: [[GEP6:%[0-9]+]]:_(p0) = G_GEP [[COPY6]], [[C6]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP6]](p0) :: (store 4 into stack + 24, align 0)
- ; X32: [[COPY7:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY7:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 28
; X32: [[GEP7:%[0-9]+]]:_(p0) = G_GEP [[COPY7]], [[C7]](s32)
; X32: G_STORE [[LOAD]](s32), [[GEP7]](p0) :: (store 4 into stack + 28, align 0)
- ; X32: CALLpcrel32 @simple_arg8_callee, csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 32, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: CALLpcrel32 @simple_arg8_callee, csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 32, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: RET 0
; X64-LABEL: name: test_simple_arg8_call
; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %edi
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; X64: ADJCALLSTACKDOWN64 16, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: %edi = COPY [[COPY]](s32)
- ; X64: %esi = COPY [[COPY]](s32)
- ; X64: %edx = COPY [[COPY]](s32)
- ; X64: %ecx = COPY [[COPY]](s32)
- ; X64: %r8d = COPY [[COPY]](s32)
- ; X64: %r9d = COPY [[COPY]](s32)
- ; X64: [[COPY1:%[0-9]+]]:_(p0) = COPY %rsp
+ ; X64: liveins: $edi
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; X64: ADJCALLSTACKDOWN64 16, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: $edi = COPY [[COPY]](s32)
+ ; X64: $esi = COPY [[COPY]](s32)
+ ; X64: $edx = COPY [[COPY]](s32)
+ ; X64: $ecx = COPY [[COPY]](s32)
+ ; X64: $r8d = COPY [[COPY]](s32)
+ ; X64: $r9d = COPY [[COPY]](s32)
+ ; X64: [[COPY1:%[0-9]+]]:_(p0) = COPY $rsp
; X64: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; X64: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C]](s64)
; X64: G_STORE [[COPY]](s32), [[GEP]](p0) :: (store 4 into stack, align 0)
- ; X64: [[COPY2:%[0-9]+]]:_(p0) = COPY %rsp
+ ; X64: [[COPY2:%[0-9]+]]:_(p0) = COPY $rsp
; X64: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; X64: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY2]], [[C1]](s64)
; X64: G_STORE [[COPY]](s32), [[GEP1]](p0) :: (store 4 into stack + 8, align 0)
- ; X64: CALL64pcrel32 @simple_arg8_callee, csr_64, implicit %rsp, implicit %ssp, implicit %edi, implicit %esi, implicit %edx, implicit %ecx, implicit %r8d, implicit %r9d
- ; X64: ADJCALLSTACKUP64 16, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: CALL64pcrel32 @simple_arg8_callee, csr_64, implicit $rsp, implicit $ssp, implicit $edi, implicit $esi, implicit $edx, implicit $ecx, implicit $r8d, implicit $r9d
+ ; X64: ADJCALLSTACKUP64 16, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: RET 0
call void @simple_arg8_callee(i32 %in0, i32 %in0, i32 %in0, i32 %in0,i32 %in0, i32 %in0, i32 %in0, i32 %in0)
ret void
@@ -449,28 +449,28 @@ define i32 @test_simple_return_callee()
; X32-LABEL: name: test_simple_return_callee
; X32: bb.1 (%ir-block.0):
; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
- ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C1]](s32)
; X32: G_STORE [[C]](s32), [[GEP]](p0) :: (store 4 into stack, align 0)
- ; X32: CALLpcrel32 @simple_return_callee, csr_32, implicit %esp, implicit %ssp, implicit-def %eax
- ; X32: [[COPY1:%[0-9]+]]:_(s32) = COPY %eax
- ; X32: ADJCALLSTACKUP32 4, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: CALLpcrel32 @simple_return_callee, csr_32, implicit $esp, implicit $ssp, implicit-def $eax
+ ; X32: [[COPY1:%[0-9]+]]:_(s32) = COPY $eax
+ ; X32: ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY1]], [[COPY1]]
- ; X32: %eax = COPY [[ADD]](s32)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[ADD]](s32)
+ ; X32: RET 0, implicit $eax
; X64-LABEL: name: test_simple_return_callee
; X64: bb.1 (%ir-block.0):
; X64: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: %edi = COPY [[C]](s32)
- ; X64: CALL64pcrel32 @simple_return_callee, csr_64, implicit %rsp, implicit %ssp, implicit %edi, implicit-def %eax
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %eax
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: $edi = COPY [[C]](s32)
+ ; X64: CALL64pcrel32 @simple_return_callee, csr_64, implicit $rsp, implicit $ssp, implicit $edi, implicit-def $eax
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $eax
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY]]
- ; X64: %eax = COPY [[ADD]](s32)
- ; X64: RET 0, implicit %eax
+ ; X64: $eax = COPY [[ADD]](s32)
+ ; X64: RET 0, implicit $eax
%call = call i32 @simple_return_callee(i32 5)
%r = add i32 %call, %call
ret i32 %r
@@ -480,51 +480,51 @@ declare <8 x i32> @split_return_callee(<
define <8 x i32> @test_split_return_callee(<8 x i32> %arg1, <8 x i32> %arg2) {
; X32-LABEL: name: test_split_return_callee
; X32: bb.1 (%ir-block.0):
- ; X32: liveins: %xmm0, %xmm1, %xmm2
- ; X32: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY %xmm0
- ; X32: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
- ; X32: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY %xmm2
+ ; X32: liveins: $xmm0, $xmm1, $xmm2
+ ; X32: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+ ; X32: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
+ ; X32: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $xmm2
; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 16 from %fixed-stack.0, align 0)
; X32: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>)
; X32: [[MV1:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY2]](<4 x s32>), [[LOAD]](<4 x s32>)
- ; X32: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[MV1]](<8 x s32>)
- ; X32: %xmm0 = COPY [[UV]](<4 x s32>)
- ; X32: %xmm1 = COPY [[UV1]](<4 x s32>)
- ; X32: CALLpcrel32 @split_return_callee, csr_32, implicit %esp, implicit %ssp, implicit %xmm0, implicit %xmm1, implicit-def %xmm0, implicit-def %xmm1
- ; X32: [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY %xmm0
- ; X32: [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
+ ; X32: $xmm0 = COPY [[UV]](<4 x s32>)
+ ; X32: $xmm1 = COPY [[UV1]](<4 x s32>)
+ ; X32: CALLpcrel32 @split_return_callee, csr_32, implicit $esp, implicit $ssp, implicit $xmm0, implicit $xmm1, implicit-def $xmm0, implicit-def $xmm1
+ ; X32: [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+ ; X32: [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
; X32: [[MV2:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY3]](<4 x s32>), [[COPY4]](<4 x s32>)
- ; X32: ADJCALLSTACKUP32 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: ADJCALLSTACKUP32 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: [[ADD:%[0-9]+]]:_(<8 x s32>) = G_ADD [[MV]], [[MV2]]
; X32: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[ADD]](<8 x s32>)
- ; X32: %xmm0 = COPY [[UV2]](<4 x s32>)
- ; X32: %xmm1 = COPY [[UV3]](<4 x s32>)
- ; X32: RET 0, implicit %xmm0, implicit %xmm1
+ ; X32: $xmm0 = COPY [[UV2]](<4 x s32>)
+ ; X32: $xmm1 = COPY [[UV3]](<4 x s32>)
+ ; X32: RET 0, implicit $xmm0, implicit $xmm1
; X64-LABEL: name: test_split_return_callee
; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %xmm0, %xmm1, %xmm2, %xmm3
- ; X64: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY %xmm0
- ; X64: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
- ; X64: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY %xmm2
- ; X64: [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY %xmm3
+ ; X64: liveins: $xmm0, $xmm1, $xmm2, $xmm3
+ ; X64: [[COPY:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+ ; X64: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
+ ; X64: [[COPY2:%[0-9]+]]:_(<4 x s32>) = COPY $xmm2
+ ; X64: [[COPY3:%[0-9]+]]:_(<4 x s32>) = COPY $xmm3
; X64: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY]](<4 x s32>), [[COPY1]](<4 x s32>)
; X64: [[MV1:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY2]](<4 x s32>), [[COPY3]](<4 x s32>)
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[MV1]](<8 x s32>)
- ; X64: %xmm0 = COPY [[UV]](<4 x s32>)
- ; X64: %xmm1 = COPY [[UV1]](<4 x s32>)
- ; X64: CALL64pcrel32 @split_return_callee, csr_64, implicit %rsp, implicit %ssp, implicit %xmm0, implicit %xmm1, implicit-def %xmm0, implicit-def %xmm1
- ; X64: [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY %xmm0
- ; X64: [[COPY5:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
+ ; X64: $xmm0 = COPY [[UV]](<4 x s32>)
+ ; X64: $xmm1 = COPY [[UV1]](<4 x s32>)
+ ; X64: CALL64pcrel32 @split_return_callee, csr_64, implicit $rsp, implicit $ssp, implicit $xmm0, implicit $xmm1, implicit-def $xmm0, implicit-def $xmm1
+ ; X64: [[COPY4:%[0-9]+]]:_(<4 x s32>) = COPY $xmm0
+ ; X64: [[COPY5:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
; X64: [[MV2:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[COPY4]](<4 x s32>), [[COPY5]](<4 x s32>)
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: [[ADD:%[0-9]+]]:_(<8 x s32>) = G_ADD [[MV]], [[MV2]]
; X64: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[ADD]](<8 x s32>)
- ; X64: %xmm0 = COPY [[UV2]](<4 x s32>)
- ; X64: %xmm1 = COPY [[UV3]](<4 x s32>)
- ; X64: RET 0, implicit %xmm0, implicit %xmm1
+ ; X64: $xmm0 = COPY [[UV2]](<4 x s32>)
+ ; X64: $xmm1 = COPY [[UV3]](<4 x s32>)
+ ; X64: RET 0, implicit $xmm0, implicit $xmm1
%call = call <8 x i32> @split_return_callee(<8 x i32> %arg2)
%r = add <8 x i32> %arg1, %call
ret <8 x i32> %r
@@ -535,17 +535,17 @@ define void @test_indirect_call(void()*
; X32: bb.1 (%ir-block.0):
; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD:%[0-9]+]]:gr32(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
- ; X32: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: CALL32r [[LOAD]](p0), csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: ADJCALLSTACKDOWN32 0, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: CALL32r [[LOAD]](p0), csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: RET 0
; X64-LABEL: name: test_indirect_call
; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %rdi
- ; X64: [[COPY:%[0-9]+]]:gr64(p0) = COPY %rdi
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: CALL64r [[COPY]](p0), csr_64, implicit %rsp, implicit %ssp
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: liveins: $rdi
+ ; X64: [[COPY:%[0-9]+]]:gr64(p0) = COPY $rdi
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: CALL64r [[COPY]](p0), csr_64, implicit $rsp, implicit $ssp
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: RET 0
call void %func()
ret void
@@ -559,51 +559,51 @@ define void @test_abi_exts_call(i8* %add
; X32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
; X32: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
; X32: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[LOAD]](p0) :: (load 1 from %ir.addr)
- ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s32)
; X32: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD1]](s8)
; X32: G_STORE [[ANYEXT]](s32), [[GEP]](p0) :: (store 4 into stack, align 0)
- ; X32: CALLpcrel32 @take_char, csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 4, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: CALLpcrel32 @take_char, csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X32: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s32)
; X32: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD1]](s8)
; X32: G_STORE [[SEXT]](s32), [[GEP1]](p0) :: (store 4 into stack, align 0)
- ; X32: CALLpcrel32 @take_char, csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 4, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: [[COPY2:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: CALLpcrel32 @take_char, csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKDOWN32 4, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: [[COPY2:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X32: [[GEP2:%[0-9]+]]:_(p0) = G_GEP [[COPY2]], [[C2]](s32)
; X32: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s8)
; X32: G_STORE [[ZEXT]](s32), [[GEP2]](p0) :: (store 4 into stack, align 0)
- ; X32: CALLpcrel32 @take_char, csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 4, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: CALLpcrel32 @take_char, csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 4, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: RET 0
; X64-LABEL: name: test_abi_exts_call
; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %rdi
- ; X64: [[COPY:%[0-9]+]]:_(p0) = COPY %rdi
+ ; X64: liveins: $rdi
+ ; X64: [[COPY:%[0-9]+]]:_(p0) = COPY $rdi
; X64: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[COPY]](p0) :: (load 1 from %ir.addr)
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LOAD]](s8)
- ; X64: %edi = COPY [[ANYEXT]](s32)
- ; X64: CALL64pcrel32 @take_char, csr_64, implicit %rsp, implicit %ssp, implicit %edi
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: $edi = COPY [[ANYEXT]](s32)
+ ; X64: CALL64pcrel32 @take_char, csr_64, implicit $rsp, implicit $ssp, implicit $edi
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD]](s8)
- ; X64: %edi = COPY [[SEXT]](s32)
- ; X64: CALL64pcrel32 @take_char, csr_64, implicit %rsp, implicit %ssp, implicit %edi
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: $edi = COPY [[SEXT]](s32)
+ ; X64: CALL64pcrel32 @take_char, csr_64, implicit $rsp, implicit $ssp, implicit $edi
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD]](s8)
- ; X64: %edi = COPY [[ZEXT]](s32)
- ; X64: CALL64pcrel32 @take_char, csr_64, implicit %rsp, implicit %ssp, implicit %edi
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: $edi = COPY [[ZEXT]](s32)
+ ; X64: CALL64pcrel32 @take_char, csr_64, implicit $rsp, implicit $ssp, implicit $edi
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: RET 0
%val = load i8, i8* %addr
call void @take_char(i8 %val)
@@ -622,31 +622,31 @@ define void @test_variadic_call_1(i8** %
; X32: [[LOAD1:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
; X32: [[LOAD2:%[0-9]+]]:_(p0) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.addr_ptr)
; X32: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[LOAD1]](p0) :: (load 4 from %ir.val_ptr)
- ; X32: ADJCALLSTACKDOWN32 8, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: ADJCALLSTACKDOWN32 8, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s32)
; X32: G_STORE [[LOAD2]](p0), [[GEP]](p0) :: (store 4 into stack, align 0)
- ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; X32: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s32)
; X32: G_STORE [[LOAD3]](s32), [[GEP1]](p0) :: (store 4 into stack + 4, align 0)
- ; X32: CALLpcrel32 @variadic_callee, csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 8, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: CALLpcrel32 @variadic_callee, csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 8, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: RET 0
; X64-LABEL: name: test_variadic_call_1
; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %rdi, %rsi
- ; X64: [[COPY:%[0-9]+]]:_(p0) = COPY %rdi
- ; X64: [[COPY1:%[0-9]+]]:_(p0) = COPY %rsi
+ ; X64: liveins: $rdi, $rsi
+ ; X64: [[COPY:%[0-9]+]]:_(p0) = COPY $rdi
+ ; X64: [[COPY1:%[0-9]+]]:_(p0) = COPY $rsi
; X64: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0) :: (load 8 from %ir.addr_ptr)
; X64: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[COPY1]](p0) :: (load 4 from %ir.val_ptr)
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: %rdi = COPY [[LOAD]](p0)
- ; X64: %esi = COPY [[LOAD1]](s32)
- ; X64: %al = MOV8ri 0
- ; X64: CALL64pcrel32 @variadic_callee, csr_64, implicit %rsp, implicit %ssp, implicit %rdi, implicit %esi, implicit %al
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: $rdi = COPY [[LOAD]](p0)
+ ; X64: $esi = COPY [[LOAD1]](s32)
+ ; X64: $al = MOV8ri 0
+ ; X64: CALL64pcrel32 @variadic_callee, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $esi, implicit $al
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: RET 0
%addr = load i8*, i8** %addr_ptr
%val = load i32, i32* %val_ptr
@@ -663,31 +663,31 @@ define void @test_variadic_call_2(i8** %
; X32: [[LOAD1:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
; X32: [[LOAD2:%[0-9]+]]:_(p0) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.addr_ptr)
; X32: [[LOAD3:%[0-9]+]]:_(s64) = G_LOAD [[LOAD1]](p0) :: (load 8 from %ir.val_ptr, align 4)
- ; X32: ADJCALLSTACKDOWN32 12, 0, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
- ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: ADJCALLSTACKDOWN32 12, 0, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
+ ; X32: [[COPY:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY]], [[C]](s32)
; X32: G_STORE [[LOAD2]](p0), [[GEP]](p0) :: (store 4 into stack, align 0)
- ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY %esp
+ ; X32: [[COPY1:%[0-9]+]]:_(p0) = COPY $esp
; X32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; X32: [[GEP1:%[0-9]+]]:_(p0) = G_GEP [[COPY1]], [[C1]](s32)
; X32: G_STORE [[LOAD3]](s64), [[GEP1]](p0) :: (store 8 into stack + 4, align 0)
- ; X32: CALLpcrel32 @variadic_callee, csr_32, implicit %esp, implicit %ssp
- ; X32: ADJCALLSTACKUP32 12, 0, implicit-def %esp, implicit-def %eflags, implicit-def %ssp, implicit %esp, implicit %ssp
+ ; X32: CALLpcrel32 @variadic_callee, csr_32, implicit $esp, implicit $ssp
+ ; X32: ADJCALLSTACKUP32 12, 0, implicit-def $esp, implicit-def $eflags, implicit-def $ssp, implicit $esp, implicit $ssp
; X32: RET 0
; X64-LABEL: name: test_variadic_call_2
; X64: bb.1 (%ir-block.0):
- ; X64: liveins: %rdi, %rsi
- ; X64: [[COPY:%[0-9]+]]:_(p0) = COPY %rdi
- ; X64: [[COPY1:%[0-9]+]]:_(p0) = COPY %rsi
+ ; X64: liveins: $rdi, $rsi
+ ; X64: [[COPY:%[0-9]+]]:_(p0) = COPY $rdi
+ ; X64: [[COPY1:%[0-9]+]]:_(p0) = COPY $rsi
; X64: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[COPY]](p0) :: (load 8 from %ir.addr_ptr)
; X64: [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[COPY1]](p0) :: (load 8 from %ir.val_ptr)
- ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
- ; X64: %rdi = COPY [[LOAD]](p0)
- ; X64: %xmm0 = COPY [[LOAD1]](s64)
- ; X64: %al = MOV8ri 1
- ; X64: CALL64pcrel32 @variadic_callee, csr_64, implicit %rsp, implicit %ssp, implicit %rdi, implicit %xmm0, implicit %al
- ; X64: ADJCALLSTACKUP64 0, 0, implicit-def %rsp, implicit-def %eflags, implicit-def %ssp, implicit %rsp, implicit %ssp
+ ; X64: ADJCALLSTACKDOWN64 0, 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
+ ; X64: $rdi = COPY [[LOAD]](p0)
+ ; X64: $xmm0 = COPY [[LOAD1]](s64)
+ ; X64: $al = MOV8ri 1
+ ; X64: CALL64pcrel32 @variadic_callee, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $xmm0, implicit $al
+ ; X64: ADJCALLSTACKUP64 0, 0, implicit-def $rsp, implicit-def $eflags, implicit-def $ssp, implicit $rsp, implicit $ssp
; X64: RET 0
%addr = load i8*, i8** %addr_ptr
%val = load double, double* %val_ptr
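
For readers scanning these hunks: the substantive change across all of these calling-convention tests is purely notational. A minimal sketch of the convention, assembled from the patterns visible above (the block itself is hypothetical, but the register names come from the x86 tests): physical registers are now printed with a '$' sigil, while virtual registers keep '%':

    bb.0:
      liveins: $edi
      %0:_(s32) = COPY $edi     ; physical $edi copied into virtual %0
      $eax = COPY %0(s32)       ; virtual %0 copied back out to physical $eax
      RET 0, implicit $eax
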
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-add-v128.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-add-v128.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-add-v128.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-add-v128.mir Wed Jan 31 14:04:26 2018
@@ -33,18 +33,18 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; ALL-LABEL: name: test_add_v16i8
; ALL: [[DEF:%[0-9]+]]:_(<16 x s8>) = IMPLICIT_DEF
; ALL: [[DEF1:%[0-9]+]]:_(<16 x s8>) = IMPLICIT_DEF
; ALL: [[ADD:%[0-9]+]]:_(<16 x s8>) = G_ADD [[DEF]], [[DEF1]]
- ; ALL: %xmm0 = COPY [[ADD]](<16 x s8>)
+ ; ALL: $xmm0 = COPY [[ADD]](<16 x s8>)
; ALL: RET 0
%0(<16 x s8>) = IMPLICIT_DEF
%1(<16 x s8>) = IMPLICIT_DEF
%2(<16 x s8>) = G_ADD %0, %1
- %xmm0 = COPY %2
+ $xmm0 = COPY %2
RET 0
...
@@ -59,18 +59,18 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; ALL-LABEL: name: test_add_v8i16
; ALL: [[DEF:%[0-9]+]]:_(<8 x s16>) = IMPLICIT_DEF
; ALL: [[DEF1:%[0-9]+]]:_(<8 x s16>) = IMPLICIT_DEF
; ALL: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[DEF]], [[DEF1]]
- ; ALL: %xmm0 = COPY [[ADD]](<8 x s16>)
+ ; ALL: $xmm0 = COPY [[ADD]](<8 x s16>)
; ALL: RET 0
%0(<8 x s16>) = IMPLICIT_DEF
%1(<8 x s16>) = IMPLICIT_DEF
%2(<8 x s16>) = G_ADD %0, %1
- %xmm0 = COPY %2
+ $xmm0 = COPY %2
RET 0
...
@@ -85,18 +85,18 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; ALL-LABEL: name: test_add_v4i32
; ALL: [[DEF:%[0-9]+]]:_(<4 x s32>) = IMPLICIT_DEF
; ALL: [[DEF1:%[0-9]+]]:_(<4 x s32>) = IMPLICIT_DEF
; ALL: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[DEF]], [[DEF1]]
- ; ALL: %xmm0 = COPY [[ADD]](<4 x s32>)
+ ; ALL: $xmm0 = COPY [[ADD]](<4 x s32>)
; ALL: RET 0
%0(<4 x s32>) = IMPLICIT_DEF
%1(<4 x s32>) = IMPLICIT_DEF
%2(<4 x s32>) = G_ADD %0, %1
- %xmm0 = COPY %2
+ $xmm0 = COPY %2
RET 0
...
@@ -111,18 +111,18 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; ALL-LABEL: name: test_add_v2i64
; ALL: [[DEF:%[0-9]+]]:_(<2 x s64>) = IMPLICIT_DEF
; ALL: [[DEF1:%[0-9]+]]:_(<2 x s64>) = IMPLICIT_DEF
; ALL: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[DEF]], [[DEF1]]
- ; ALL: %xmm0 = COPY [[ADD]](<2 x s64>)
+ ; ALL: $xmm0 = COPY [[ADD]](<2 x s64>)
; ALL: RET 0
%0(<2 x s64>) = IMPLICIT_DEF
%1(<2 x s64>) = IMPLICIT_DEF
%2(<2 x s64>) = G_ADD %0, %1
- %xmm0 = COPY %2
+ $xmm0 = COPY %2
RET 0
...
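
The RUN lines for these legalizer tests sit outside the hunks shown, but .mir tests of this shape are typically driven as sketched below; the exact triple and check prefixes here are illustrative assumptions, not copied from the file:

    # RUN: llc -mtriple=x86_64-linux-gnu -run-pass=legalizer %s -o - \
    # RUN:   | FileCheck %s --check-prefixes=ALL
    # The legalizer rewrites generic ops (G_ADD etc.) into target-supported
    # forms; FileCheck then matches the '; ALL:' lines against the printed
    # MIR, where physical registers now carry the '$' sigil.
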
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-add-v256.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-add-v256.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-add-v256.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-add-v256.mir Wed Jan 31 14:04:26 2018
@@ -36,7 +36,7 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; CHECK-LABEL: name: test_add_v32i8
; ALL: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
@@ -51,15 +51,15 @@ body: |
; AVX1: [[ADD1:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV1]], [[UV3]]
; SSE2: [[MV:%[0-9]+]]:_(<32 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
; AVX1: [[MV:%[0-9]+]]:_(<32 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
- ; SSE2: %ymm0 = COPY [[MV]](<32 x s8>)
- ; AVX1: %ymm0 = COPY [[MV]](<32 x s8>)
+ ; SSE2: $ymm0 = COPY [[MV]](<32 x s8>)
+ ; AVX1: $ymm0 = COPY [[MV]](<32 x s8>)
; AVX2: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX2: %ymm0 = COPY [[ADD]](<32 x s8>)
+ ; AVX2: $ymm0 = COPY [[ADD]](<32 x s8>)
; ALL: RET 0
%0(<32 x s8>) = IMPLICIT_DEF
%1(<32 x s8>) = IMPLICIT_DEF
%2(<32 x s8>) = G_ADD %0, %1
- %ymm0 = COPY %2
+ $ymm0 = COPY %2
RET 0
...
@@ -74,7 +74,7 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_add_v16i16
; ALL: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
@@ -89,15 +89,15 @@ body: |
; AVX1: [[ADD:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV]], [[UV2]]
; AVX1: [[ADD1:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV1]], [[UV3]]
; AVX1: [[MV:%[0-9]+]]:_(<16 x s16>) = G_MERGE_VALUES [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>)
- ; SSE2: %ymm0 = COPY [[MV]](<16 x s16>)
- ; AVX1: %ymm0 = COPY [[MV]](<16 x s16>)
+ ; SSE2: $ymm0 = COPY [[MV]](<16 x s16>)
+ ; AVX1: $ymm0 = COPY [[MV]](<16 x s16>)
; AVX2: [[ADD:%[0-9]+]]:_(<16 x s16>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX2: %ymm0 = COPY [[ADD]](<16 x s16>)
+ ; AVX2: $ymm0 = COPY [[ADD]](<16 x s16>)
; ALL: RET 0
%0(<16 x s16>) = IMPLICIT_DEF
%1(<16 x s16>) = IMPLICIT_DEF
%2(<16 x s16>) = G_ADD %0, %1
- %ymm0 = COPY %2
+ $ymm0 = COPY %2
RET 0
...
@@ -112,7 +112,7 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_add_v8i32
; ALL: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
@@ -122,20 +122,20 @@ body: |
; SSE2: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV2]]
; SSE2: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV3]]
; SSE2: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>)
- ; SSE2: %ymm0 = COPY [[MV]](<8 x s32>)
+ ; SSE2: $ymm0 = COPY [[MV]](<8 x s32>)
; AVX1: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF]](<8 x s32>)
; AVX1: [[UV2:%[0-9]+]]:_(<4 x s32>), [[UV3:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[DEF1]](<8 x s32>)
; AVX1: [[ADD:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV]], [[UV2]]
; AVX1: [[ADD1:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV1]], [[UV3]]
; AVX1: [[MV:%[0-9]+]]:_(<8 x s32>) = G_MERGE_VALUES [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>)
- ; AVX1: %ymm0 = COPY [[MV]](<8 x s32>)
+ ; AVX1: $ymm0 = COPY [[MV]](<8 x s32>)
; AVX2: [[ADD:%[0-9]+]]:_(<8 x s32>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX2: %ymm0 = COPY [[ADD]](<8 x s32>)
+ ; AVX2: $ymm0 = COPY [[ADD]](<8 x s32>)
; ALL: RET 0
%0(<8 x s32>) = IMPLICIT_DEF
%1(<8 x s32>) = IMPLICIT_DEF
%2(<8 x s32>) = G_ADD %0, %1
- %ymm0 = COPY %2
+ $ymm0 = COPY %2
RET 0
...
@@ -150,7 +150,7 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_add_v4i64
; ALL: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
@@ -165,15 +165,15 @@ body: |
; AVX1: [[ADD:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV]], [[UV2]]
; AVX1: [[ADD1:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV1]], [[UV3]]
; AVX1: [[MV:%[0-9]+]]:_(<4 x s64>) = G_MERGE_VALUES [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>)
- ; SSE2: %ymm0 = COPY [[MV]](<4 x s64>)
- ; AVX1: %ymm0 = COPY [[MV]](<4 x s64>)
+ ; SSE2: $ymm0 = COPY [[MV]](<4 x s64>)
+ ; AVX1: $ymm0 = COPY [[MV]](<4 x s64>)
; AVX2: [[ADD:%[0-9]+]]:_(<4 x s64>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX2: %ymm0 = COPY [[ADD]](<4 x s64>)
+ ; AVX2: $ymm0 = COPY [[ADD]](<4 x s64>)
; ALL: RET 0
%0(<4 x s64>) = IMPLICIT_DEF
%1(<4 x s64>) = IMPLICIT_DEF
%2(<4 x s64>) = G_ADD %0, %1
- %ymm0 = COPY %2
+ $ymm0 = COPY %2
RET 0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-add-v512.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-add-v512.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-add-v512.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-add-v512.mir Wed Jan 31 14:04:26 2018
@@ -40,7 +40,7 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_add_v64i8
; ALL: [[DEF:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
@@ -52,20 +52,20 @@ body: |
; AVX1: [[ADD2:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV2]], [[UV6]]
; AVX1: [[ADD3:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV3]], [[UV7]]
; AVX1: [[MV:%[0-9]+]]:_(<64 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>), [[ADD2]](<16 x s8>), [[ADD3]](<16 x s8>)
- ; AVX1: %zmm0 = COPY [[MV]](<64 x s8>)
+ ; AVX1: $zmm0 = COPY [[MV]](<64 x s8>)
; AVX512F: [[UV:%[0-9]+]]:_(<32 x s8>), [[UV1:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[DEF]](<64 x s8>)
; AVX512F: [[UV2:%[0-9]+]]:_(<32 x s8>), [[UV3:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[DEF1]](<64 x s8>)
; AVX512F: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[UV]], [[UV2]]
; AVX512F: [[ADD1:%[0-9]+]]:_(<32 x s8>) = G_ADD [[UV1]], [[UV3]]
; AVX512F: [[MV:%[0-9]+]]:_(<64 x s8>) = G_MERGE_VALUES [[ADD]](<32 x s8>), [[ADD1]](<32 x s8>)
- ; AVX512F: %zmm0 = COPY [[MV]](<64 x s8>)
+ ; AVX512F: $zmm0 = COPY [[MV]](<64 x s8>)
; AVX512BW: [[ADD:%[0-9]+]]:_(<64 x s8>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX512BW: %zmm0 = COPY [[ADD]](<64 x s8>)
+ ; AVX512BW: $zmm0 = COPY [[ADD]](<64 x s8>)
; ALL: RET 0
%0(<64 x s8>) = IMPLICIT_DEF
%1(<64 x s8>) = IMPLICIT_DEF
%2(<64 x s8>) = G_ADD %0, %1
- %zmm0 = COPY %2
+ $zmm0 = COPY %2
RET 0
...
@@ -80,7 +80,7 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_add_v32i16
; ALL: [[DEF:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
@@ -92,20 +92,20 @@ body: |
; AVX1: [[ADD2:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV2]], [[UV6]]
; AVX1: [[ADD3:%[0-9]+]]:_(<8 x s16>) = G_ADD [[UV3]], [[UV7]]
; AVX1: [[MV:%[0-9]+]]:_(<32 x s16>) = G_MERGE_VALUES [[ADD]](<8 x s16>), [[ADD1]](<8 x s16>), [[ADD2]](<8 x s16>), [[ADD3]](<8 x s16>)
- ; AVX1: %zmm0 = COPY [[MV]](<32 x s16>)
+ ; AVX1: $zmm0 = COPY [[MV]](<32 x s16>)
; AVX512F: [[UV:%[0-9]+]]:_(<16 x s16>), [[UV1:%[0-9]+]]:_(<16 x s16>) = G_UNMERGE_VALUES [[DEF]](<32 x s16>)
; AVX512F: [[UV2:%[0-9]+]]:_(<16 x s16>), [[UV3:%[0-9]+]]:_(<16 x s16>) = G_UNMERGE_VALUES [[DEF1]](<32 x s16>)
; AVX512F: [[ADD:%[0-9]+]]:_(<16 x s16>) = G_ADD [[UV]], [[UV2]]
; AVX512F: [[ADD1:%[0-9]+]]:_(<16 x s16>) = G_ADD [[UV1]], [[UV3]]
; AVX512F: [[MV:%[0-9]+]]:_(<32 x s16>) = G_MERGE_VALUES [[ADD]](<16 x s16>), [[ADD1]](<16 x s16>)
- ; AVX512F: %zmm0 = COPY [[MV]](<32 x s16>)
+ ; AVX512F: $zmm0 = COPY [[MV]](<32 x s16>)
; AVX512BW: [[ADD:%[0-9]+]]:_(<32 x s16>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX512BW: %zmm0 = COPY [[ADD]](<32 x s16>)
+ ; AVX512BW: $zmm0 = COPY [[ADD]](<32 x s16>)
; ALL: RET 0
%0(<32 x s16>) = IMPLICIT_DEF
%1(<32 x s16>) = IMPLICIT_DEF
%2(<32 x s16>) = G_ADD %0, %1
- %zmm0 = COPY %2
+ $zmm0 = COPY %2
RET 0
...
@@ -120,7 +120,7 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_add_v16i32
; ALL: [[DEF:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
@@ -132,16 +132,16 @@ body: |
; AVX1: [[ADD2:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV2]], [[UV6]]
; AVX1: [[ADD3:%[0-9]+]]:_(<4 x s32>) = G_ADD [[UV3]], [[UV7]]
; AVX1: [[MV:%[0-9]+]]:_(<16 x s32>) = G_MERGE_VALUES [[ADD]](<4 x s32>), [[ADD1]](<4 x s32>), [[ADD2]](<4 x s32>), [[ADD3]](<4 x s32>)
- ; AVX1: %zmm0 = COPY [[MV]](<16 x s32>)
+ ; AVX1: $zmm0 = COPY [[MV]](<16 x s32>)
; AVX512F: [[ADD:%[0-9]+]]:_(<16 x s32>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX512F: %zmm0 = COPY [[ADD]](<16 x s32>)
+ ; AVX512F: $zmm0 = COPY [[ADD]](<16 x s32>)
; AVX512BW: [[ADD:%[0-9]+]]:_(<16 x s32>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX512BW: %zmm0 = COPY [[ADD]](<16 x s32>)
+ ; AVX512BW: $zmm0 = COPY [[ADD]](<16 x s32>)
; ALL: RET 0
%0(<16 x s32>) = IMPLICIT_DEF
%1(<16 x s32>) = IMPLICIT_DEF
%2(<16 x s32>) = G_ADD %0, %1
- %zmm0 = COPY %2
+ $zmm0 = COPY %2
RET 0
...
@@ -156,7 +156,7 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_add_v8i64
; ALL: [[DEF:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
@@ -168,16 +168,16 @@ body: |
; AVX1: [[ADD2:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV2]], [[UV6]]
; AVX1: [[ADD3:%[0-9]+]]:_(<2 x s64>) = G_ADD [[UV3]], [[UV7]]
; AVX1: [[MV:%[0-9]+]]:_(<8 x s64>) = G_MERGE_VALUES [[ADD]](<2 x s64>), [[ADD1]](<2 x s64>), [[ADD2]](<2 x s64>), [[ADD3]](<2 x s64>)
- ; AVX1: %zmm0 = COPY [[MV]](<8 x s64>)
+ ; AVX1: $zmm0 = COPY [[MV]](<8 x s64>)
; AVX512F: [[ADD:%[0-9]+]]:_(<8 x s64>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX512F: %zmm0 = COPY [[ADD]](<8 x s64>)
+ ; AVX512F: $zmm0 = COPY [[ADD]](<8 x s64>)
; AVX512BW: [[ADD:%[0-9]+]]:_(<8 x s64>) = G_ADD [[DEF]], [[DEF1]]
- ; AVX512BW: %zmm0 = COPY [[ADD]](<8 x s64>)
+ ; AVX512BW: $zmm0 = COPY [[ADD]](<8 x s64>)
; ALL: RET 0
%0(<8 x s64>) = IMPLICIT_DEF
%1(<8 x s64>) = IMPLICIT_DEF
%2(<8 x s64>) = G_ADD %0, %1
- %zmm0 = COPY %2
+ $zmm0 = COPY %2
RET 0
...
@@ -200,13 +200,13 @@ registers:
#
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1, %ymm2, %ymm3
+ liveins: $ymm0, $ymm1, $ymm2, $ymm3
; ALL-LABEL: name: test_add_v64i8_2
- ; ALL: [[COPY:%[0-9]+]]:_(<32 x s8>) = COPY %ymm0
- ; ALL: [[COPY1:%[0-9]+]]:_(<32 x s8>) = COPY %ymm1
- ; ALL: [[COPY2:%[0-9]+]]:_(<32 x s8>) = COPY %ymm2
- ; ALL: [[COPY3:%[0-9]+]]:_(<32 x s8>) = COPY %ymm3
+ ; ALL: [[COPY:%[0-9]+]]:_(<32 x s8>) = COPY $ymm0
+ ; ALL: [[COPY1:%[0-9]+]]:_(<32 x s8>) = COPY $ymm1
+ ; ALL: [[COPY2:%[0-9]+]]:_(<32 x s8>) = COPY $ymm2
+ ; ALL: [[COPY3:%[0-9]+]]:_(<32 x s8>) = COPY $ymm3
; AVX1: [[UV:%[0-9]+]]:_(<16 x s8>), [[UV1:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[COPY]](<32 x s8>)
; AVX1: [[UV2:%[0-9]+]]:_(<16 x s8>), [[UV3:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[COPY1]](<32 x s8>)
; AVX1: [[UV4:%[0-9]+]]:_(<16 x s8>), [[UV5:%[0-9]+]]:_(<16 x s8>) = G_UNMERGE_VALUES [[COPY2]](<32 x s8>)
@@ -217,29 +217,29 @@ body: |
; AVX1: [[ADD3:%[0-9]+]]:_(<16 x s8>) = G_ADD [[UV3]], [[UV7]]
; AVX1: [[MV:%[0-9]+]]:_(<32 x s8>) = G_MERGE_VALUES [[ADD]](<16 x s8>), [[ADD1]](<16 x s8>)
; AVX1: [[MV1:%[0-9]+]]:_(<32 x s8>) = G_MERGE_VALUES [[ADD2]](<16 x s8>), [[ADD3]](<16 x s8>)
- ; AVX1: %ymm0 = COPY [[MV]](<32 x s8>)
- ; AVX1: %ymm1 = COPY [[MV1]](<32 x s8>)
+ ; AVX1: $ymm0 = COPY [[MV]](<32 x s8>)
+ ; AVX1: $ymm1 = COPY [[MV1]](<32 x s8>)
; AVX512F: [[ADD:%[0-9]+]]:_(<32 x s8>) = G_ADD [[COPY]], [[COPY2]]
; AVX512F: [[ADD1:%[0-9]+]]:_(<32 x s8>) = G_ADD [[COPY1]], [[COPY3]]
- ; AVX512F: %ymm0 = COPY [[ADD]](<32 x s8>)
- ; AVX512F: %ymm1 = COPY [[ADD1]](<32 x s8>)
+ ; AVX512F: $ymm0 = COPY [[ADD]](<32 x s8>)
+ ; AVX512F: $ymm1 = COPY [[ADD1]](<32 x s8>)
; AVX512BW: [[MV:%[0-9]+]]:_(<64 x s8>) = G_MERGE_VALUES [[COPY]](<32 x s8>), [[COPY1]](<32 x s8>)
; AVX512BW: [[MV1:%[0-9]+]]:_(<64 x s8>) = G_MERGE_VALUES [[COPY2]](<32 x s8>), [[COPY3]](<32 x s8>)
; AVX512BW: [[ADD:%[0-9]+]]:_(<64 x s8>) = G_ADD [[MV]], [[MV1]]
; AVX512BW: [[UV:%[0-9]+]]:_(<32 x s8>), [[UV1:%[0-9]+]]:_(<32 x s8>) = G_UNMERGE_VALUES [[ADD]](<64 x s8>)
- ; AVX512BW: %ymm0 = COPY [[UV]](<32 x s8>)
- ; AVX512BW: %ymm1 = COPY [[UV1]](<32 x s8>)
- ; ALL: RET 0, implicit %ymm0, implicit %ymm1
- %2(<32 x s8>) = COPY %ymm0
- %3(<32 x s8>) = COPY %ymm1
- %4(<32 x s8>) = COPY %ymm2
- %5(<32 x s8>) = COPY %ymm3
+ ; AVX512BW: $ymm0 = COPY [[UV]](<32 x s8>)
+ ; AVX512BW: $ymm1 = COPY [[UV1]](<32 x s8>)
+ ; ALL: RET 0, implicit $ymm0, implicit $ymm1
+ %2(<32 x s8>) = COPY $ymm0
+ %3(<32 x s8>) = COPY $ymm1
+ %4(<32 x s8>) = COPY $ymm2
+ %5(<32 x s8>) = COPY $ymm3
%0(<64 x s8>) = G_MERGE_VALUES %2(<32 x s8>), %3(<32 x s8>)
%1(<64 x s8>) = G_MERGE_VALUES %4(<32 x s8>), %5(<32 x s8>)
%6(<64 x s8>) = G_ADD %0, %1
%7(<32 x s8>), %8(<32 x s8>) = G_UNMERGE_VALUES %6(<64 x s8>)
- %ymm0 = COPY %7(<32 x s8>)
- %ymm1 = COPY %8(<32 x s8>)
- RET 0, implicit %ymm0, implicit %ymm1
+ $ymm0 = COPY %7(<32 x s8>)
+ $ymm1 = COPY %8(<32 x s8>)
+ RET 0, implicit $ymm0, implicit $ymm1
...
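
The v256/v512 tests above all encode one legalization strategy: when the subtarget cannot handle the vector width natively, the legalizer unmerges the operands into supported chunks, operates piecewise, and re-merges. A schematic of the two-way split seen in the AVX1 <32 x s8> case (virtual register names %a, %b, %lo*, %hi*, %r are illustrative, not from the tests):

    %lo0:_(<16 x s8>), %hi0:_(<16 x s8>) = G_UNMERGE_VALUES %a(<32 x s8>)
    %lo1:_(<16 x s8>), %hi1:_(<16 x s8>) = G_UNMERGE_VALUES %b(<32 x s8>)
    %lo:_(<16 x s8>) = G_ADD %lo0, %lo1   ; add the halves independently
    %hi:_(<16 x s8>) = G_ADD %hi0, %hi1
    %r:_(<32 x s8>) = G_MERGE_VALUES %lo(<16 x s8>), %hi(<16 x s8>)
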
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-add.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-add.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-add.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-add.mir Wed Jan 31 14:04:26 2018
@@ -18,7 +18,7 @@ registers:
- { id: 0, class: _, preferred-register: '' }
- { id: 1, class: _, preferred-register: '' }
- { id: 2, class: _, preferred-register: '' }
-# CHECK: %0(s32) = COPY %edx
+# CHECK: %0(s32) = COPY $edx
# CHECK-NEXT: %3(s8) = G_TRUNC %0(s32)
# CHECK-NEXT: %4(s8) = G_TRUNC %0(s32)
# CHECK-NEXT: %5(s8) = G_ADD %3, %4
@@ -27,26 +27,26 @@ body: |
bb.1 (%ir-block.0):
; X64-LABEL: name: test_add_i1
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edx
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edx
; X64: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; X64: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; X64: [[ADD:%[0-9]+]]:_(s8) = G_ADD [[TRUNC]], [[TRUNC1]]
; X64: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ADD]](s8)
- ; X64: %eax = COPY [[ANYEXT]](s32)
+ ; X64: $eax = COPY [[ANYEXT]](s32)
; X64: RET 0
; X32-LABEL: name: test_add_i1
- ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY %edx
+ ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY $edx
; X32: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; X32: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; X32: [[ADD:%[0-9]+]]:_(s8) = G_ADD [[TRUNC]], [[TRUNC1]]
; X32: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ADD]](s8)
- ; X32: %eax = COPY [[ANYEXT]](s32)
+ ; X32: $eax = COPY [[ANYEXT]](s32)
; X32: RET 0
- %0(s32) = COPY %edx
+ %0(s32) = COPY $edx
%1(s1) = G_TRUNC %0(s32)
%2(s1) = G_ADD %1, %1
%3:_(s32) = G_ANYEXT %2
- %eax = COPY %3
+ $eax = COPY %3
RET 0
...
---
@@ -64,18 +64,18 @@ body: |
; X64: [[DEF:%[0-9]+]]:_(s32) = IMPLICIT_DEF
; X64: [[DEF1:%[0-9]+]]:_(s32) = IMPLICIT_DEF
; X64: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[DEF]], [[DEF1]]
- ; X64: %eax = COPY [[ADD]](s32)
+ ; X64: $eax = COPY [[ADD]](s32)
; X64: RET 0
; X32-LABEL: name: test_add_i32
; X32: [[DEF:%[0-9]+]]:_(s32) = IMPLICIT_DEF
; X32: [[DEF1:%[0-9]+]]:_(s32) = IMPLICIT_DEF
; X32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[DEF]], [[DEF1]]
- ; X32: %eax = COPY [[ADD]](s32)
+ ; X32: $eax = COPY [[ADD]](s32)
; X32: RET 0
%0(s32) = IMPLICIT_DEF
%1(s32) = IMPLICIT_DEF
%2(s32) = G_ADD %0, %1
- %eax = COPY %2
+ $eax = COPY %2
RET 0
...
@@ -94,7 +94,7 @@ body: |
; X64: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; X64: [[DEF1:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; X64: [[ADD:%[0-9]+]]:_(s64) = G_ADD [[DEF]], [[DEF1]]
- ; X64: %rax = COPY [[ADD]](s64)
+ ; X64: $rax = COPY [[ADD]](s64)
; X64: RET 0
; X32-LABEL: name: test_add_i64
; X32: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF
@@ -106,12 +106,12 @@ body: |
; X32: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV]], [[UV2]], [[TRUNC]]
; X32: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDE1]]
; X32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDE]](s32), [[UADDE2]](s32)
- ; X32: %rax = COPY [[MV]](s64)
+ ; X32: $rax = COPY [[MV]](s64)
; X32: RET 0
%0(s64) = IMPLICIT_DEF
%1(s64) = IMPLICIT_DEF
%2(s64) = G_ADD %0, %1
- %rax = COPY %2
+ $rax = COPY %2
RET 0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-and-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-and-scalar.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-and-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-and-scalar.mir Wed Jan 31 14:04:26 2018
@@ -41,18 +41,18 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_and_i1
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edx
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edx
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK: [[AND:%[0-9]+]]:_(s8) = G_AND [[TRUNC]], [[TRUNC1]]
; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[AND]](s8)
- ; CHECK: %eax = COPY [[ANYEXT]](s32)
+ ; CHECK: $eax = COPY [[ANYEXT]](s32)
; CHECK: RET 0
- %0(s32) = COPY %edx
+ %0(s32) = COPY $edx
%1(s1) = G_TRUNC %0(s32)
%2(s1) = G_AND %1, %1
%3:_(s32) = G_ANYEXT %2
- %eax = COPY %3
+ $eax = COPY %3
RET 0
...
---
@@ -72,12 +72,12 @@ body: |
; CHECK-LABEL: name: test_and_i8
; CHECK: [[DEF:%[0-9]+]]:_(s8) = IMPLICIT_DEF
; CHECK: [[AND:%[0-9]+]]:_(s8) = G_AND [[DEF]], [[DEF]]
- ; CHECK: %al = COPY [[AND]](s8)
- ; CHECK: RET 0, implicit %al
+ ; CHECK: $al = COPY [[AND]](s8)
+ ; CHECK: RET 0, implicit $al
%0(s8) = IMPLICIT_DEF
%1(s8) = G_AND %0, %0
- %al = COPY %1(s8)
- RET 0, implicit %al
+ $al = COPY %1(s8)
+ RET 0, implicit $al
...
---
@@ -97,12 +97,12 @@ body: |
; CHECK-LABEL: name: test_and_i16
; CHECK: [[DEF:%[0-9]+]]:_(s16) = IMPLICIT_DEF
; CHECK: [[AND:%[0-9]+]]:_(s16) = G_AND [[DEF]], [[DEF]]
- ; CHECK: %ax = COPY [[AND]](s16)
- ; CHECK: RET 0, implicit %ax
+ ; CHECK: $ax = COPY [[AND]](s16)
+ ; CHECK: RET 0, implicit $ax
%0(s16) = IMPLICIT_DEF
%1(s16) = G_AND %0, %0
- %ax = COPY %1(s16)
- RET 0, implicit %ax
+ $ax = COPY %1(s16)
+ RET 0, implicit $ax
...
---
@@ -122,12 +122,12 @@ body: |
; CHECK-LABEL: name: test_and_i32
; CHECK: [[DEF:%[0-9]+]]:_(s32) = IMPLICIT_DEF
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[DEF]], [[DEF]]
- ; CHECK: %eax = COPY [[AND]](s32)
- ; CHECK: RET 0, implicit %eax
+ ; CHECK: $eax = COPY [[AND]](s32)
+ ; CHECK: RET 0, implicit $eax
%0(s32) = IMPLICIT_DEF
%1(s32) = G_AND %0, %0
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -147,11 +147,11 @@ body: |
; CHECK-LABEL: name: test_and_i64
; CHECK: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND [[DEF]], [[DEF]]
- ; CHECK: %rax = COPY [[AND]](s64)
- ; CHECK: RET 0, implicit %rax
+ ; CHECK: $rax = COPY [[AND]](s64)
+ ; CHECK: RET 0, implicit $rax
%0(s64) = IMPLICIT_DEF
%1(s64) = G_AND %0, %0
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-brcond.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-brcond.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-brcond.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-brcond.mir Wed Jan 31 14:04:26 2018
@@ -30,17 +30,17 @@ registers:
# ALL-NEXT: G_BRCOND %1(s1), %[[TRUE:bb.[0-9]+]]
# ALL-NEXT: G_BR %[[FALSE:bb.[0-9]+]]
# ALL: [[TRUE]].{{[a-zA-Z0-9.]+}}:
-# ALL-NEXT: %eax = COPY %2(s32)
-# ALL-NEXT: RET 0, implicit %eax
+# ALL-NEXT: $eax = COPY %2(s32)
+# ALL-NEXT: RET 0, implicit $eax
# ALL: [[FALSE]].{{[a-zA-Z0-9.]+}}:
-# ALL-NEXT: %eax = COPY %3(s32)
-# ALL-NEXT: RET 0, implicit %eax
+# ALL-NEXT: $eax = COPY %3(s32)
+# ALL-NEXT: RET 0, implicit $eax
body: |
bb.1.entry:
successors: %bb.2(0x40000000), %bb.3(0x40000000)
- liveins: %edi
+ liveins: $edi
- %0(s32) = COPY %edi
+ %0(s32) = COPY $edi
%2(s32) = G_CONSTANT i32 0
%3(s32) = G_CONSTANT i32 1
%1(s1) = G_TRUNC %0(s32)
@@ -48,11 +48,11 @@ body: |
G_BR %bb.3
bb.2.if.then:
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
bb.3.if.else:
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-cmp.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-cmp.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-cmp.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-cmp.mir Wed Jan 31 14:04:26 2018
@@ -45,21 +45,21 @@ registers:
- { id: 3, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_cmp_i8
- ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
- ; CHECK: [[COPY1:%[0-9]+]]:_(s8) = COPY %sil
+ ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s8) = COPY $sil
; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY]](s8), [[COPY1]]
; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
- ; CHECK: %eax = COPY [[ZEXT]](s32)
- ; CHECK: RET 0, implicit %eax
- %0(s8) = COPY %dil
- %1(s8) = COPY %sil
+ ; CHECK: $eax = COPY [[ZEXT]](s32)
+ ; CHECK: RET 0, implicit $eax
+ %0(s8) = COPY $dil
+ %1(s8) = COPY $sil
%2(s1) = G_ICMP intpred(ult), %0(s8), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -74,21 +74,21 @@ registers:
- { id: 3, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_cmp_i16
- ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY %di
- ; CHECK: [[COPY1:%[0-9]+]]:_(s16) = COPY %si
+ ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY $di
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s16) = COPY $si
; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY]](s16), [[COPY1]]
; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
- ; CHECK: %eax = COPY [[ZEXT]](s32)
- ; CHECK: RET 0, implicit %eax
- %0(s16) = COPY %di
- %1(s16) = COPY %si
+ ; CHECK: $eax = COPY [[ZEXT]](s32)
+ ; CHECK: RET 0, implicit $eax
+ %0(s16) = COPY $di
+ %1(s16) = COPY $si
%2(s1) = G_ICMP intpred(ult), %0(s16), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -103,21 +103,21 @@ registers:
- { id: 3, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_cmp_i32
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY]](s32), [[COPY1]]
; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
- ; CHECK: %eax = COPY [[ZEXT]](s32)
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: $eax = COPY [[ZEXT]](s32)
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s1) = G_ICMP intpred(ult), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -132,21 +132,21 @@ registers:
- { id: 3, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; CHECK-LABEL: name: test_cmp_i64
- ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %rdi
- ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %rsi
+ ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $rdi
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $rsi
; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY]](s64), [[COPY1]]
; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
- ; CHECK: %eax = COPY [[ZEXT]](s32)
- ; CHECK: RET 0, implicit %eax
- %0(s64) = COPY %rdi
- %1(s64) = COPY %rsi
+ ; CHECK: $eax = COPY [[ZEXT]](s32)
+ ; CHECK: RET 0, implicit $eax
+ %0(s64) = COPY $rdi
+ %1(s64) = COPY $rsi
%2(s1) = G_ICMP intpred(ult), %0(s64), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -161,20 +161,20 @@ registers:
- { id: 3, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; CHECK-LABEL: name: test_cmp_p0
- ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %rdi
- ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY %rsi
+ ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $rdi
+ ; CHECK: [[COPY1:%[0-9]+]]:_(p0) = COPY $rsi
; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY]](p0), [[COPY1]]
; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
- ; CHECK: %eax = COPY [[ZEXT]](s32)
- ; CHECK: RET 0, implicit %eax
- %0(p0) = COPY %rdi
- %1(p0) = COPY %rsi
+ ; CHECK: $eax = COPY [[ZEXT]](s32)
+ ; CHECK: RET 0, implicit $eax
+ %0(p0) = COPY $rdi
+ %1(p0) = COPY $rsi
%2(s1) = G_ICMP intpred(ult), %0(p0), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-constant.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-constant.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-constant.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-constant.mir Wed Jan 31 14:04:26 2018
@@ -20,46 +20,46 @@ body: |
; X32-LABEL: name: test_constant
; X32: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
; X32: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s8)
- ; X32: %eax = COPY [[ANYEXT]](s32)
+ ; X32: $eax = COPY [[ANYEXT]](s32)
; X32: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 8
- ; X32: %al = COPY [[C1]](s8)
+ ; X32: $al = COPY [[C1]](s8)
; X32: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 16
- ; X32: %ax = COPY [[C2]](s16)
+ ; X32: $ax = COPY [[C2]](s16)
; X32: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; X32: %eax = COPY [[C3]](s32)
+ ; X32: $eax = COPY [[C3]](s32)
; X32: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
; X32: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; X32: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C4]](s32), [[C5]](s32)
- ; X32: %rax = COPY [[MV]](s64)
+ ; X32: $rax = COPY [[MV]](s64)
; X32: RET 0
; X64-LABEL: name: test_constant
; X64: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
; X64: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[C]](s8)
- ; X64: %eax = COPY [[ANYEXT]](s32)
+ ; X64: $eax = COPY [[ANYEXT]](s32)
; X64: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 8
- ; X64: %al = COPY [[C1]](s8)
+ ; X64: $al = COPY [[C1]](s8)
; X64: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 16
- ; X64: %ax = COPY [[C2]](s16)
+ ; X64: $ax = COPY [[C2]](s16)
; X64: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
- ; X64: %eax = COPY [[C3]](s32)
+ ; X64: $eax = COPY [[C3]](s32)
; X64: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
- ; X64: %rax = COPY [[C4]](s64)
+ ; X64: $rax = COPY [[C4]](s64)
; X64: RET 0
%0(s1) = G_CONSTANT i1 1
%5:_(s32) = G_ANYEXT %0
- %eax = COPY %5
+ $eax = COPY %5
%1(s8) = G_CONSTANT i8 8
- %al = COPY %1
+ $al = COPY %1
%2(s16) = G_CONSTANT i16 16
- %ax = COPY %2
+ $ax = COPY %2
%3(s32) = G_CONSTANT i32 32
- %eax = COPY %3
+ $eax = COPY %3
%4(s64) = G_CONSTANT i64 64
- %rax = COPY %4
+ $rax = COPY %4
RET 0
...
@@ -73,17 +73,17 @@ body: |
; X32-LABEL: name: test_fconstant
; X32: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
- ; X32: %eax = COPY [[C]](s32)
+ ; X32: $eax = COPY [[C]](s32)
; X32: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
- ; X32: %rax = COPY [[C1]](s64)
+ ; X32: $rax = COPY [[C1]](s64)
; X64-LABEL: name: test_fconstant
; X64: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
- ; X64: %eax = COPY [[C]](s32)
+ ; X64: $eax = COPY [[C]](s32)
; X64: [[C1:%[0-9]+]]:_(s64) = G_FCONSTANT double 2.000000e+00
- ; X64: %rax = COPY [[C1]](s64)
+ ; X64: $rax = COPY [[C1]](s64)
%0(s32) = G_FCONSTANT float 1.0
- %eax = COPY %0
+ $eax = COPY %0
%1(s64) = G_FCONSTANT double 2.0
- %rax = COPY %1
+ $rax = COPY %1
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-ext-x86-64.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-ext-x86-64.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-ext-x86-64.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-ext-x86-64.mir Wed Jan 31 14:04:26 2018
@@ -73,19 +73,19 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_sext_i1
- ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; CHECK: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[COPY]](s8)
; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[TRUNC]](s1)
- ; CHECK: %rax = COPY [[SEXT]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s8) = COPY %dil
+ ; CHECK: $rax = COPY [[SEXT]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s8) = COPY $dil
%1(s1) = G_TRUNC %0(s8)
%2(s64) = G_SEXT %1(s1)
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
---
@@ -98,17 +98,17 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_sext_i8
- ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY]](s8)
- ; CHECK: %rax = COPY [[SEXT]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s8) = COPY %dil
+ ; CHECK: $rax = COPY [[SEXT]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s8) = COPY $dil
%1(s64) = G_SEXT %0(s8)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -121,17 +121,17 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_sext_i16
- ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY %di
+ ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY $di
; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY]](s16)
- ; CHECK: %rax = COPY [[SEXT]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s16) = COPY %di
+ ; CHECK: $rax = COPY [[SEXT]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s16) = COPY $di
%1(s64) = G_SEXT %0(s16)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -144,17 +144,17 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_sext_i32
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[COPY]](s32)
- ; CHECK: %rax = COPY [[SEXT]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s32) = COPY %edi
+ ; CHECK: $rax = COPY [[SEXT]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s32) = COPY $edi
%1(s64) = G_SEXT %0(s32)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -168,20 +168,20 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_zext_i1
- ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s8)
; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND [[ANYEXT]], [[C]]
- ; CHECK: %rax = COPY [[AND]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s8) = COPY %dil
+ ; CHECK: $rax = COPY [[AND]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s8) = COPY $dil
%1(s1) = G_TRUNC %0(s8)
%2(s64) = G_ZEXT %1(s1)
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
---
@@ -194,17 +194,17 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_zext_i8
- ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY]](s8)
- ; CHECK: %rax = COPY [[ZEXT]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s8) = COPY %dil
+ ; CHECK: $rax = COPY [[ZEXT]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s8) = COPY $dil
%1(s64) = G_ZEXT %0(s8)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -217,17 +217,17 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_zext_i16
- ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY %di
+ ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY $di
; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY]](s16)
- ; CHECK: %rax = COPY [[ZEXT]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s16) = COPY %di
+ ; CHECK: $rax = COPY [[ZEXT]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s16) = COPY $di
%1(s64) = G_ZEXT %0(s16)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -240,17 +240,17 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_zext_i32
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY]](s32)
- ; CHECK: %rax = COPY [[ZEXT]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s32) = COPY %edi
+ ; CHECK: $rax = COPY [[ZEXT]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s32) = COPY $edi
%1(s64) = G_ZEXT %0(s32)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -264,18 +264,18 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_anyext_i1
- ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s8)
- ; CHECK: %rax = COPY [[ANYEXT]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s8) = COPY %dil
+ ; CHECK: $rax = COPY [[ANYEXT]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s8) = COPY $dil
%1(s1) = G_TRUNC %0(s8)
%2(s64) = G_ANYEXT %1(s1)
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
---
@@ -288,17 +288,17 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_anyext_i8
- ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; CHECK: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s8)
- ; CHECK: %rax = COPY [[ANYEXT]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s8) = COPY %dil
+ ; CHECK: $rax = COPY [[ANYEXT]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s8) = COPY $dil
%1(s64) = G_ANYEXT %0(s8)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -311,17 +311,17 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_anyext_i16
- ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY %di
+ ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY $di
; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s16)
- ; CHECK: %rax = COPY [[ANYEXT]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s16) = COPY %di
+ ; CHECK: $rax = COPY [[ANYEXT]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s16) = COPY $di
%1(s64) = G_ANYEXT %0(s16)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -334,17 +334,17 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_anyext_i32
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[COPY]](s32)
- ; CHECK: %rax = COPY [[ANYEXT]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s32) = COPY %edi
+ ; CHECK: $rax = COPY [[ANYEXT]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s32) = COPY $edi
%1(s64) = G_ANYEXT %0(s32)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
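
The zext-of-i1 checks above encode a lowering worth calling out: an s1 zero-extend becomes a mask rather than a dedicated extend instruction. A schematic matching the test_zext_i1 checks (names %narrow, %wide, %res are illustrative):

    %wide:_(s64) = G_ANYEXT %narrow(s8)   ; widen; high bits undefined
    %one:_(s64)  = G_CONSTANT i64 1
    %res:_(s64)  = G_AND %wide, %one      ; zero-extend by keeping only bit 0
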
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-ext.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-ext.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-ext.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-ext.mir Wed Jan 31 14:04:26 2018
@@ -101,27 +101,27 @@ registers:
- { id: 2, class: _, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_zext_i1toi8
- ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
+ ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; X32: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
; X32: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; X32: [[AND:%[0-9]+]]:_(s8) = G_AND [[TRUNC]], [[C]]
- ; X32: %al = COPY [[AND]](s8)
- ; X32: RET 0, implicit %al
+ ; X32: $al = COPY [[AND]](s8)
+ ; X32: RET 0, implicit $al
; X64-LABEL: name: test_zext_i1toi8
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; X64: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 1
; X64: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; X64: [[AND:%[0-9]+]]:_(s8) = G_AND [[TRUNC]], [[C]]
- ; X64: %al = COPY [[AND]](s8)
- ; X64: RET 0, implicit %al
- %1:_(s32) = COPY %edi
+ ; X64: $al = COPY [[AND]](s8)
+ ; X64: RET 0, implicit $al
+ %1:_(s32) = COPY $edi
%0:_(s1) = G_TRUNC %1(s32)
%2:_(s8) = G_ZEXT %0(s1)
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
---
@@ -134,27 +134,27 @@ registers:
- { id: 1, class: _, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_zext_i1toi16
- ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
+ ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; X32: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
; X32: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; X32: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
- ; X32: %ax = COPY [[AND]](s16)
- ; X32: RET 0, implicit %ax
+ ; X32: $ax = COPY [[AND]](s16)
+ ; X32: RET 0, implicit $ax
; X64-LABEL: name: test_zext_i1toi16
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; X64: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 1
; X64: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; X64: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C]]
- ; X64: %ax = COPY [[AND]](s16)
- ; X64: RET 0, implicit %ax
- %1:_(s32) = COPY %edi
+ ; X64: $ax = COPY [[AND]](s16)
+ ; X64: RET 0, implicit $ax
+ %1:_(s32) = COPY $edi
%0:_(s1) = G_TRUNC %1(s32)
%2:_(s16) = G_ZEXT %0(s1)
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
@@ -168,27 +168,27 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_zext_i1
- ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; X32: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s8)
; X32: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C]]
- ; X32: %eax = COPY [[AND]](s32)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[AND]](s32)
+ ; X32: RET 0, implicit $eax
; X64-LABEL: name: test_zext_i1
- ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X64: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; X64: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s8)
; X64: [[AND:%[0-9]+]]:_(s32) = G_AND [[ANYEXT]], [[C]]
- ; X64: %eax = COPY [[AND]](s32)
- ; X64: RET 0, implicit %eax
- %0(s8) = COPY %dil
+ ; X64: $eax = COPY [[AND]](s32)
+ ; X64: RET 0, implicit $eax
+ %0(s8) = COPY $dil
%1(s1) = G_TRUNC %0(s8)
%2(s32) = G_ZEXT %1(s1)
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -201,22 +201,22 @@ registers:
- { id: 1, class: _, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_zext_i8toi16
- ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X32: [[ZEXT:%[0-9]+]]:_(s16) = G_ZEXT [[COPY]](s8)
- ; X32: %ax = COPY [[ZEXT]](s16)
- ; X32: RET 0, implicit %ax
+ ; X32: $ax = COPY [[ZEXT]](s16)
+ ; X32: RET 0, implicit $ax
; X64-LABEL: name: test_zext_i8toi16
- ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X64: [[ZEXT:%[0-9]+]]:_(s16) = G_ZEXT [[COPY]](s8)
- ; X64: %ax = COPY [[ZEXT]](s16)
- ; X64: RET 0, implicit %ax
- %0(s8) = COPY %dil
+ ; X64: $ax = COPY [[ZEXT]](s16)
+ ; X64: RET 0, implicit $ax
+ %0(s8) = COPY $dil
%1(s16) = G_ZEXT %0(s8)
- %ax = COPY %1(s16)
- RET 0, implicit %ax
+ $ax = COPY %1(s16)
+ RET 0, implicit $ax
...
---
@@ -229,22 +229,22 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_zext_i8
- ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X32: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[COPY]](s8)
- ; X32: %eax = COPY [[ZEXT]](s32)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[ZEXT]](s32)
+ ; X32: RET 0, implicit $eax
; X64-LABEL: name: test_zext_i8
- ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X64: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[COPY]](s8)
- ; X64: %eax = COPY [[ZEXT]](s32)
- ; X64: RET 0, implicit %eax
- %0(s8) = COPY %dil
+ ; X64: $eax = COPY [[ZEXT]](s32)
+ ; X64: RET 0, implicit $eax
+ %0(s8) = COPY $dil
%1(s32) = G_ZEXT %0(s8)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -257,22 +257,22 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_zext_i16
- ; X32: [[COPY:%[0-9]+]]:_(s16) = COPY %di
+ ; X32: [[COPY:%[0-9]+]]:_(s16) = COPY $di
; X32: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[COPY]](s16)
- ; X32: %eax = COPY [[ZEXT]](s32)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[ZEXT]](s32)
+ ; X32: RET 0, implicit $eax
; X64-LABEL: name: test_zext_i16
- ; X64: [[COPY:%[0-9]+]]:_(s16) = COPY %di
+ ; X64: [[COPY:%[0-9]+]]:_(s16) = COPY $di
; X64: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[COPY]](s16)
- ; X64: %eax = COPY [[ZEXT]](s32)
- ; X64: RET 0, implicit %eax
- %0(s16) = COPY %di
+ ; X64: $eax = COPY [[ZEXT]](s32)
+ ; X64: RET 0, implicit $eax
+ %0(s16) = COPY $di
%1(s32) = G_ZEXT %0(s16)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -285,20 +285,20 @@ registers:
- { id: 1, class: _, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_sext_i1toi8
; X32: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
- ; X32: %al = COPY [[DEF]](s8)
- ; X32: RET 0, implicit %al
+ ; X32: $al = COPY [[DEF]](s8)
+ ; X32: RET 0, implicit $al
; X64-LABEL: name: test_sext_i1toi8
; X64: [[DEF:%[0-9]+]]:_(s8) = G_IMPLICIT_DEF
- ; X64: %al = COPY [[DEF]](s8)
- ; X64: RET 0, implicit %al
+ ; X64: $al = COPY [[DEF]](s8)
+ ; X64: RET 0, implicit $al
%0(s1) = G_IMPLICIT_DEF
%1(s8) = G_SEXT %0(s1)
- %al = COPY %1(s8)
- RET 0, implicit %al
+ $al = COPY %1(s8)
+ RET 0, implicit $al
...
---
@@ -311,20 +311,20 @@ registers:
- { id: 1, class: _, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_sext_i1toi16
; X32: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
- ; X32: %ax = COPY [[DEF]](s16)
- ; X32: RET 0, implicit %ax
+ ; X32: $ax = COPY [[DEF]](s16)
+ ; X32: RET 0, implicit $ax
; X64-LABEL: name: test_sext_i1toi16
; X64: [[DEF:%[0-9]+]]:_(s16) = G_IMPLICIT_DEF
- ; X64: %ax = COPY [[DEF]](s16)
- ; X64: RET 0, implicit %ax
+ ; X64: $ax = COPY [[DEF]](s16)
+ ; X64: RET 0, implicit $ax
%0(s1) = G_IMPLICIT_DEF
%1(s16) = G_SEXT %0(s1)
- %ax = COPY %1(s16)
- RET 0, implicit %ax
+ $ax = COPY %1(s16)
+ RET 0, implicit $ax
...
---
@@ -338,20 +338,20 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_sext_i1
; X32: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
- ; X32: %eax = COPY [[DEF]](s32)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[DEF]](s32)
+ ; X32: RET 0, implicit $eax
; X64-LABEL: name: test_sext_i1
; X64: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
- ; X64: %eax = COPY [[DEF]](s32)
- ; X64: RET 0, implicit %eax
+ ; X64: $eax = COPY [[DEF]](s32)
+ ; X64: RET 0, implicit $eax
%0(s1) = G_IMPLICIT_DEF
%2(s32) = G_SEXT %0(s1)
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -364,22 +364,22 @@ registers:
- { id: 1, class: _, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_sext_i8toi16
- ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X32: [[SEXT:%[0-9]+]]:_(s16) = G_SEXT [[COPY]](s8)
- ; X32: %ax = COPY [[SEXT]](s16)
- ; X32: RET 0, implicit %ax
+ ; X32: $ax = COPY [[SEXT]](s16)
+ ; X32: RET 0, implicit $ax
; X64-LABEL: name: test_sext_i8toi16
- ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X64: [[SEXT:%[0-9]+]]:_(s16) = G_SEXT [[COPY]](s8)
- ; X64: %ax = COPY [[SEXT]](s16)
- ; X64: RET 0, implicit %ax
- %0(s8) = COPY %dil
+ ; X64: $ax = COPY [[SEXT]](s16)
+ ; X64: RET 0, implicit $ax
+ %0(s8) = COPY $dil
%1(s16) = G_SEXT %0(s8)
- %ax = COPY %1(s16)
- RET 0, implicit %ax
+ $ax = COPY %1(s16)
+ RET 0, implicit $ax
...
---
@@ -392,22 +392,22 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_sext_i8
- ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X32: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[COPY]](s8)
- ; X32: %eax = COPY [[SEXT]](s32)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[SEXT]](s32)
+ ; X32: RET 0, implicit $eax
; X64-LABEL: name: test_sext_i8
- ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X64: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[COPY]](s8)
- ; X64: %eax = COPY [[SEXT]](s32)
- ; X64: RET 0, implicit %eax
- %0(s8) = COPY %dil
+ ; X64: $eax = COPY [[SEXT]](s32)
+ ; X64: RET 0, implicit $eax
+ %0(s8) = COPY $dil
%1(s32) = G_SEXT %0(s8)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -420,22 +420,22 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_sext_i16
- ; X32: [[COPY:%[0-9]+]]:_(s16) = COPY %di
+ ; X32: [[COPY:%[0-9]+]]:_(s16) = COPY $di
; X32: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[COPY]](s16)
- ; X32: %eax = COPY [[SEXT]](s32)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[SEXT]](s32)
+ ; X32: RET 0, implicit $eax
; X64-LABEL: name: test_sext_i16
- ; X64: [[COPY:%[0-9]+]]:_(s16) = COPY %di
+ ; X64: [[COPY:%[0-9]+]]:_(s16) = COPY $di
; X64: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[COPY]](s16)
- ; X64: %eax = COPY [[SEXT]](s32)
- ; X64: RET 0, implicit %eax
- %0(s16) = COPY %di
+ ; X64: $eax = COPY [[SEXT]](s32)
+ ; X64: RET 0, implicit $eax
+ %0(s16) = COPY $di
%1(s32) = G_SEXT %0(s16)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -449,23 +449,23 @@ registers:
- { id: 2, class: _, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_anyext_i1toi8
- ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
+ ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; X32: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
- ; X32: %al = COPY [[TRUNC]](s8)
- ; X32: RET 0, implicit %al
+ ; X32: $al = COPY [[TRUNC]](s8)
+ ; X32: RET 0, implicit $al
; X64-LABEL: name: test_anyext_i1toi8
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; X64: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
- ; X64: %al = COPY [[TRUNC]](s8)
- ; X64: RET 0, implicit %al
- %0(s32) = COPY %edi
+ ; X64: $al = COPY [[TRUNC]](s8)
+ ; X64: RET 0, implicit $al
+ %0(s32) = COPY $edi
%1(s1) = G_TRUNC %0(s32)
%2(s8) = G_ANYEXT %1(s1)
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
---
@@ -479,23 +479,23 @@ registers:
- { id: 2, class: _, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_anyext_i1toi16
- ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
+ ; X32: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; X32: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; X32: %ax = COPY [[TRUNC]](s16)
- ; X32: RET 0, implicit %ax
+ ; X32: $ax = COPY [[TRUNC]](s16)
+ ; X32: RET 0, implicit $ax
; X64-LABEL: name: test_anyext_i1toi16
- ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
+ ; X64: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
; X64: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; X64: %ax = COPY [[TRUNC]](s16)
- ; X64: RET 0, implicit %ax
- %0(s32) = COPY %edi
+ ; X64: $ax = COPY [[TRUNC]](s16)
+ ; X64: RET 0, implicit $ax
+ %0(s32) = COPY $edi
%1(s1) = G_TRUNC %0(s32)
%2(s16) = G_ANYEXT %1(s1)
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
@@ -509,23 +509,23 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_anyext_i1
- ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X32: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s8)
- ; X32: %eax = COPY [[ANYEXT]](s32)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[ANYEXT]](s32)
+ ; X32: RET 0, implicit $eax
; X64-LABEL: name: test_anyext_i1
- ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X64: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s8)
- ; X64: %eax = COPY [[ANYEXT]](s32)
- ; X64: RET 0, implicit %eax
- %0(s8) = COPY %dil
+ ; X64: $eax = COPY [[ANYEXT]](s32)
+ ; X64: RET 0, implicit $eax
+ %0(s8) = COPY $dil
%1(s1) = G_TRUNC %0(s8)
%2(s32) = G_ANYEXT %1(s1)
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -538,22 +538,22 @@ registers:
- { id: 1, class: _, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_anyext_i8toi16
- ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X32: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[COPY]](s8)
- ; X32: %ax = COPY [[ANYEXT]](s16)
- ; X32: RET 0, implicit %ax
+ ; X32: $ax = COPY [[ANYEXT]](s16)
+ ; X32: RET 0, implicit $ax
; X64-LABEL: name: test_anyext_i8toi16
- ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X64: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[COPY]](s8)
- ; X64: %ax = COPY [[ANYEXT]](s16)
- ; X64: RET 0, implicit %ax
- %0(s8) = COPY %dil
+ ; X64: $ax = COPY [[ANYEXT]](s16)
+ ; X64: RET 0, implicit $ax
+ %0(s8) = COPY $dil
%1(s16) = G_ANYEXT %0(s8)
- %ax = COPY %1(s16)
- RET 0, implicit %ax
+ $ax = COPY %1(s16)
+ RET 0, implicit $ax
...
---
@@ -566,22 +566,22 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_anyext_i8
- ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X32: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X32: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s8)
- ; X32: %eax = COPY [[ANYEXT]](s32)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[ANYEXT]](s32)
+ ; X32: RET 0, implicit $eax
; X64-LABEL: name: test_anyext_i8
- ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY %dil
+ ; X64: [[COPY:%[0-9]+]]:_(s8) = COPY $dil
; X64: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s8)
- ; X64: %eax = COPY [[ANYEXT]](s32)
- ; X64: RET 0, implicit %eax
- %0(s8) = COPY %dil
+ ; X64: $eax = COPY [[ANYEXT]](s32)
+ ; X64: RET 0, implicit $eax
+ %0(s8) = COPY $dil
%1(s32) = G_ANYEXT %0(s8)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -594,21 +594,21 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; X32-LABEL: name: test_anyext_i16
- ; X32: [[COPY:%[0-9]+]]:_(s16) = COPY %di
+ ; X32: [[COPY:%[0-9]+]]:_(s16) = COPY $di
; X32: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s16)
- ; X32: %eax = COPY [[ANYEXT]](s32)
- ; X32: RET 0, implicit %eax
+ ; X32: $eax = COPY [[ANYEXT]](s32)
+ ; X32: RET 0, implicit $eax
; X64-LABEL: name: test_anyext_i16
- ; X64: [[COPY:%[0-9]+]]:_(s16) = COPY %di
+ ; X64: [[COPY:%[0-9]+]]:_(s16) = COPY $di
; X64: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[COPY]](s16)
- ; X64: %eax = COPY [[ANYEXT]](s32)
- ; X64: RET 0, implicit %eax
- %0(s16) = COPY %di
+ ; X64: $eax = COPY [[ANYEXT]](s32)
+ ; X64: RET 0, implicit $eax
+ %0(s16) = COPY $di
%1(s32) = G_ANYEXT %0(s16)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fadd-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fadd-scalar.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fadd-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fadd-scalar.mir Wed Jan 31 14:04:26 2018
@@ -28,19 +28,19 @@ stack:
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_fadd_float
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $xmm1
; CHECK: [[FADD:%[0-9]+]]:_(s32) = G_FADD [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[FADD]](s32)
- ; CHECK: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
- %1(s32) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[FADD]](s32)
+ ; CHECK: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
+ %1(s32) = COPY $xmm1
%2(s32) = G_FADD %0, %1
- %xmm0 = COPY %2(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s32)
+ RET 0, implicit $xmm0
...
---
@@ -58,18 +58,18 @@ stack:
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_fadd_double
- ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm1
; CHECK: [[FADD:%[0-9]+]]:_(s64) = G_FADD [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[FADD]](s64)
- ; CHECK: RET 0, implicit %xmm0
- %0(s64) = COPY %xmm0
- %1(s64) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[FADD]](s64)
+ ; CHECK: RET 0, implicit $xmm0
+ %0(s64) = COPY $xmm0
+ %1(s64) = COPY $xmm1
%2(s64) = G_FADD %0, %1
- %xmm0 = COPY %2(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s64)
+ RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fdiv-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fdiv-scalar.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fdiv-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fdiv-scalar.mir Wed Jan 31 14:04:26 2018
@@ -28,19 +28,19 @@ stack:
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_fdiv_float
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $xmm1
; CHECK: [[FDIV:%[0-9]+]]:_(s32) = G_FDIV [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[FDIV]](s32)
- ; CHECK: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
- %1(s32) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[FDIV]](s32)
+ ; CHECK: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
+ %1(s32) = COPY $xmm1
%2(s32) = G_FDIV %0, %1
- %xmm0 = COPY %2(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s32)
+ RET 0, implicit $xmm0
...
---
@@ -58,18 +58,18 @@ stack:
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_fdiv_double
- ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm1
; CHECK: [[FDIV:%[0-9]+]]:_(s64) = G_FDIV [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[FDIV]](s64)
- ; CHECK: RET 0, implicit %xmm0
- %0(s64) = COPY %xmm0
- %1(s64) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[FDIV]](s64)
+ ; CHECK: RET 0, implicit $xmm0
+ %0(s64) = COPY $xmm0
+ %1(s64) = COPY $xmm1
%2(s64) = G_FDIV %0, %1
- %xmm0 = COPY %2(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s64)
+ RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fmul-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fmul-scalar.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fmul-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fmul-scalar.mir Wed Jan 31 14:04:26 2018
@@ -28,19 +28,19 @@ stack:
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_fmul_float
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $xmm1
; CHECK: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[FMUL]](s32)
- ; CHECK: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
- %1(s32) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[FMUL]](s32)
+ ; CHECK: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
+ %1(s32) = COPY $xmm1
%2(s32) = G_FMUL %0, %1
- %xmm0 = COPY %2(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s32)
+ RET 0, implicit $xmm0
...
---
@@ -58,18 +58,18 @@ stack:
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_fmul_double
- ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm1
; CHECK: [[FMUL:%[0-9]+]]:_(s64) = G_FMUL [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[FMUL]](s64)
- ; CHECK: RET 0, implicit %xmm0
- %0(s64) = COPY %xmm0
- %1(s64) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[FMUL]](s64)
+ ; CHECK: RET 0, implicit $xmm0
+ %0(s64) = COPY $xmm0
+ %1(s64) = COPY $xmm1
%2(s64) = G_FMUL %0, %1
- %xmm0 = COPY %2(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s64)
+ RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fpext-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fpext-scalar.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fpext-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fpext-scalar.mir Wed Jan 31 14:04:26 2018
@@ -19,16 +19,16 @@ registers:
- { id: 1, class: _, preferred-register: '' }
body: |
bb.1.entry:
- liveins: %xmm0
+ liveins: $xmm0
; ALL-LABEL: name: test
- ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %xmm0
+ ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0
; ALL: [[FPEXT:%[0-9]+]]:_(s64) = G_FPEXT [[COPY]](s32)
- ; ALL: %xmm0 = COPY [[FPEXT]](s64)
- ; ALL: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
+ ; ALL: $xmm0 = COPY [[FPEXT]](s64)
+ ; ALL: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
%1(s64) = G_FPEXT %0(s32)
- %xmm0 = COPY %1(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(s64)
+ RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fsub-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fsub-scalar.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fsub-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-fsub-scalar.mir Wed Jan 31 14:04:26 2018
@@ -28,19 +28,19 @@ stack:
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_fsub_float
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $xmm1
; CHECK: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[FSUB]](s32)
- ; CHECK: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
- %1(s32) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[FSUB]](s32)
+ ; CHECK: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
+ %1(s32) = COPY $xmm1
%2(s32) = G_FSUB %0, %1
- %xmm0 = COPY %2(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s32)
+ RET 0, implicit $xmm0
...
---
@@ -58,18 +58,18 @@ stack:
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_fsub_double
- ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm1
; CHECK: [[FSUB:%[0-9]+]]:_(s64) = G_FSUB [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[FSUB]](s64)
- ; CHECK: RET 0, implicit %xmm0
- %0(s64) = COPY %xmm0
- %1(s64) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[FSUB]](s64)
+ ; CHECK: RET 0, implicit $xmm0
+ %0(s64) = COPY $xmm0
+ %1(s64) = COPY $xmm1
%2(s64) = G_FSUB %0, %1
- %xmm0 = COPY %2(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s64)
+ RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-insert-vec256.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-insert-vec256.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-insert-vec256.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-insert-vec256.mir Wed Jan 31 14:04:26 2018
@@ -15,19 +15,19 @@ registers:
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
-# ALL: %0:_(<8 x s32>) = COPY %ymm0
-# ALL-NEXT: %1:_(<4 x s32>) = COPY %xmm1
+# ALL: %0:_(<8 x s32>) = COPY $ymm0
+# ALL-NEXT: %1:_(<4 x s32>) = COPY $xmm1
# ALL-NEXT: %2:_(<8 x s32>) = G_INSERT %0, %1(<4 x s32>), 0
-# ALL-NEXT: %ymm0 = COPY %2(<8 x s32>)
-# ALL-NEXT: RET 0, implicit %ymm0
+# ALL-NEXT: $ymm0 = COPY %2(<8 x s32>)
+# ALL-NEXT: RET 0, implicit $ymm0
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<8 x s32>) = COPY %ymm0
- %1(<4 x s32>) = COPY %xmm1
+ %0(<8 x s32>) = COPY $ymm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<8 x s32>) = G_INSERT %0(<8 x s32>), %1(<4 x s32>), 0
- %ymm0 = COPY %2(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<8 x s32>)
+ RET 0, implicit $ymm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-insert-vec512.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-insert-vec512.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-insert-vec512.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-insert-vec512.mir Wed Jan 31 14:04:26 2018
@@ -21,19 +21,19 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %ymm1
+ liveins: $zmm0, $ymm1
; ALL-LABEL: name: test_insert_128
- ; ALL: [[COPY:%[0-9]+]]:_(<16 x s32>) = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY %xmm1
+ ; ALL: [[COPY:%[0-9]+]]:_(<16 x s32>) = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:_(<4 x s32>) = COPY $xmm1
; ALL: [[INSERT:%[0-9]+]]:_(<16 x s32>) = G_INSERT [[COPY]], [[COPY1]](<4 x s32>), 0
- ; ALL: %zmm0 = COPY [[INSERT]](<16 x s32>)
- ; ALL: RET 0, implicit %ymm0
- %0(<16 x s32>) = COPY %zmm0
- %1(<4 x s32>) = COPY %xmm1
+ ; ALL: $zmm0 = COPY [[INSERT]](<16 x s32>)
+ ; ALL: RET 0, implicit $ymm0
+ %0(<16 x s32>) = COPY $zmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<4 x s32>), 0
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %ymm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -47,18 +47,18 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %ymm1
+ liveins: $zmm0, $ymm1
; ALL-LABEL: name: test_insert_256
- ; ALL: [[COPY:%[0-9]+]]:_(<16 x s32>) = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:_(<8 x s32>) = COPY %ymm1
+ ; ALL: [[COPY:%[0-9]+]]:_(<16 x s32>) = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:_(<8 x s32>) = COPY $ymm1
; ALL: [[INSERT:%[0-9]+]]:_(<16 x s32>) = G_INSERT [[COPY]], [[COPY1]](<8 x s32>), 0
- ; ALL: %zmm0 = COPY [[INSERT]](<16 x s32>)
- ; ALL: RET 0, implicit %ymm0
- %0(<16 x s32>) = COPY %zmm0
- %1(<8 x s32>) = COPY %ymm1
+ ; ALL: $zmm0 = COPY [[INSERT]](<16 x s32>)
+ ; ALL: RET 0, implicit $ymm0
+ %0(<16 x s32>) = COPY $zmm0
+ %1(<8 x s32>) = COPY $ymm1
%2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<8 x s32>), 0
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %ymm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $ymm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-memop-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-memop-scalar.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-memop-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-memop-scalar.mir Wed Jan 31 14:04:26 2018
@@ -30,7 +30,7 @@ registers:
- { id: 10, class: _, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; X64-LABEL: name: test_memop_s8tos32
; X64: [[DEF:%[0-9]+]]:_(p0) = IMPLICIT_DEF
@@ -88,7 +88,7 @@ liveins:
#
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; X64-LABEL: name: test_memop_s64
; X64: [[DEF:%[0-9]+]]:_(p0) = IMPLICIT_DEF
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-mul-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-mul-scalar.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-mul-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-mul-scalar.mir Wed Jan 31 14:04:26 2018
@@ -33,7 +33,7 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_mul_i1
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edx
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edx
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK: [[MUL:%[0-9]+]]:_(s8) = G_MUL [[TRUNC]], [[TRUNC1]]
@@ -43,7 +43,7 @@ body: |
; CHECK: [[AND:%[0-9]+]]:_(s8) = G_AND [[COPY1]], [[C]]
; CHECK: G_STORE [[AND]](s8), [[DEF]](p0) :: (store 1)
; CHECK: RET 0
- %0(s32) = COPY %edx
+ %0(s32) = COPY $edx
%1(s1) = G_TRUNC %0(s32)
%2(s1) = G_MUL %1, %1
%3:_(p0) = G_IMPLICIT_DEF
@@ -61,19 +61,19 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_mul_i16
- ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY %di
- ; CHECK: [[COPY1:%[0-9]+]]:_(s16) = COPY %si
+ ; CHECK: [[COPY:%[0-9]+]]:_(s16) = COPY $di
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s16) = COPY $si
; CHECK: [[MUL:%[0-9]+]]:_(s16) = G_MUL [[COPY]], [[COPY1]]
- ; CHECK: %ax = COPY [[MUL]](s16)
- ; CHECK: RET 0, implicit %ax
- %0(s16) = COPY %di
- %1(s16) = COPY %si
+ ; CHECK: $ax = COPY [[MUL]](s16)
+ ; CHECK: RET 0, implicit $ax
+ %0(s16) = COPY $di
+ %1(s16) = COPY $si
%2(s16) = G_MUL %0, %1
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
@@ -87,19 +87,19 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_mul_i32
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; CHECK: [[MUL:%[0-9]+]]:_(s32) = G_MUL [[COPY]], [[COPY1]]
- ; CHECK: %eax = COPY [[MUL]](s32)
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: $eax = COPY [[MUL]](s32)
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s32) = G_MUL %0, %1
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -113,18 +113,18 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; CHECK-LABEL: name: test_mul_i64
- ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY %rdi
- ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY %rsi
+ ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $rdi
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $rsi
; CHECK: [[MUL:%[0-9]+]]:_(s64) = G_MUL [[COPY]], [[COPY1]]
- ; CHECK: %rax = COPY [[MUL]](s64)
- ; CHECK: RET 0, implicit %rax
- %0(s64) = COPY %rdi
- %1(s64) = COPY %rsi
+ ; CHECK: $rax = COPY [[MUL]](s64)
+ ; CHECK: RET 0, implicit $rax
+ %0(s64) = COPY $rdi
+ %1(s64) = COPY $rsi
%2(s64) = G_MUL %0, %1
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-mul-v128.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-mul-v128.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-mul-v128.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-mul-v128.mir Wed Jan 31 14:04:26 2018
@@ -33,20 +33,20 @@ registers:
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
-# ALL: %0:_(<8 x s16>) = COPY %xmm0
-# ALL-NEXT: %1:_(<8 x s16>) = COPY %xmm1
+# ALL: %0:_(<8 x s16>) = COPY $xmm0
+# ALL-NEXT: %1:_(<8 x s16>) = COPY $xmm1
# ALL-NEXT: %2:_(<8 x s16>) = G_MUL %0, %1
-# ALL-NEXT: %xmm0 = COPY %2(<8 x s16>)
-# ALL-NEXT: RET 0, implicit %xmm0
+# ALL-NEXT: $xmm0 = COPY %2(<8 x s16>)
+# ALL-NEXT: RET 0, implicit $xmm0
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<8 x s16>) = COPY %xmm0
- %1(<8 x s16>) = COPY %xmm1
+ %0(<8 x s16>) = COPY $xmm0
+ %1(<8 x s16>) = COPY $xmm1
%2(<8 x s16>) = G_MUL %0, %1
- %xmm0 = COPY %2(<8 x s16>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<8 x s16>)
+ RET 0, implicit $xmm0
...
---
@@ -63,20 +63,20 @@ registers:
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
-# ALL: %0:_(<4 x s32>) = COPY %xmm0
-# ALL-NEXT: %1:_(<4 x s32>) = COPY %xmm1
+# ALL: %0:_(<4 x s32>) = COPY $xmm0
+# ALL-NEXT: %1:_(<4 x s32>) = COPY $xmm1
# ALL-NEXT: %2:_(<4 x s32>) = G_MUL %0, %1
-# ALL-NEXT: %xmm0 = COPY %2(<4 x s32>)
-# ALL-NEXT: RET 0, implicit %xmm0
+# ALL-NEXT: $xmm0 = COPY %2(<4 x s32>)
+# ALL-NEXT: RET 0, implicit $xmm0
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<4 x s32>) = COPY %xmm0
- %1(<4 x s32>) = COPY %xmm1
+ %0(<4 x s32>) = COPY $xmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<4 x s32>) = G_MUL %0, %1
- %xmm0 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -93,19 +93,19 @@ registers:
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
-# ALL: %0:_(<2 x s64>) = COPY %xmm0
-# ALL-NEXT: %1:_(<2 x s64>) = COPY %xmm1
+# ALL: %0:_(<2 x s64>) = COPY $xmm0
+# ALL-NEXT: %1:_(<2 x s64>) = COPY $xmm1
# ALL-NEXT: %2:_(<2 x s64>) = G_MUL %0, %1
-# ALL-NEXT: %xmm0 = COPY %2(<2 x s64>)
-# ALL-NEXT: RET 0, implicit %xmm0
+# ALL-NEXT: $xmm0 = COPY %2(<2 x s64>)
+# ALL-NEXT: RET 0, implicit $xmm0
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<2 x s64>) = COPY %xmm0
- %1(<2 x s64>) = COPY %xmm1
+ %0(<2 x s64>) = COPY $xmm0
+ %1(<2 x s64>) = COPY $xmm1
%2(<2 x s64>) = G_MUL %0, %1
- %xmm0 = COPY %2(<2 x s64>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<2 x s64>)
+ RET 0, implicit $xmm0
...
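One note on the check lines in these tests: directives written with a leading
'#' (for example '# ALL:' and '# ALL-NEXT:') sit at the top level of the YAML
document, while the ';'-prefixed ones live inside the 'body: |' blocks. Both
are literal FileCheck matches against the MIR that llc prints back, so they
have to switch sigils in lockstep with the instructions themselves; a '-NEXT'
directive additionally pins its match to the immediately following output
line, so a single stale '%xmm0' in such a chain fails the whole test.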
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-mul-v256.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-mul-v256.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-mul-v256.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-mul-v256.mir Wed Jan 31 14:04:26 2018
@@ -33,20 +33,20 @@ registers:
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
-# ALL: %0:_(<16 x s16>) = COPY %ymm0
-# ALL-NEXT: %1:_(<16 x s16>) = COPY %ymm1
+# ALL: %0:_(<16 x s16>) = COPY $ymm0
+# ALL-NEXT: %1:_(<16 x s16>) = COPY $ymm1
# ALL-NEXT: %2:_(<16 x s16>) = G_MUL %0, %1
-# ALL-NEXT: %ymm0 = COPY %2(<16 x s16>)
-# ALL-NEXT: RET 0, implicit %ymm0
+# ALL-NEXT: $ymm0 = COPY %2(<16 x s16>)
+# ALL-NEXT: RET 0, implicit $ymm0
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<16 x s16>) = COPY %ymm0
- %1(<16 x s16>) = COPY %ymm1
+ %0(<16 x s16>) = COPY $ymm0
+ %1(<16 x s16>) = COPY $ymm1
%2(<16 x s16>) = G_MUL %0, %1
- %ymm0 = COPY %2(<16 x s16>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<16 x s16>)
+ RET 0, implicit $ymm0
...
---
@@ -63,20 +63,20 @@ registers:
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
-# ALL: %0:_(<8 x s32>) = COPY %ymm0
-# ALL-NEXT: %1:_(<8 x s32>) = COPY %ymm1
+# ALL: %0:_(<8 x s32>) = COPY $ymm0
+# ALL-NEXT: %1:_(<8 x s32>) = COPY $ymm1
# ALL-NEXT: %2:_(<8 x s32>) = G_MUL %0, %1
-# ALL-NEXT: %ymm0 = COPY %2(<8 x s32>)
-# ALL-NEXT: RET 0, implicit %ymm0
+# ALL-NEXT: $ymm0 = COPY %2(<8 x s32>)
+# ALL-NEXT: RET 0, implicit $ymm0
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<8 x s32>) = COPY %ymm0
- %1(<8 x s32>) = COPY %ymm1
+ %0(<8 x s32>) = COPY $ymm0
+ %1(<8 x s32>) = COPY $ymm1
%2(<8 x s32>) = G_MUL %0, %1
- %ymm0 = COPY %2(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<8 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -93,19 +93,19 @@ registers:
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
-# ALL: %0:_(<4 x s64>) = COPY %ymm0
-# ALL-NEXT: %1:_(<4 x s64>) = COPY %ymm1
+# ALL: %0:_(<4 x s64>) = COPY $ymm0
+# ALL-NEXT: %1:_(<4 x s64>) = COPY $ymm1
# ALL-NEXT: %2:_(<4 x s64>) = G_MUL %0, %1
-# ALL-NEXT: %ymm0 = COPY %2(<4 x s64>)
-# ALL-NEXT: RET 0, implicit %ymm0
+# ALL-NEXT: $ymm0 = COPY %2(<4 x s64>)
+# ALL-NEXT: RET 0, implicit $ymm0
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<4 x s64>) = COPY %ymm0
- %1(<4 x s64>) = COPY %ymm1
+ %0(<4 x s64>) = COPY $ymm0
+ %1(<4 x s64>) = COPY $ymm1
%2(<4 x s64>) = G_MUL %0, %1
- %ymm0 = COPY %2(<4 x s64>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<4 x s64>)
+ RET 0, implicit $ymm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-mul-v512.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-mul-v512.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-mul-v512.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-mul-v512.mir Wed Jan 31 14:04:26 2018
@@ -35,20 +35,20 @@ registers:
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
-# ALL: %0:_(<32 x s16>) = COPY %zmm0
-# ALL-NEXT: %1:_(<32 x s16>) = COPY %zmm1
+# ALL: %0:_(<32 x s16>) = COPY $zmm0
+# ALL-NEXT: %1:_(<32 x s16>) = COPY $zmm1
# ALL-NEXT: %2:_(<32 x s16>) = G_MUL %0, %1
-# ALL-NEXT: %zmm0 = COPY %2(<32 x s16>)
-# ALL-NEXT: RET 0, implicit %zmm0
+# ALL-NEXT: $zmm0 = COPY %2(<32 x s16>)
+# ALL-NEXT: RET 0, implicit $zmm0
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
- %0(<32 x s16>) = COPY %zmm0
- %1(<32 x s16>) = COPY %zmm1
+ %0(<32 x s16>) = COPY $zmm0
+ %1(<32 x s16>) = COPY $zmm1
%2(<32 x s16>) = G_MUL %0, %1
- %zmm0 = COPY %2(<32 x s16>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<32 x s16>)
+ RET 0, implicit $zmm0
...
---
@@ -65,20 +65,20 @@ registers:
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
-# ALL: %0:_(<16 x s32>) = COPY %zmm0
-# ALL-NEXT: %1:_(<16 x s32>) = COPY %zmm1
+# ALL: %0:_(<16 x s32>) = COPY $zmm0
+# ALL-NEXT: %1:_(<16 x s32>) = COPY $zmm1
# ALL-NEXT: %2:_(<16 x s32>) = G_MUL %0, %1
-# ALL-NEXT: %zmm0 = COPY %2(<16 x s32>)
-# ALL-NEXT: RET 0, implicit %zmm0
+# ALL-NEXT: $zmm0 = COPY %2(<16 x s32>)
+# ALL-NEXT: RET 0, implicit $zmm0
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
- %0(<16 x s32>) = COPY %zmm0
- %1(<16 x s32>) = COPY %zmm1
+ %0(<16 x s32>) = COPY $zmm0
+ %1(<16 x s32>) = COPY $zmm1
%2(<16 x s32>) = G_MUL %0, %1
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $zmm0
...
---
@@ -95,19 +95,19 @@ registers:
- { id: 0, class: _ }
- { id: 1, class: _ }
- { id: 2, class: _ }
-# ALL: %0:_(<8 x s64>) = COPY %zmm0
-# ALL-NEXT: %1:_(<8 x s64>) = COPY %zmm1
+# ALL: %0:_(<8 x s64>) = COPY $zmm0
+# ALL-NEXT: %1:_(<8 x s64>) = COPY $zmm1
# ALL-NEXT: %2:_(<8 x s64>) = G_MUL %0, %1
-# ALL-NEXT: %zmm0 = COPY %2(<8 x s64>)
-# ALL-NEXT: RET 0, implicit %zmm0
+# ALL-NEXT: $zmm0 = COPY %2(<8 x s64>)
+# ALL-NEXT: RET 0, implicit $zmm0
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
- %0(<8 x s64>) = COPY %zmm0
- %1(<8 x s64>) = COPY %zmm1
+ %0(<8 x s64>) = COPY $zmm0
+ %1(<8 x s64>) = COPY $zmm1
%2(<8 x s64>) = G_MUL %0, %1
- %zmm0 = COPY %2(<8 x s64>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<8 x s64>)
+ RET 0, implicit $zmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-or-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-or-scalar.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-or-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-or-scalar.mir Wed Jan 31 14:04:26 2018
@@ -41,7 +41,7 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_or_i1
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edx
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edx
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK: [[OR:%[0-9]+]]:_(s8) = G_OR [[TRUNC]], [[TRUNC1]]
@@ -51,7 +51,7 @@ body: |
; CHECK: [[AND:%[0-9]+]]:_(s8) = G_AND [[COPY1]], [[C]]
; CHECK: G_STORE [[AND]](s8), [[DEF]](p0) :: (store 1)
; CHECK: RET 0
- %0(s32) = COPY %edx
+ %0(s32) = COPY $edx
%1(s1) = G_TRUNC %0(s32)
%2(s1) = G_OR %1, %1
%3:_(p0) = G_IMPLICIT_DEF
@@ -75,12 +75,12 @@ body: |
; CHECK-LABEL: name: test_or_i8
; CHECK: [[DEF:%[0-9]+]]:_(s8) = IMPLICIT_DEF
; CHECK: [[OR:%[0-9]+]]:_(s8) = G_OR [[DEF]], [[DEF]]
- ; CHECK: %al = COPY [[OR]](s8)
- ; CHECK: RET 0, implicit %al
+ ; CHECK: $al = COPY [[OR]](s8)
+ ; CHECK: RET 0, implicit $al
%0(s8) = IMPLICIT_DEF
%1(s8) = G_OR %0, %0
- %al = COPY %1(s8)
- RET 0, implicit %al
+ $al = COPY %1(s8)
+ RET 0, implicit $al
...
---
@@ -100,12 +100,12 @@ body: |
; CHECK-LABEL: name: test_or_i16
; CHECK: [[DEF:%[0-9]+]]:_(s16) = IMPLICIT_DEF
; CHECK: [[OR:%[0-9]+]]:_(s16) = G_OR [[DEF]], [[DEF]]
- ; CHECK: %ax = COPY [[OR]](s16)
- ; CHECK: RET 0, implicit %ax
+ ; CHECK: $ax = COPY [[OR]](s16)
+ ; CHECK: RET 0, implicit $ax
%0(s16) = IMPLICIT_DEF
%1(s16) = G_OR %0, %0
- %ax = COPY %1(s16)
- RET 0, implicit %ax
+ $ax = COPY %1(s16)
+ RET 0, implicit $ax
...
---
@@ -125,12 +125,12 @@ body: |
; CHECK-LABEL: name: test_or_i32
; CHECK: [[DEF:%[0-9]+]]:_(s32) = IMPLICIT_DEF
; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[DEF]], [[DEF]]
- ; CHECK: %eax = COPY [[OR]](s32)
- ; CHECK: RET 0, implicit %eax
+ ; CHECK: $eax = COPY [[OR]](s32)
+ ; CHECK: RET 0, implicit $eax
%0(s32) = IMPLICIT_DEF
%1(s32) = G_OR %0, %0
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -150,11 +150,11 @@ body: |
; CHECK-LABEL: name: test_or_i64
; CHECK: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; CHECK: [[OR:%[0-9]+]]:_(s64) = G_OR [[DEF]], [[DEF]]
- ; CHECK: %rax = COPY [[OR]](s64)
- ; CHECK: RET 0, implicit %rax
+ ; CHECK: $rax = COPY [[OR]](s64)
+ ; CHECK: RET 0, implicit $rax
%0(s64) = IMPLICIT_DEF
%1(s64) = G_OR %0, %0
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-phi.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-phi.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-phi.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-phi.mir Wed Jan 31 14:04:26 2018
@@ -144,10 +144,10 @@ body: |
; ALL-LABEL: name: test_i1
; ALL: bb.0.entry:
; ALL: successors: %bb.2(0x40000000), %bb.1(0x40000000)
- ; ALL: liveins: %edi, %edx, %esi
- ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
- ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY %edx
+ ; ALL: liveins: $edi, $edx, $esi
+ ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
+ ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
; ALL: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
@@ -158,16 +158,16 @@ body: |
; ALL: bb.2.cond.end:
; ALL: [[PHI:%[0-9]+]]:_(s8) = G_PHI [[TRUNC1]](s8), %bb.1, [[TRUNC]](s8), %bb.0
; ALL: [[COPY3:%[0-9]+]]:_(s8) = COPY [[PHI]](s8)
- ; ALL: %al = COPY [[COPY3]](s8)
- ; ALL: RET 0, implicit %al
+ ; ALL: $al = COPY [[COPY3]](s8)
+ ; ALL: RET 0, implicit $al
bb.1.entry:
successors: %bb.3(0x40000000), %bb.2(0x40000000)
- liveins: %edi, %edx, %esi
+ liveins: $edi, $edx, $esi
- %0:_(s32) = COPY %edi
- %3:_(s32) = COPY %esi
+ %0:_(s32) = COPY $edi
+ %3:_(s32) = COPY $esi
%1:_(s1) = G_TRUNC %3(s32)
- %4:_(s32) = COPY %edx
+ %4:_(s32) = COPY $edx
%2:_(s1) = G_TRUNC %4(s32)
%5:_(s32) = G_CONSTANT i32 0
%6:_(s1) = G_ICMP intpred(sgt), %0(s32), %5
@@ -180,8 +180,8 @@ body: |
bb.3.cond.end:
%7:_(s1) = G_PHI %2(s1), %bb.2, %1(s1), %bb.1
%8:_(s8) = G_ANYEXT %7(s1)
- %al = COPY %8(s8)
- RET 0, implicit %al
+ $al = COPY %8(s8)
+ RET 0, implicit $al
...
---
@@ -210,11 +210,11 @@ body: |
; ALL-LABEL: name: test_i8
; ALL: bb.0.entry:
; ALL: successors: %bb.2(0x40000000), %bb.1(0x40000000)
- ; ALL: liveins: %edi, %edx, %esi
- ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
+ ; ALL: liveins: $edi, $edx, $esi
+ ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; ALL: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
- ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY %edx
+ ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
; ALL: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY2]](s32)
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
@@ -223,16 +223,16 @@ body: |
; ALL: successors: %bb.2(0x80000000)
; ALL: bb.2.cond.end:
; ALL: [[PHI:%[0-9]+]]:_(s8) = G_PHI [[TRUNC1]](s8), %bb.1, [[TRUNC]](s8), %bb.0
- ; ALL: %al = COPY [[PHI]](s8)
- ; ALL: RET 0, implicit %al
+ ; ALL: $al = COPY [[PHI]](s8)
+ ; ALL: RET 0, implicit $al
bb.1.entry:
successors: %bb.3(0x40000000), %bb.2(0x40000000)
- liveins: %edi, %edx, %esi
+ liveins: $edi, $edx, $esi
- %0:_(s32) = COPY %edi
- %3:_(s32) = COPY %esi
+ %0:_(s32) = COPY $edi
+ %3:_(s32) = COPY $esi
%1:_(s8) = G_TRUNC %3(s32)
- %4:_(s32) = COPY %edx
+ %4:_(s32) = COPY $edx
%2:_(s8) = G_TRUNC %4(s32)
%5:_(s32) = G_CONSTANT i32 0
%6:_(s1) = G_ICMP intpred(sgt), %0(s32), %5
@@ -244,8 +244,8 @@ body: |
bb.3.cond.end:
%7:_(s8) = G_PHI %2(s8), %bb.2, %1(s8), %bb.1
- %al = COPY %7(s8)
- RET 0, implicit %al
+ $al = COPY %7(s8)
+ RET 0, implicit $al
...
---
@@ -274,11 +274,11 @@ body: |
; ALL-LABEL: name: test_i16
; ALL: bb.0.entry:
; ALL: successors: %bb.2(0x40000000), %bb.1(0x40000000)
- ; ALL: liveins: %edi, %edx, %esi
- ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
+ ; ALL: liveins: $edi, $edx, $esi
+ ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; ALL: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY %edx
+ ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
; ALL: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY2]](s32)
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
@@ -287,16 +287,16 @@ body: |
; ALL: successors: %bb.2(0x80000000)
; ALL: bb.2.cond.end:
; ALL: [[PHI:%[0-9]+]]:_(s16) = G_PHI [[TRUNC1]](s16), %bb.1, [[TRUNC]](s16), %bb.0
- ; ALL: %ax = COPY [[PHI]](s16)
- ; ALL: RET 0, implicit %ax
+ ; ALL: $ax = COPY [[PHI]](s16)
+ ; ALL: RET 0, implicit $ax
bb.1.entry:
successors: %bb.3(0x40000000), %bb.2(0x40000000)
- liveins: %edi, %edx, %esi
+ liveins: $edi, $edx, $esi
- %0:_(s32) = COPY %edi
- %3:_(s32) = COPY %esi
+ %0:_(s32) = COPY $edi
+ %3:_(s32) = COPY $esi
%1:_(s16) = G_TRUNC %3(s32)
- %4:_(s32) = COPY %edx
+ %4:_(s32) = COPY $edx
%2:_(s16) = G_TRUNC %4(s32)
%5:_(s32) = G_CONSTANT i32 0
%6:_(s1) = G_ICMP intpred(sgt), %0(s32), %5
@@ -308,8 +308,8 @@ body: |
bb.3.cond.end:
%7:_(s16) = G_PHI %2(s16), %bb.2, %1(s16), %bb.1
- %ax = COPY %7(s16)
- RET 0, implicit %ax
+ $ax = COPY %7(s16)
+ RET 0, implicit $ax
...
---
@@ -336,10 +336,10 @@ body: |
; ALL-LABEL: name: test_i32
; ALL: bb.0.entry:
; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; ALL: liveins: %edi, %edx, %esi
- ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
- ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY %edx
+ ; ALL: liveins: $edi, $edx, $esi
+ ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
+ ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY $edx
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
; ALL: G_BRCOND [[ICMP]](s1), %bb.1
@@ -351,15 +351,15 @@ body: |
; ALL: successors: %bb.3(0x80000000)
; ALL: bb.3.cond.end:
; ALL: [[PHI:%[0-9]+]]:_(s32) = G_PHI [[COPY1]](s32), %bb.1, [[COPY2]](s32), %bb.2
- ; ALL: %eax = COPY [[PHI]](s32)
- ; ALL: RET 0, implicit %eax
+ ; ALL: $eax = COPY [[PHI]](s32)
+ ; ALL: RET 0, implicit $eax
bb.1.entry:
successors: %bb.2(0x40000000), %bb.3(0x40000000)
- liveins: %edi, %edx, %esi
+ liveins: $edi, $edx, $esi
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
- %2(s32) = COPY %edx
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
+ %2(s32) = COPY $edx
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
G_BRCOND %4(s1), %bb.2
@@ -376,8 +376,8 @@ body: |
bb.4.cond.end:
%5(s32) = G_PHI %1(s32), %bb.2, %2(s32), %bb.3
- %eax = COPY %5(s32)
- RET 0, implicit %eax
+ $eax = COPY %5(s32)
+ RET 0, implicit $eax
...
---
@@ -404,10 +404,10 @@ body: |
; ALL-LABEL: name: test_i64
; ALL: bb.0.entry:
; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; ALL: liveins: %edi, %rdx, %rsi
- ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:_(s64) = COPY %rsi
- ; ALL: [[COPY2:%[0-9]+]]:_(s64) = COPY %rdx
+ ; ALL: liveins: $edi, $rdx, $rsi
+ ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:_(s64) = COPY $rsi
+ ; ALL: [[COPY2:%[0-9]+]]:_(s64) = COPY $rdx
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
; ALL: G_BRCOND [[ICMP]](s1), %bb.1
@@ -419,15 +419,15 @@ body: |
; ALL: successors: %bb.3(0x80000000)
; ALL: bb.3.cond.end:
; ALL: [[PHI:%[0-9]+]]:_(s64) = G_PHI [[COPY1]](s64), %bb.1, [[COPY2]](s64), %bb.2
- ; ALL: %rax = COPY [[PHI]](s64)
- ; ALL: RET 0, implicit %rax
+ ; ALL: $rax = COPY [[PHI]](s64)
+ ; ALL: RET 0, implicit $rax
bb.1.entry:
successors: %bb.2(0x40000000), %bb.3(0x40000000)
- liveins: %edi, %rdx, %rsi
+ liveins: $edi, $rdx, $rsi
- %0(s32) = COPY %edi
- %1(s64) = COPY %rsi
- %2(s64) = COPY %rdx
+ %0(s32) = COPY $edi
+ %1(s64) = COPY $rsi
+ %2(s64) = COPY $rdx
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
G_BRCOND %4(s1), %bb.2
@@ -444,8 +444,8 @@ body: |
bb.4.cond.end:
%5(s64) = G_PHI %1(s64), %bb.2, %2(s64), %bb.3
- %rax = COPY %5(s64)
- RET 0, implicit %rax
+ $rax = COPY %5(s64)
+ RET 0, implicit $rax
...
---
@@ -475,10 +475,10 @@ body: |
; ALL-LABEL: name: test_float
; ALL: bb.0.{{[a-zA-Z0-9]+}}:
; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; ALL: liveins: %edi, %xmm0, %xmm1
- ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY %xmm0
- ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY %xmm1
+ ; ALL: liveins: $edi, $xmm0, $xmm1
+ ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:_(s32) = COPY $xmm0
+ ; ALL: [[COPY2:%[0-9]+]]:_(s32) = COPY $xmm1
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
; ALL: G_BRCOND [[ICMP]](s1), %bb.1
@@ -490,15 +490,15 @@ body: |
; ALL: successors: %bb.3(0x80000000)
; ALL: bb.3.cond.end:
; ALL: [[PHI:%[0-9]+]]:_(s32) = G_PHI [[COPY1]](s32), %bb.1, [[COPY2]](s32), %bb.2
- ; ALL: %xmm0 = COPY [[PHI]](s32)
- ; ALL: RET 0, implicit %xmm0
+ ; ALL: $xmm0 = COPY [[PHI]](s32)
+ ; ALL: RET 0, implicit $xmm0
bb.1.entry:
successors: %bb.2(0x40000000), %bb.3(0x40000000)
- liveins: %edi, %xmm0, %xmm1
+ liveins: $edi, $xmm0, $xmm1
- %0(s32) = COPY %edi
- %1(s32) = COPY %xmm0
- %2(s32) = COPY %xmm1
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $xmm0
+ %2(s32) = COPY $xmm1
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
G_BRCOND %4(s1), %bb.2
@@ -515,8 +515,8 @@ body: |
bb.4.cond.end:
%5(s32) = G_PHI %1(s32), %bb.2, %2(s32), %bb.3
- %xmm0 = COPY %5(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %5(s32)
+ RET 0, implicit $xmm0
...
---
@@ -543,10 +543,10 @@ body: |
; ALL-LABEL: name: test_double
; ALL: bb.0.{{[a-zA-Z0-9]+}}:
; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; ALL: liveins: %edi, %xmm0, %xmm1
- ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:_(s64) = COPY %xmm0
- ; ALL: [[COPY2:%[0-9]+]]:_(s64) = COPY %xmm1
+ ; ALL: liveins: $edi, $xmm0, $xmm1
+ ; ALL: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:_(s64) = COPY $xmm0
+ ; ALL: [[COPY2:%[0-9]+]]:_(s64) = COPY $xmm1
; ALL: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; ALL: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
; ALL: G_BRCOND [[ICMP]](s1), %bb.1
@@ -558,15 +558,15 @@ body: |
; ALL: successors: %bb.3(0x80000000)
; ALL: bb.3.cond.end:
; ALL: [[PHI:%[0-9]+]]:_(s64) = G_PHI [[COPY1]](s64), %bb.1, [[COPY2]](s64), %bb.2
- ; ALL: %xmm0 = COPY [[PHI]](s64)
- ; ALL: RET 0, implicit %xmm0
+ ; ALL: $xmm0 = COPY [[PHI]](s64)
+ ; ALL: RET 0, implicit $xmm0
bb.1.entry:
successors: %bb.2(0x40000000), %bb.3(0x40000000)
- liveins: %edi, %xmm0, %xmm1
+ liveins: $edi, $xmm0, $xmm1
- %0(s32) = COPY %edi
- %1(s64) = COPY %xmm0
- %2(s64) = COPY %xmm1
+ %0(s32) = COPY $edi
+ %1(s64) = COPY $xmm0
+ %2(s64) = COPY $xmm1
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
G_BRCOND %4(s1), %bb.2
@@ -583,7 +583,7 @@ body: |
bb.4.cond.end:
%5(s64) = G_PHI %1(s64), %bb.2, %2(s64), %bb.3
- %xmm0 = COPY %5(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %5(s64)
+ RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-sub-v128.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-sub-v128.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-sub-v128.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-sub-v128.mir Wed Jan 31 14:04:26 2018
@@ -33,7 +33,7 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; ALL-LABEL: name: test_sub_v16i8
; ALL: [[DEF:%[0-9]+]]:_(<16 x s8>) = IMPLICIT_DEF
@@ -43,7 +43,7 @@ body: |
%0(<16 x s8>) = IMPLICIT_DEF
%1(<16 x s8>) = IMPLICIT_DEF
%2(<16 x s8>) = G_SUB %0, %1
- %xmm0 = COPY %2
+ $xmm0 = COPY %2
RET 0
...
@@ -58,7 +58,7 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; ALL-LABEL: name: test_sub_v8i16
; ALL: [[DEF:%[0-9]+]]:_(<8 x s16>) = IMPLICIT_DEF
@@ -68,7 +68,7 @@ body: |
%0(<8 x s16>) = IMPLICIT_DEF
%1(<8 x s16>) = IMPLICIT_DEF
%2(<8 x s16>) = G_SUB %0, %1
- %xmm0 = COPY %2
+ $xmm0 = COPY %2
RET 0
...
@@ -83,7 +83,7 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; ALL-LABEL: name: test_sub_v4i32
; ALL: [[DEF:%[0-9]+]]:_(<4 x s32>) = IMPLICIT_DEF
@@ -93,7 +93,7 @@ body: |
%0(<4 x s32>) = IMPLICIT_DEF
%1(<4 x s32>) = IMPLICIT_DEF
%2(<4 x s32>) = G_SUB %0, %1
- %xmm0 = COPY %2
+ $xmm0 = COPY %2
RET 0
...
@@ -108,7 +108,7 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; ALL-LABEL: name: test_sub_v2i64
; ALL: [[DEF:%[0-9]+]]:_(<2 x s64>) = IMPLICIT_DEF
@@ -118,7 +118,7 @@ body: |
%0(<2 x s64>) = IMPLICIT_DEF
%1(<2 x s64>) = IMPLICIT_DEF
%2(<2 x s64>) = G_SUB %0, %1
- %xmm0 = COPY %2
+ $xmm0 = COPY %2
RET 0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-sub-v256.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-sub-v256.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-sub-v256.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-sub-v256.mir Wed Jan 31 14:04:26 2018
@@ -34,7 +34,7 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_sub_v32i8
; ALL: [[DEF:%[0-9]+]]:_(<32 x s8>) = IMPLICIT_DEF
@@ -44,7 +44,7 @@ body: |
%0(<32 x s8>) = IMPLICIT_DEF
%1(<32 x s8>) = IMPLICIT_DEF
%2(<32 x s8>) = G_SUB %0, %1
- %ymm0 = COPY %2
+ $ymm0 = COPY %2
RET 0
...
@@ -59,7 +59,7 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_sub_v16i16
; ALL: [[DEF:%[0-9]+]]:_(<16 x s16>) = IMPLICIT_DEF
@@ -69,7 +69,7 @@ body: |
%0(<16 x s16>) = IMPLICIT_DEF
%1(<16 x s16>) = IMPLICIT_DEF
%2(<16 x s16>) = G_SUB %0, %1
- %ymm0 = COPY %2
+ $ymm0 = COPY %2
RET 0
...
@@ -84,7 +84,7 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_sub_v8i32
; ALL: [[DEF:%[0-9]+]]:_(<8 x s32>) = IMPLICIT_DEF
@@ -94,7 +94,7 @@ body: |
%0(<8 x s32>) = IMPLICIT_DEF
%1(<8 x s32>) = IMPLICIT_DEF
%2(<8 x s32>) = G_SUB %0, %1
- %ymm0 = COPY %2
+ $ymm0 = COPY %2
RET 0
...
@@ -109,7 +109,7 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_sub_v4i64
; ALL: [[DEF:%[0-9]+]]:_(<4 x s64>) = IMPLICIT_DEF
@@ -119,7 +119,7 @@ body: |
%0(<4 x s64>) = IMPLICIT_DEF
%1(<4 x s64>) = IMPLICIT_DEF
%2(<4 x s64>) = G_SUB %0, %1
- %ymm0 = COPY %2
+ $ymm0 = COPY %2
RET 0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-sub-v512.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-sub-v512.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-sub-v512.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-sub-v512.mir Wed Jan 31 14:04:26 2018
@@ -34,7 +34,7 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_sub_v64i8
; ALL: [[DEF:%[0-9]+]]:_(<64 x s8>) = IMPLICIT_DEF
@@ -44,7 +44,7 @@ body: |
%0(<64 x s8>) = IMPLICIT_DEF
%1(<64 x s8>) = IMPLICIT_DEF
%2(<64 x s8>) = G_SUB %0, %1
- %zmm0 = COPY %2
+ $zmm0 = COPY %2
RET 0
...
@@ -59,7 +59,7 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_sub_v32i16
; ALL: [[DEF:%[0-9]+]]:_(<32 x s16>) = IMPLICIT_DEF
@@ -69,7 +69,7 @@ body: |
%0(<32 x s16>) = IMPLICIT_DEF
%1(<32 x s16>) = IMPLICIT_DEF
%2(<32 x s16>) = G_SUB %0, %1
- %zmm0 = COPY %2
+ $zmm0 = COPY %2
RET 0
...
@@ -84,7 +84,7 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_sub_v16i32
; ALL: [[DEF:%[0-9]+]]:_(<16 x s32>) = IMPLICIT_DEF
@@ -94,7 +94,7 @@ body: |
%0(<16 x s32>) = IMPLICIT_DEF
%1(<16 x s32>) = IMPLICIT_DEF
%2(<16 x s32>) = G_SUB %0, %1
- %zmm0 = COPY %2
+ $zmm0 = COPY %2
RET 0
...
@@ -109,7 +109,7 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_sub_v8i64
; ALL: [[DEF:%[0-9]+]]:_(<8 x s64>) = IMPLICIT_DEF
@@ -119,7 +119,7 @@ body: |
%0(<8 x s64>) = IMPLICIT_DEF
%1(<8 x s64>) = IMPLICIT_DEF
%2(<8 x s64>) = G_SUB %0, %1
- %zmm0 = COPY %2
+ $zmm0 = COPY %2
RET 0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-sub.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-sub.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-sub.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-sub.mir Wed Jan 31 14:04:26 2018
@@ -24,7 +24,7 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_sub_i1
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edx
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edx
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK: [[SUB:%[0-9]+]]:_(s8) = G_SUB [[TRUNC]], [[TRUNC1]]
@@ -34,7 +34,7 @@ body: |
; CHECK: [[AND:%[0-9]+]]:_(s8) = G_AND [[COPY1]], [[C]]
; CHECK: G_STORE [[AND]](s8), [[DEF]](p0) :: (store 1)
; CHECK: RET 0
- %0(s32) = COPY %edx
+ %0(s32) = COPY $edx
%1(s1) = G_TRUNC %0(s32)
%2(s1) = G_SUB %1, %1
%3:_(p0) = G_IMPLICIT_DEF
@@ -53,19 +53,19 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_sub_i32
- ; CHECK: liveins: %edi, %esi
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %esi
+ ; CHECK: liveins: $edi, $esi
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $esi
; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[COPY1]]
- ; CHECK: %eax = COPY [[SUB]](s32)
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: $eax = COPY [[SUB]](s32)
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s32) = G_SUB %0, %1
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-xor-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-xor-scalar.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-xor-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/legalize-xor-scalar.mir Wed Jan 31 14:04:26 2018
@@ -41,12 +41,12 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_xor_i1
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %edx
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $edx
; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
; CHECK: [[XOR:%[0-9]+]]:_(s8) = G_XOR [[TRUNC]], [[TRUNC1]]
; CHECK: RET 0
- %0(s32) = COPY %edx
+ %0(s32) = COPY $edx
%1(s1) = G_TRUNC %0(s32)
%2(s1) = G_XOR %1, %1
%3:_(p0) = G_IMPLICIT_DEF
@@ -70,12 +70,12 @@ body: |
; CHECK-LABEL: name: test_xor_i8
; CHECK: [[DEF:%[0-9]+]]:_(s8) = IMPLICIT_DEF
; CHECK: [[XOR:%[0-9]+]]:_(s8) = G_XOR [[DEF]], [[DEF]]
- ; CHECK: %al = COPY [[XOR]](s8)
- ; CHECK: RET 0, implicit %al
+ ; CHECK: $al = COPY [[XOR]](s8)
+ ; CHECK: RET 0, implicit $al
%0(s8) = IMPLICIT_DEF
%1(s8) = G_XOR %0, %0
- %al = COPY %1(s8)
- RET 0, implicit %al
+ $al = COPY %1(s8)
+ RET 0, implicit $al
...
---
@@ -95,12 +95,12 @@ body: |
; CHECK-LABEL: name: test_xor_i16
; CHECK: [[DEF:%[0-9]+]]:_(s16) = IMPLICIT_DEF
; CHECK: [[XOR:%[0-9]+]]:_(s16) = G_XOR [[DEF]], [[DEF]]
- ; CHECK: %ax = COPY [[XOR]](s16)
- ; CHECK: RET 0, implicit %ax
+ ; CHECK: $ax = COPY [[XOR]](s16)
+ ; CHECK: RET 0, implicit $ax
%0(s16) = IMPLICIT_DEF
%1(s16) = G_XOR %0, %0
- %ax = COPY %1(s16)
- RET 0, implicit %ax
+ $ax = COPY %1(s16)
+ RET 0, implicit $ax
...
---
@@ -120,12 +120,12 @@ body: |
; CHECK-LABEL: name: test_xor_i32
; CHECK: [[DEF:%[0-9]+]]:_(s32) = IMPLICIT_DEF
; CHECK: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[DEF]], [[DEF]]
- ; CHECK: %eax = COPY [[XOR]](s32)
- ; CHECK: RET 0, implicit %eax
+ ; CHECK: $eax = COPY [[XOR]](s32)
+ ; CHECK: RET 0, implicit $eax
%0(s32) = IMPLICIT_DEF
%1(s32) = G_XOR %0, %0
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -145,11 +145,11 @@ body: |
; CHECK-LABEL: name: test_xor_i64
; CHECK: [[DEF:%[0-9]+]]:_(s64) = IMPLICIT_DEF
; CHECK: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[DEF]], [[DEF]]
- ; CHECK: %rax = COPY [[XOR]](s64)
- ; CHECK: RET 0, implicit %rax
+ ; CHECK: $rax = COPY [[XOR]](s64)
+ ; CHECK: RET 0, implicit $rax
%0(s64) = IMPLICIT_DEF
%1(s64) = G_XOR %0, %0
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/regbankselect-AVX2.mir Wed Jan 31 14:04:26 2018
@@ -107,12 +107,12 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
- %0(p0) = COPY %rdi
+ %0(p0) = COPY $rdi
%1(<8 x s32>) = G_LOAD %0(p0) :: (load 32 from %ir.p1, align 1)
- %ymm0 = COPY %1(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %1(<8 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -129,10 +129,10 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %ymm0
+ liveins: $rdi, $ymm0
- %0(<8 x s32>) = COPY %ymm0
- %1(p0) = COPY %rdi
+ %0(<8 x s32>) = COPY $ymm0
+ %1(p0) = COPY $rdi
G_STORE %0(<8 x s32>), %1(p0) :: (store 32 into %ir.p1, align 1)
RET 0
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/regbankselect-AVX512.mir Wed Jan 31 14:04:26 2018
@@ -100,12 +100,12 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
- %0(p0) = COPY %rdi
+ %0(p0) = COPY $rdi
%1(<16 x s32>) = G_LOAD %0(p0) :: (load 64 from %ir.p1, align 1)
- %zmm0 = COPY %1(<16 x s32>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %1(<16 x s32>)
+ RET 0, implicit $zmm0
...
---
@@ -122,10 +122,10 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %zmm0
+ liveins: $rdi, $zmm0
- %0(<16 x s32>) = COPY %zmm0
- %1(p0) = COPY %rdi
+ %0(<16 x s32>) = COPY $zmm0
+ %1(p0) = COPY $rdi
G_STORE %0(<16 x s32>), %1(p0) :: (store 64 into %ir.p1, align 1)
RET 0
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir Wed Jan 31 14:04:26 2018
@@ -256,27 +256,27 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; FAST-LABEL: name: test_add_i8
- ; FAST: liveins: %edi, %esi
- ; FAST: [[COPY:%[0-9]+]]:gpr(s8) = COPY %dil
- ; FAST: [[COPY1:%[0-9]+]]:gpr(s8) = COPY %sil
+ ; FAST: liveins: $edi, $esi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s8) = COPY $dil
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(s8) = COPY $sil
; FAST: [[ADD:%[0-9]+]]:gpr(s8) = G_ADD [[COPY]], [[COPY1]]
- ; FAST: %al = COPY [[ADD]](s8)
- ; FAST: RET 0, implicit %al
+ ; FAST: $al = COPY [[ADD]](s8)
+ ; FAST: RET 0, implicit $al
; GREEDY-LABEL: name: test_add_i8
- ; GREEDY: liveins: %edi, %esi
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s8) = COPY %dil
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s8) = COPY %sil
+ ; GREEDY: liveins: $edi, $esi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s8) = COPY $dil
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s8) = COPY $sil
; GREEDY: [[ADD:%[0-9]+]]:gpr(s8) = G_ADD [[COPY]], [[COPY1]]
- ; GREEDY: %al = COPY [[ADD]](s8)
- ; GREEDY: RET 0, implicit %al
- %0(s8) = COPY %dil
- %1(s8) = COPY %sil
+ ; GREEDY: $al = COPY [[ADD]](s8)
+ ; GREEDY: RET 0, implicit $al
+ %0(s8) = COPY $dil
+ %1(s8) = COPY $sil
%2(s8) = G_ADD %0, %1
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
---
@@ -292,27 +292,27 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; FAST-LABEL: name: test_add_i16
- ; FAST: liveins: %edi, %esi
- ; FAST: [[COPY:%[0-9]+]]:gpr(s16) = COPY %di
- ; FAST: [[COPY1:%[0-9]+]]:gpr(s16) = COPY %si
+ ; FAST: liveins: $edi, $esi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s16) = COPY $di
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(s16) = COPY $si
; FAST: [[ADD:%[0-9]+]]:gpr(s16) = G_ADD [[COPY]], [[COPY1]]
- ; FAST: %ax = COPY [[ADD]](s16)
- ; FAST: RET 0, implicit %ax
+ ; FAST: $ax = COPY [[ADD]](s16)
+ ; FAST: RET 0, implicit $ax
; GREEDY-LABEL: name: test_add_i16
- ; GREEDY: liveins: %edi, %esi
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s16) = COPY %di
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s16) = COPY %si
+ ; GREEDY: liveins: $edi, $esi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s16) = COPY $di
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s16) = COPY $si
; GREEDY: [[ADD:%[0-9]+]]:gpr(s16) = G_ADD [[COPY]], [[COPY1]]
- ; GREEDY: %ax = COPY [[ADD]](s16)
- ; GREEDY: RET 0, implicit %ax
- %0(s16) = COPY %di
- %1(s16) = COPY %si
+ ; GREEDY: $ax = COPY [[ADD]](s16)
+ ; GREEDY: RET 0, implicit $ax
+ %0(s16) = COPY $di
+ %1(s16) = COPY $si
%2(s16) = G_ADD %0, %1
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
@@ -328,27 +328,27 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; FAST-LABEL: name: test_add_i32
- ; FAST: liveins: %edi, %esi
- ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
- ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi
+ ; FAST: liveins: $edi, $esi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
; FAST: [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[COPY]], [[COPY1]]
- ; FAST: %eax = COPY [[ADD]](s32)
- ; FAST: RET 0, implicit %eax
+ ; FAST: $eax = COPY [[ADD]](s32)
+ ; FAST: RET 0, implicit $eax
; GREEDY-LABEL: name: test_add_i32
- ; GREEDY: liveins: %edi, %esi
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi
+ ; GREEDY: liveins: $edi, $esi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
; GREEDY: [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[COPY]], [[COPY1]]
- ; GREEDY: %eax = COPY [[ADD]](s32)
- ; GREEDY: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; GREEDY: $eax = COPY [[ADD]](s32)
+ ; GREEDY: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s32) = G_ADD %0, %1
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -364,27 +364,27 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; FAST-LABEL: name: test_add_i64
- ; FAST: liveins: %rdi, %rsi
- ; FAST: [[COPY:%[0-9]+]]:gpr(s64) = COPY %rdi
- ; FAST: [[COPY1:%[0-9]+]]:gpr(s64) = COPY %rsi
+ ; FAST: liveins: $rdi, $rsi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s64) = COPY $rdi
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(s64) = COPY $rsi
; FAST: [[ADD:%[0-9]+]]:gpr(s64) = G_ADD [[COPY]], [[COPY1]]
- ; FAST: %rax = COPY [[ADD]](s64)
- ; FAST: RET 0, implicit %rax
+ ; FAST: $rax = COPY [[ADD]](s64)
+ ; FAST: RET 0, implicit $rax
; GREEDY-LABEL: name: test_add_i64
- ; GREEDY: liveins: %rdi, %rsi
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s64) = COPY %rdi
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s64) = COPY %rsi
+ ; GREEDY: liveins: $rdi, $rsi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s64) = COPY $rdi
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s64) = COPY $rsi
; GREEDY: [[ADD:%[0-9]+]]:gpr(s64) = G_ADD [[COPY]], [[COPY1]]
- ; GREEDY: %rax = COPY [[ADD]](s64)
- ; GREEDY: RET 0, implicit %rax
- %0(s64) = COPY %rdi
- %1(s64) = COPY %rsi
+ ; GREEDY: $rax = COPY [[ADD]](s64)
+ ; GREEDY: RET 0, implicit $rax
+ %0(s64) = COPY $rdi
+ %1(s64) = COPY $rsi
%2(s64) = G_ADD %0, %1
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
---
@@ -449,27 +449,27 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; FAST-LABEL: name: test_add_float
- ; FAST: liveins: %xmm0, %xmm1
- ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY %xmm0
- ; FAST: [[COPY1:%[0-9]+]]:vecr(s32) = COPY %xmm1
+ ; FAST: liveins: $xmm0, $xmm1
+ ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
+ ; FAST: [[COPY1:%[0-9]+]]:vecr(s32) = COPY $xmm1
; FAST: [[FADD:%[0-9]+]]:vecr(s32) = G_FADD [[COPY]], [[COPY1]]
- ; FAST: %xmm0 = COPY [[FADD]](s32)
- ; FAST: RET 0, implicit %xmm0
+ ; FAST: $xmm0 = COPY [[FADD]](s32)
+ ; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_add_float
- ; GREEDY: liveins: %xmm0, %xmm1
- ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY %xmm0
- ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s32) = COPY %xmm1
+ ; GREEDY: liveins: $xmm0, $xmm1
+ ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
+ ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s32) = COPY $xmm1
; GREEDY: [[FADD:%[0-9]+]]:vecr(s32) = G_FADD [[COPY]], [[COPY1]]
- ; GREEDY: %xmm0 = COPY [[FADD]](s32)
- ; GREEDY: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
- %1(s32) = COPY %xmm1
+ ; GREEDY: $xmm0 = COPY [[FADD]](s32)
+ ; GREEDY: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
+ %1(s32) = COPY $xmm1
%2(s32) = G_FADD %0, %1
- %xmm0 = COPY %2(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s32)
+ RET 0, implicit $xmm0
...
---
@@ -485,27 +485,27 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; FAST-LABEL: name: test_add_double
- ; FAST: liveins: %xmm0, %xmm1
- ; FAST: [[COPY:%[0-9]+]]:vecr(s64) = COPY %xmm0
- ; FAST: [[COPY1:%[0-9]+]]:vecr(s64) = COPY %xmm1
+ ; FAST: liveins: $xmm0, $xmm1
+ ; FAST: [[COPY:%[0-9]+]]:vecr(s64) = COPY $xmm0
+ ; FAST: [[COPY1:%[0-9]+]]:vecr(s64) = COPY $xmm1
; FAST: [[FADD:%[0-9]+]]:vecr(s64) = G_FADD [[COPY]], [[COPY1]]
- ; FAST: %xmm0 = COPY [[FADD]](s64)
- ; FAST: RET 0, implicit %xmm0
+ ; FAST: $xmm0 = COPY [[FADD]](s64)
+ ; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_add_double
- ; GREEDY: liveins: %xmm0, %xmm1
- ; GREEDY: [[COPY:%[0-9]+]]:vecr(s64) = COPY %xmm0
- ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s64) = COPY %xmm1
+ ; GREEDY: liveins: $xmm0, $xmm1
+ ; GREEDY: [[COPY:%[0-9]+]]:vecr(s64) = COPY $xmm0
+ ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s64) = COPY $xmm1
; GREEDY: [[FADD:%[0-9]+]]:vecr(s64) = G_FADD [[COPY]], [[COPY1]]
- ; GREEDY: %xmm0 = COPY [[FADD]](s64)
- ; GREEDY: RET 0, implicit %xmm0
- %0(s64) = COPY %xmm0
- %1(s64) = COPY %xmm1
+ ; GREEDY: $xmm0 = COPY [[FADD]](s64)
+ ; GREEDY: RET 0, implicit $xmm0
+ %0(s64) = COPY $xmm0
+ %1(s64) = COPY $xmm1
%2(s64) = G_FADD %0, %1
- %xmm0 = COPY %2(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s64)
+ RET 0, implicit $xmm0
...
---
@@ -650,27 +650,27 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; FAST-LABEL: name: test_add_v4i32
- ; FAST: liveins: %xmm0, %xmm1
- ; FAST: [[COPY:%[0-9]+]]:vecr(<4 x s32>) = COPY %xmm0
- ; FAST: [[COPY1:%[0-9]+]]:vecr(<4 x s32>) = COPY %xmm1
+ ; FAST: liveins: $xmm0, $xmm1
+ ; FAST: [[COPY:%[0-9]+]]:vecr(<4 x s32>) = COPY $xmm0
+ ; FAST: [[COPY1:%[0-9]+]]:vecr(<4 x s32>) = COPY $xmm1
; FAST: [[ADD:%[0-9]+]]:vecr(<4 x s32>) = G_ADD [[COPY]], [[COPY1]]
- ; FAST: %xmm0 = COPY [[ADD]](<4 x s32>)
- ; FAST: RET 0, implicit %xmm0
+ ; FAST: $xmm0 = COPY [[ADD]](<4 x s32>)
+ ; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_add_v4i32
- ; GREEDY: liveins: %xmm0, %xmm1
- ; GREEDY: [[COPY:%[0-9]+]]:vecr(<4 x s32>) = COPY %xmm0
- ; GREEDY: [[COPY1:%[0-9]+]]:vecr(<4 x s32>) = COPY %xmm1
+ ; GREEDY: liveins: $xmm0, $xmm1
+ ; GREEDY: [[COPY:%[0-9]+]]:vecr(<4 x s32>) = COPY $xmm0
+ ; GREEDY: [[COPY1:%[0-9]+]]:vecr(<4 x s32>) = COPY $xmm1
; GREEDY: [[ADD:%[0-9]+]]:vecr(<4 x s32>) = G_ADD [[COPY]], [[COPY1]]
- ; GREEDY: %xmm0 = COPY [[ADD]](<4 x s32>)
- ; GREEDY: RET 0, implicit %xmm0
- %0(<4 x s32>) = COPY %xmm0
- %1(<4 x s32>) = COPY %xmm1
+ ; GREEDY: $xmm0 = COPY [[ADD]](<4 x s32>)
+ ; GREEDY: RET 0, implicit $xmm0
+ %0(<4 x s32>) = COPY $xmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<4 x s32>) = G_ADD %0, %1
- %xmm0 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -686,27 +686,27 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; FAST-LABEL: name: test_add_v4f32
- ; FAST: liveins: %xmm0, %xmm1
- ; FAST: [[COPY:%[0-9]+]]:vecr(<4 x s32>) = COPY %xmm0
- ; FAST: [[COPY1:%[0-9]+]]:vecr(<4 x s32>) = COPY %xmm1
+ ; FAST: liveins: $xmm0, $xmm1
+ ; FAST: [[COPY:%[0-9]+]]:vecr(<4 x s32>) = COPY $xmm0
+ ; FAST: [[COPY1:%[0-9]+]]:vecr(<4 x s32>) = COPY $xmm1
; FAST: [[FADD:%[0-9]+]]:vecr(<4 x s32>) = G_FADD [[COPY]], [[COPY1]]
- ; FAST: %xmm0 = COPY [[FADD]](<4 x s32>)
- ; FAST: RET 0, implicit %xmm0
+ ; FAST: $xmm0 = COPY [[FADD]](<4 x s32>)
+ ; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_add_v4f32
- ; GREEDY: liveins: %xmm0, %xmm1
- ; GREEDY: [[COPY:%[0-9]+]]:vecr(<4 x s32>) = COPY %xmm0
- ; GREEDY: [[COPY1:%[0-9]+]]:vecr(<4 x s32>) = COPY %xmm1
+ ; GREEDY: liveins: $xmm0, $xmm1
+ ; GREEDY: [[COPY:%[0-9]+]]:vecr(<4 x s32>) = COPY $xmm0
+ ; GREEDY: [[COPY1:%[0-9]+]]:vecr(<4 x s32>) = COPY $xmm1
; GREEDY: [[FADD:%[0-9]+]]:vecr(<4 x s32>) = G_FADD [[COPY]], [[COPY1]]
- ; GREEDY: %xmm0 = COPY [[FADD]](<4 x s32>)
- ; GREEDY: RET 0, implicit %xmm0
- %0(<4 x s32>) = COPY %xmm0
- %1(<4 x s32>) = COPY %xmm1
+ ; GREEDY: $xmm0 = COPY [[FADD]](<4 x s32>)
+ ; GREEDY: RET 0, implicit $xmm0
+ %0(<4 x s32>) = COPY $xmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<4 x s32>) = G_FADD %0, %1
- %xmm0 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -720,22 +720,22 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; FAST-LABEL: name: test_load_i8
- ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; FAST: [[LOAD:%[0-9]+]]:gpr(s8) = G_LOAD [[COPY]](p0) :: (load 1 from %ir.p1)
- ; FAST: %al = COPY [[LOAD]](s8)
- ; FAST: RET 0, implicit %al
+ ; FAST: $al = COPY [[LOAD]](s8)
+ ; FAST: RET 0, implicit $al
; GREEDY-LABEL: name: test_load_i8
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; GREEDY: [[LOAD:%[0-9]+]]:gpr(s8) = G_LOAD [[COPY]](p0) :: (load 1 from %ir.p1)
- ; GREEDY: %al = COPY [[LOAD]](s8)
- ; GREEDY: RET 0, implicit %al
- %0(p0) = COPY %rdi
+ ; GREEDY: $al = COPY [[LOAD]](s8)
+ ; GREEDY: RET 0, implicit $al
+ %0(p0) = COPY $rdi
%1(s8) = G_LOAD %0(p0) :: (load 1 from %ir.p1)
- %al = COPY %1(s8)
- RET 0, implicit %al
+ $al = COPY %1(s8)
+ RET 0, implicit $al
...
---
@@ -749,22 +749,22 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; FAST-LABEL: name: test_load_i16
- ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; FAST: [[LOAD:%[0-9]+]]:gpr(s16) = G_LOAD [[COPY]](p0) :: (load 2 from %ir.p1)
- ; FAST: %ax = COPY [[LOAD]](s16)
- ; FAST: RET 0, implicit %ax
+ ; FAST: $ax = COPY [[LOAD]](s16)
+ ; FAST: RET 0, implicit $ax
; GREEDY-LABEL: name: test_load_i16
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; GREEDY: [[LOAD:%[0-9]+]]:gpr(s16) = G_LOAD [[COPY]](p0) :: (load 2 from %ir.p1)
- ; GREEDY: %ax = COPY [[LOAD]](s16)
- ; GREEDY: RET 0, implicit %ax
- %0(p0) = COPY %rdi
+ ; GREEDY: $ax = COPY [[LOAD]](s16)
+ ; GREEDY: RET 0, implicit $ax
+ %0(p0) = COPY $rdi
%1(s16) = G_LOAD %0(p0) :: (load 2 from %ir.p1)
- %ax = COPY %1(s16)
- RET 0, implicit %ax
+ $ax = COPY %1(s16)
+ RET 0, implicit $ax
...
---
@@ -778,22 +778,22 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; FAST-LABEL: name: test_load_i32
- ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; FAST: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.p1)
- ; FAST: %eax = COPY [[LOAD]](s32)
- ; FAST: RET 0, implicit %eax
+ ; FAST: $eax = COPY [[LOAD]](s32)
+ ; FAST: RET 0, implicit $eax
; GREEDY-LABEL: name: test_load_i32
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; GREEDY: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.p1)
- ; GREEDY: %eax = COPY [[LOAD]](s32)
- ; GREEDY: RET 0, implicit %eax
- %0(p0) = COPY %rdi
+ ; GREEDY: $eax = COPY [[LOAD]](s32)
+ ; GREEDY: RET 0, implicit $eax
+ %0(p0) = COPY $rdi
%1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -808,22 +808,22 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; FAST-LABEL: name: test_load_i64
- ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; FAST: [[LOAD:%[0-9]+]]:gpr(s64) = G_LOAD [[COPY]](p0) :: (load 8 from %ir.p1)
- ; FAST: %rax = COPY [[LOAD]](s64)
- ; FAST: RET 0, implicit %rax
+ ; FAST: $rax = COPY [[LOAD]](s64)
+ ; FAST: RET 0, implicit $rax
; GREEDY-LABEL: name: test_load_i64
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; GREEDY: [[LOAD:%[0-9]+]]:gpr(s64) = G_LOAD [[COPY]](p0) :: (load 8 from %ir.p1)
- ; GREEDY: %rax = COPY [[LOAD]](s64)
- ; GREEDY: RET 0, implicit %rax
- %0(p0) = COPY %rdi
+ ; GREEDY: $rax = COPY [[LOAD]](s64)
+ ; GREEDY: RET 0, implicit $rax
+ %0(p0) = COPY $rdi
%1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -837,22 +837,22 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; FAST-LABEL: name: test_load_float
- ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; FAST: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.p1)
- ; FAST: %xmm0 = COPY [[LOAD]](s32)
- ; FAST: RET 0, implicit %xmm0
+ ; FAST: $xmm0 = COPY [[LOAD]](s32)
+ ; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_load_float
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; GREEDY: [[LOAD:%[0-9]+]]:gpr(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.p1)
- ; GREEDY: %xmm0 = COPY [[LOAD]](s32)
- ; GREEDY: RET 0, implicit %xmm0
- %0(p0) = COPY %rdi
+ ; GREEDY: $xmm0 = COPY [[LOAD]](s32)
+ ; GREEDY: RET 0, implicit $xmm0
+ %0(p0) = COPY $rdi
%1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
- %xmm0 = COPY %1(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(s32)
+ RET 0, implicit $xmm0
...
---
@@ -866,22 +866,22 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; FAST-LABEL: name: test_load_double
- ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; FAST: [[LOAD:%[0-9]+]]:gpr(s64) = G_LOAD [[COPY]](p0) :: (load 8 from %ir.p1)
- ; FAST: %xmm0 = COPY [[LOAD]](s64)
- ; FAST: RET 0, implicit %xmm0
+ ; FAST: $xmm0 = COPY [[LOAD]](s64)
+ ; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_load_double
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; GREEDY: [[LOAD:%[0-9]+]]:gpr(s64) = G_LOAD [[COPY]](p0) :: (load 8 from %ir.p1)
- ; GREEDY: %xmm0 = COPY [[LOAD]](s64)
- ; GREEDY: RET 0, implicit %xmm0
- %0(p0) = COPY %rdi
+ ; GREEDY: $xmm0 = COPY [[LOAD]](s64)
+ ; GREEDY: RET 0, implicit $xmm0
+ %0(p0) = COPY $rdi
%1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
- %xmm0 = COPY %1(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(s64)
+ RET 0, implicit $xmm0
...
---
@@ -895,22 +895,22 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; FAST-LABEL: name: test_load_v4i32
- ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; FAST: [[LOAD:%[0-9]+]]:vecr(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.p1, align 1)
- ; FAST: %xmm0 = COPY [[LOAD]](<4 x s32>)
- ; FAST: RET 0, implicit %xmm0
+ ; FAST: $xmm0 = COPY [[LOAD]](<4 x s32>)
+ ; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_load_v4i32
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(p0) = COPY $rdi
; GREEDY: [[LOAD:%[0-9]+]]:vecr(<4 x s32>) = G_LOAD [[COPY]](p0) :: (load 16 from %ir.p1, align 1)
- ; GREEDY: %xmm0 = COPY [[LOAD]](<4 x s32>)
- ; GREEDY: RET 0, implicit %xmm0
- %0(p0) = COPY %rdi
+ ; GREEDY: $xmm0 = COPY [[LOAD]](<4 x s32>)
+ ; GREEDY: RET 0, implicit $xmm0
+ %0(p0) = COPY $rdi
%1(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.p1, align 1)
- %xmm0 = COPY %1(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -924,25 +924,25 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %rsi
+ liveins: $edi, $rsi
; FAST-LABEL: name: test_store_i32
- ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
- ; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY %rsi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rsi
; FAST: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store 4 into %ir.p1)
- ; FAST: %rax = COPY [[COPY1]](p0)
- ; FAST: RET 0, implicit %rax
+ ; FAST: $rax = COPY [[COPY1]](p0)
+ ; FAST: RET 0, implicit $rax
; GREEDY-LABEL: name: test_store_i32
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY %rsi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rsi
; GREEDY: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store 4 into %ir.p1)
- ; GREEDY: %rax = COPY [[COPY1]](p0)
- ; GREEDY: RET 0, implicit %rax
- %0(s32) = COPY %edi
- %1(p0) = COPY %rsi
+ ; GREEDY: $rax = COPY [[COPY1]](p0)
+ ; GREEDY: RET 0, implicit $rax
+ %0(s32) = COPY $edi
+ %1(p0) = COPY $rsi
G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
---
@@ -956,25 +956,25 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; FAST-LABEL: name: test_store_i64
- ; FAST: [[COPY:%[0-9]+]]:gpr(s64) = COPY %rdi
- ; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY %rsi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s64) = COPY $rdi
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rsi
; FAST: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store 8 into %ir.p1)
- ; FAST: %rax = COPY [[COPY1]](p0)
- ; FAST: RET 0, implicit %rax
+ ; FAST: $rax = COPY [[COPY1]](p0)
+ ; FAST: RET 0, implicit $rax
; GREEDY-LABEL: name: test_store_i64
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s64) = COPY %rdi
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY %rsi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s64) = COPY $rdi
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rsi
; GREEDY: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store 8 into %ir.p1)
- ; GREEDY: %rax = COPY [[COPY1]](p0)
- ; GREEDY: RET 0, implicit %rax
- %0(s64) = COPY %rdi
- %1(p0) = COPY %rsi
+ ; GREEDY: $rax = COPY [[COPY1]](p0)
+ ; GREEDY: RET 0, implicit $rax
+ %0(s64) = COPY $rdi
+ %1(p0) = COPY $rsi
G_STORE %0(s64), %1(p0) :: (store 8 into %ir.p1)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
---
@@ -991,29 +991,29 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %xmm0
+ liveins: $rdi, $xmm0
; FAST-LABEL: name: test_store_float
- ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY %xmm0
- ; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rdi
; FAST: [[COPY2:%[0-9]+]]:gpr(s32) = COPY [[COPY]](s32)
; FAST: G_STORE [[COPY2]](s32), [[COPY1]](p0) :: (store 4 into %ir.p1)
- ; FAST: %rax = COPY [[COPY1]](p0)
- ; FAST: RET 0, implicit %rax
+ ; FAST: $rax = COPY [[COPY1]](p0)
+ ; FAST: RET 0, implicit $rax
; GREEDY-LABEL: name: test_store_float
- ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY %xmm0
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rdi
; GREEDY: G_STORE [[COPY]](s32), [[COPY1]](p0) :: (store 4 into %ir.p1)
- ; GREEDY: %rax = COPY [[COPY1]](p0)
- ; GREEDY: RET 0, implicit %rax
- %0(s32) = COPY %xmm0
- %1(p0) = COPY %rdi
+ ; GREEDY: $rax = COPY [[COPY1]](p0)
+ ; GREEDY: RET 0, implicit $rax
+ %0(s32) = COPY $xmm0
+ %1(p0) = COPY $rdi
G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
---
@@ -1030,30 +1030,30 @@ registers:
- { id: 1, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %xmm0
+ liveins: $rdi, $xmm0
; FAST-LABEL: name: test_store_double
- ; FAST: [[COPY:%[0-9]+]]:vecr(s64) = COPY %xmm0
- ; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; FAST: [[COPY:%[0-9]+]]:vecr(s64) = COPY $xmm0
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rdi
; FAST: [[COPY2:%[0-9]+]]:gpr(s64) = COPY [[COPY]](s64)
; FAST: G_STORE [[COPY2]](s64), [[COPY1]](p0) :: (store 8 into %ir.p1)
- ; FAST: %rax = COPY [[COPY1]](p0)
- ; FAST: RET 0, implicit %rax
+ ; FAST: $rax = COPY [[COPY1]](p0)
+ ; FAST: RET 0, implicit $rax
; GREEDY-LABEL: name: test_store_double
- ; GREEDY: [[COPY:%[0-9]+]]:vecr(s64) = COPY %xmm0
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY %rdi
+ ; GREEDY: [[COPY:%[0-9]+]]:vecr(s64) = COPY $xmm0
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(p0) = COPY $rdi
; GREEDY: G_STORE [[COPY]](s64), [[COPY1]](p0) :: (store 8 into %ir.p1)
- ; GREEDY: %rax = COPY [[COPY1]](p0)
- ; GREEDY: RET 0, implicit %rax
- %0(s64) = COPY %xmm0
- %1(p0) = COPY %rdi
+ ; GREEDY: $rax = COPY [[COPY1]](p0)
+ ; GREEDY: RET 0, implicit $rax
+ %0(s64) = COPY $xmm0
+ %1(p0) = COPY $rdi
G_STORE %0(s64), %1(p0) :: (store 8 into %ir.p1)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
---
@@ -1160,34 +1160,34 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; FAST-LABEL: name: test_icmp_eq_i8
- ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
; FAST: [[TRUNC:%[0-9]+]]:gpr(s8) = G_TRUNC [[COPY]](s32)
- ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
; FAST: [[TRUNC1:%[0-9]+]]:gpr(s8) = G_TRUNC [[COPY1]](s32)
; FAST: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[TRUNC]](s8), [[TRUNC1]]
; FAST: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1)
- ; FAST: %al = COPY [[ANYEXT]](s8)
- ; FAST: RET 0, implicit %al
+ ; FAST: $al = COPY [[ANYEXT]](s8)
+ ; FAST: RET 0, implicit $al
; GREEDY-LABEL: name: test_icmp_eq_i8
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
; GREEDY: [[TRUNC:%[0-9]+]]:gpr(s8) = G_TRUNC [[COPY]](s32)
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
; GREEDY: [[TRUNC1:%[0-9]+]]:gpr(s8) = G_TRUNC [[COPY1]](s32)
; GREEDY: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[TRUNC]](s8), [[TRUNC1]]
; GREEDY: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1)
- ; GREEDY: %al = COPY [[ANYEXT]](s8)
- ; GREEDY: RET 0, implicit %al
- %2:_(s32) = COPY %edi
+ ; GREEDY: $al = COPY [[ANYEXT]](s8)
+ ; GREEDY: RET 0, implicit $al
+ %2:_(s32) = COPY $edi
%0:_(s8) = G_TRUNC %2(s32)
- %3:_(s32) = COPY %esi
+ %3:_(s32) = COPY $esi
%1:_(s8) = G_TRUNC %3(s32)
%4:_(s1) = G_ICMP intpred(eq), %0(s8), %1
%5:_(s8) = G_ANYEXT %4(s1)
- %al = COPY %5(s8)
- RET 0, implicit %al
+ $al = COPY %5(s8)
+ RET 0, implicit $al
...
---
@@ -1201,34 +1201,34 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; FAST-LABEL: name: test_icmp_eq_i16
- ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
; FAST: [[TRUNC:%[0-9]+]]:gpr(s16) = G_TRUNC [[COPY]](s32)
- ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
; FAST: [[TRUNC1:%[0-9]+]]:gpr(s16) = G_TRUNC [[COPY1]](s32)
; FAST: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[TRUNC]](s16), [[TRUNC1]]
; FAST: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1)
- ; FAST: %al = COPY [[ANYEXT]](s8)
- ; FAST: RET 0, implicit %al
+ ; FAST: $al = COPY [[ANYEXT]](s8)
+ ; FAST: RET 0, implicit $al
; GREEDY-LABEL: name: test_icmp_eq_i16
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
; GREEDY: [[TRUNC:%[0-9]+]]:gpr(s16) = G_TRUNC [[COPY]](s32)
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
; GREEDY: [[TRUNC1:%[0-9]+]]:gpr(s16) = G_TRUNC [[COPY1]](s32)
; GREEDY: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[TRUNC]](s16), [[TRUNC1]]
; GREEDY: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1)
- ; GREEDY: %al = COPY [[ANYEXT]](s8)
- ; GREEDY: RET 0, implicit %al
- %2:_(s32) = COPY %edi
+ ; GREEDY: $al = COPY [[ANYEXT]](s8)
+ ; GREEDY: RET 0, implicit $al
+ %2:_(s32) = COPY $edi
%0:_(s16) = G_TRUNC %2(s32)
- %3:_(s32) = COPY %esi
+ %3:_(s32) = COPY $esi
%1:_(s16) = G_TRUNC %3(s32)
%4:_(s1) = G_ICMP intpred(eq), %0(s16), %1
%5:_(s8) = G_ANYEXT %4(s1)
- %al = COPY %5(s8)
- RET 0, implicit %al
+ $al = COPY %5(s8)
+ RET 0, implicit $al
...
---
@@ -1242,28 +1242,28 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; FAST-LABEL: name: test_icmp_eq_i32
- ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
- ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
; FAST: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[COPY]](s32), [[COPY1]]
; FAST: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1)
- ; FAST: %al = COPY [[ANYEXT]](s8)
- ; FAST: RET 0, implicit %al
+ ; FAST: $al = COPY [[ANYEXT]](s8)
+ ; FAST: RET 0, implicit $al
; GREEDY-LABEL: name: test_icmp_eq_i32
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
; GREEDY: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[COPY]](s32), [[COPY1]]
; GREEDY: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1)
- ; GREEDY: %al = COPY [[ANYEXT]](s8)
- ; GREEDY: RET 0, implicit %al
- %0:_(s32) = COPY %edi
- %1:_(s32) = COPY %esi
+ ; GREEDY: $al = COPY [[ANYEXT]](s8)
+ ; GREEDY: RET 0, implicit $al
+ %0:_(s32) = COPY $edi
+ %1:_(s32) = COPY $esi
%2:_(s1) = G_ICMP intpred(eq), %0(s32), %1
%3:_(s8) = G_ANYEXT %2(s1)
- %al = COPY %3(s8)
- RET 0, implicit %al
+ $al = COPY %3(s8)
+ RET 0, implicit $al
...
---
@@ -1277,28 +1277,28 @@ registers:
- { id: 2, class: _ }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; FAST-LABEL: name: test_icmp_eq_i64
- ; FAST: [[COPY:%[0-9]+]]:gpr(s64) = COPY %rdi
- ; FAST: [[COPY1:%[0-9]+]]:gpr(s64) = COPY %rsi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s64) = COPY $rdi
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(s64) = COPY $rsi
; FAST: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[COPY]](s64), [[COPY1]]
; FAST: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1)
- ; FAST: %al = COPY [[ANYEXT]](s8)
- ; FAST: RET 0, implicit %al
+ ; FAST: $al = COPY [[ANYEXT]](s8)
+ ; FAST: RET 0, implicit $al
; GREEDY-LABEL: name: test_icmp_eq_i64
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s64) = COPY %rdi
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s64) = COPY %rsi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s64) = COPY $rdi
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s64) = COPY $rsi
; GREEDY: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(eq), [[COPY]](s64), [[COPY1]]
; GREEDY: [[ANYEXT:%[0-9]+]]:gpr(s8) = G_ANYEXT [[ICMP]](s1)
- ; GREEDY: %al = COPY [[ANYEXT]](s8)
- ; GREEDY: RET 0, implicit %al
- %0:_(s64) = COPY %rdi
- %1:_(s64) = COPY %rsi
+ ; GREEDY: $al = COPY [[ANYEXT]](s8)
+ ; GREEDY: RET 0, implicit $al
+ %0:_(s64) = COPY $rdi
+ %1:_(s64) = COPY $rsi
%2:_(s1) = G_ICMP intpred(eq), %0(s64), %1
%3:_(s8) = G_ANYEXT %2(s1)
- %al = COPY %3(s8)
- RET 0, implicit %al
+ $al = COPY %3(s8)
+ RET 0, implicit $al
...
---
@@ -1318,17 +1318,17 @@ body: |
; FAST-LABEL: name: test_xor_i8
; FAST: [[DEF:%[0-9]+]]:gpr(s8) = IMPLICIT_DEF
; FAST: [[XOR:%[0-9]+]]:gpr(s8) = G_XOR [[DEF]], [[DEF]]
- ; FAST: %al = COPY [[XOR]](s8)
- ; FAST: RET 0, implicit %al
+ ; FAST: $al = COPY [[XOR]](s8)
+ ; FAST: RET 0, implicit $al
; GREEDY-LABEL: name: test_xor_i8
; GREEDY: [[DEF:%[0-9]+]]:gpr(s8) = IMPLICIT_DEF
; GREEDY: [[XOR:%[0-9]+]]:gpr(s8) = G_XOR [[DEF]], [[DEF]]
- ; GREEDY: %al = COPY [[XOR]](s8)
- ; GREEDY: RET 0, implicit %al
+ ; GREEDY: $al = COPY [[XOR]](s8)
+ ; GREEDY: RET 0, implicit $al
%0(s8) = IMPLICIT_DEF
%1(s8) = G_XOR %0, %0
- %al = COPY %1(s8)
- RET 0, implicit %al
+ $al = COPY %1(s8)
+ RET 0, implicit $al
...
---
@@ -1348,17 +1348,17 @@ body: |
; FAST-LABEL: name: test_or_i16
; FAST: [[DEF:%[0-9]+]]:gpr(s16) = IMPLICIT_DEF
; FAST: [[OR:%[0-9]+]]:gpr(s16) = G_OR [[DEF]], [[DEF]]
- ; FAST: %ax = COPY [[OR]](s16)
- ; FAST: RET 0, implicit %ax
+ ; FAST: $ax = COPY [[OR]](s16)
+ ; FAST: RET 0, implicit $ax
; GREEDY-LABEL: name: test_or_i16
; GREEDY: [[DEF:%[0-9]+]]:gpr(s16) = IMPLICIT_DEF
; GREEDY: [[OR:%[0-9]+]]:gpr(s16) = G_OR [[DEF]], [[DEF]]
- ; GREEDY: %ax = COPY [[OR]](s16)
- ; GREEDY: RET 0, implicit %ax
+ ; GREEDY: $ax = COPY [[OR]](s16)
+ ; GREEDY: RET 0, implicit $ax
%0(s16) = IMPLICIT_DEF
%1(s16) = G_OR %0, %0
- %ax = COPY %1(s16)
- RET 0, implicit %ax
+ $ax = COPY %1(s16)
+ RET 0, implicit $ax
...
---
@@ -1378,17 +1378,17 @@ body: |
; FAST-LABEL: name: test_and_i32
; FAST: [[DEF:%[0-9]+]]:gpr(s32) = IMPLICIT_DEF
; FAST: [[AND:%[0-9]+]]:gpr(s32) = G_AND [[DEF]], [[DEF]]
- ; FAST: %eax = COPY [[AND]](s32)
- ; FAST: RET 0, implicit %eax
+ ; FAST: $eax = COPY [[AND]](s32)
+ ; FAST: RET 0, implicit $eax
; GREEDY-LABEL: name: test_and_i32
; GREEDY: [[DEF:%[0-9]+]]:gpr(s32) = IMPLICIT_DEF
; GREEDY: [[AND:%[0-9]+]]:gpr(s32) = G_AND [[DEF]], [[DEF]]
- ; GREEDY: %eax = COPY [[AND]](s32)
- ; GREEDY: RET 0, implicit %eax
+ ; GREEDY: $eax = COPY [[AND]](s32)
+ ; GREEDY: RET 0, implicit $eax
%0(s32) = IMPLICIT_DEF
%1(s32) = G_AND %0, %0
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -1408,17 +1408,17 @@ body: |
; FAST-LABEL: name: test_and_i64
; FAST: [[DEF:%[0-9]+]]:gpr(s64) = IMPLICIT_DEF
; FAST: [[AND:%[0-9]+]]:gpr(s64) = G_AND [[DEF]], [[DEF]]
- ; FAST: %rax = COPY [[AND]](s64)
- ; FAST: RET 0, implicit %rax
+ ; FAST: $rax = COPY [[AND]](s64)
+ ; FAST: RET 0, implicit $rax
; GREEDY-LABEL: name: test_and_i64
; GREEDY: [[DEF:%[0-9]+]]:gpr(s64) = IMPLICIT_DEF
; GREEDY: [[AND:%[0-9]+]]:gpr(s64) = G_AND [[DEF]], [[DEF]]
- ; GREEDY: %rax = COPY [[AND]](s64)
- ; GREEDY: RET 0, implicit %rax
+ ; GREEDY: $rax = COPY [[AND]](s64)
+ ; GREEDY: RET 0, implicit $rax
%0(s64) = IMPLICIT_DEF
%1(s64) = G_AND %0, %0
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -1432,15 +1432,15 @@ body: |
bb.1.entry:
; FAST-LABEL: name: test_global_ptrv
; FAST: [[GV:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @g_int
- ; FAST: %rax = COPY [[GV]](p0)
- ; FAST: RET 0, implicit %rax
+ ; FAST: $rax = COPY [[GV]](p0)
+ ; FAST: RET 0, implicit $rax
; GREEDY-LABEL: name: test_global_ptrv
; GREEDY: [[GV:%[0-9]+]]:gpr(p0) = G_GLOBAL_VALUE @g_int
- ; GREEDY: %rax = COPY [[GV]](p0)
- ; GREEDY: RET 0, implicit %rax
+ ; GREEDY: $rax = COPY [[GV]](p0)
+ ; GREEDY: RET 0, implicit $rax
%0(p0) = G_GLOBAL_VALUE @g_int
- %rax = COPY %0(p0)
- RET 0, implicit %rax
+ $rax = COPY %0(p0)
+ RET 0, implicit $rax
...
---
@@ -1458,15 +1458,15 @@ body: |
bb.1 (%ir-block.0):
; FAST-LABEL: name: test_undef
; FAST: [[DEF:%[0-9]+]]:gpr(s8) = G_IMPLICIT_DEF
- ; FAST: %al = COPY [[DEF]](s8)
- ; FAST: RET 0, implicit %al
+ ; FAST: $al = COPY [[DEF]](s8)
+ ; FAST: RET 0, implicit $al
; GREEDY-LABEL: name: test_undef
; GREEDY: [[DEF:%[0-9]+]]:gpr(s8) = G_IMPLICIT_DEF
- ; GREEDY: %al = COPY [[DEF]](s8)
- ; GREEDY: RET 0, implicit %al
+ ; GREEDY: $al = COPY [[DEF]](s8)
+ ; GREEDY: RET 0, implicit $al
%0(s8) = G_IMPLICIT_DEF
- %al = COPY %0(s8)
- RET 0, implicit %al
+ $al = COPY %0(s8)
+ RET 0, implicit $al
...
---
@@ -1484,25 +1484,25 @@ stack:
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; FAST-LABEL: name: test_undef2
- ; FAST: [[COPY:%[0-9]+]]:gpr(s8) = COPY %dil
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s8) = COPY $dil
; FAST: [[DEF:%[0-9]+]]:gpr(s8) = G_IMPLICIT_DEF
; FAST: [[ADD:%[0-9]+]]:gpr(s8) = G_ADD [[COPY]], [[DEF]]
- ; FAST: %al = COPY [[ADD]](s8)
- ; FAST: RET 0, implicit %al
+ ; FAST: $al = COPY [[ADD]](s8)
+ ; FAST: RET 0, implicit $al
; GREEDY-LABEL: name: test_undef2
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s8) = COPY %dil
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s8) = COPY $dil
; GREEDY: [[DEF:%[0-9]+]]:gpr(s8) = G_IMPLICIT_DEF
; GREEDY: [[ADD:%[0-9]+]]:gpr(s8) = G_ADD [[COPY]], [[DEF]]
- ; GREEDY: %al = COPY [[ADD]](s8)
- ; GREEDY: RET 0, implicit %al
- %0(s8) = COPY %dil
+ ; GREEDY: $al = COPY [[ADD]](s8)
+ ; GREEDY: RET 0, implicit $al
+ %0(s8) = COPY $dil
%1(s8) = G_IMPLICIT_DEF
%2(s8) = G_ADD %0, %1
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
---
@@ -1520,15 +1520,15 @@ body: |
bb.1 (%ir-block.0):
; FAST-LABEL: name: test_undef3
; FAST: [[DEF:%[0-9]+]]:gpr(s32) = G_IMPLICIT_DEF
- ; FAST: %xmm0 = COPY [[DEF]](s32)
- ; FAST: RET 0, implicit %xmm0
+ ; FAST: $xmm0 = COPY [[DEF]](s32)
+ ; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_undef3
; GREEDY: [[DEF:%[0-9]+]]:gpr(s32) = G_IMPLICIT_DEF
- ; GREEDY: %xmm0 = COPY [[DEF]](s32)
- ; GREEDY: RET 0, implicit %xmm0
+ ; GREEDY: $xmm0 = COPY [[DEF]](s32)
+ ; GREEDY: RET 0, implicit $xmm0
%0(s32) = G_IMPLICIT_DEF
- %xmm0 = COPY %0(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %0(s32)
+ RET 0, implicit $xmm0
...
---
@@ -1546,27 +1546,27 @@ stack:
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0
+ liveins: $xmm0
; FAST-LABEL: name: test_undef4
- ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY %xmm0
+ ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
; FAST: [[DEF:%[0-9]+]]:gpr(s32) = G_IMPLICIT_DEF
; FAST: [[COPY1:%[0-9]+]]:vecr(s32) = COPY [[DEF]](s32)
; FAST: [[FADD:%[0-9]+]]:vecr(s32) = G_FADD [[COPY]], [[COPY1]]
- ; FAST: %xmm0 = COPY [[FADD]](s32)
- ; FAST: RET 0, implicit %xmm0
+ ; FAST: $xmm0 = COPY [[FADD]](s32)
+ ; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_undef4
- ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY %xmm0
+ ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
; GREEDY: [[DEF:%[0-9]+]]:gpr(s32) = G_IMPLICIT_DEF
; GREEDY: [[COPY1:%[0-9]+]]:vecr(s32) = COPY [[DEF]](s32)
; GREEDY: [[FADD:%[0-9]+]]:vecr(s32) = G_FADD [[COPY]], [[COPY1]]
- ; GREEDY: %xmm0 = COPY [[FADD]](s32)
- ; GREEDY: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
+ ; GREEDY: $xmm0 = COPY [[FADD]](s32)
+ ; GREEDY: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
%1(s32) = G_IMPLICIT_DEF
%2(s32) = G_FADD %0, %1
- %xmm0 = COPY %2(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s32)
+ RET 0, implicit $xmm0
...
---
@@ -1586,10 +1586,10 @@ body: |
; FAST-LABEL: name: test_i32
; FAST: bb.0.entry:
; FAST: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; FAST: liveins: %edi, %edx, %esi
- ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
- ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi
- ; FAST: [[COPY2:%[0-9]+]]:gpr(s32) = COPY %edx
+ ; FAST: liveins: $edi, $edx, $esi
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
+ ; FAST: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
+ ; FAST: [[COPY2:%[0-9]+]]:gpr(s32) = COPY $edx
; FAST: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
; FAST: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
; FAST: G_BRCOND [[ICMP]](s1), %bb.1
@@ -1601,15 +1601,15 @@ body: |
; FAST: successors: %bb.3(0x80000000)
; FAST: bb.3.cond.end:
; FAST: [[PHI:%[0-9]+]]:gpr(s32) = G_PHI [[COPY1]](s32), %bb.1, [[COPY2]](s32), %bb.2
- ; FAST: %eax = COPY [[PHI]](s32)
- ; FAST: RET 0, implicit %eax
+ ; FAST: $eax = COPY [[PHI]](s32)
+ ; FAST: RET 0, implicit $eax
; GREEDY-LABEL: name: test_i32
; GREEDY: bb.0.entry:
; GREEDY: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; GREEDY: liveins: %edi, %edx, %esi
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
- ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY %esi
- ; GREEDY: [[COPY2:%[0-9]+]]:gpr(s32) = COPY %edx
+ ; GREEDY: liveins: $edi, $edx, $esi
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
+ ; GREEDY: [[COPY1:%[0-9]+]]:gpr(s32) = COPY $esi
+ ; GREEDY: [[COPY2:%[0-9]+]]:gpr(s32) = COPY $edx
; GREEDY: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
; GREEDY: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
; GREEDY: G_BRCOND [[ICMP]](s1), %bb.1
@@ -1621,15 +1621,15 @@ body: |
; GREEDY: successors: %bb.3(0x80000000)
; GREEDY: bb.3.cond.end:
; GREEDY: [[PHI:%[0-9]+]]:gpr(s32) = G_PHI [[COPY1]](s32), %bb.1, [[COPY2]](s32), %bb.2
- ; GREEDY: %eax = COPY [[PHI]](s32)
- ; GREEDY: RET 0, implicit %eax
+ ; GREEDY: $eax = COPY [[PHI]](s32)
+ ; GREEDY: RET 0, implicit $eax
bb.0.entry:
successors: %bb.1(0x40000000), %bb.2(0x40000000)
- liveins: %edi, %edx, %esi
+ liveins: $edi, $edx, $esi
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
- %2(s32) = COPY %edx
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
+ %2(s32) = COPY $edx
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
G_BRCOND %4(s1), %bb.1
@@ -1645,8 +1645,8 @@ body: |
bb.3.cond.end:
%5(s32) = G_PHI %1(s32), %bb.1, %2(s32), %bb.2
- %eax = COPY %5(s32)
- RET 0, implicit %eax
+ $eax = COPY %5(s32)
+ RET 0, implicit $eax
...
---
@@ -1666,10 +1666,10 @@ body: |
; FAST-LABEL: name: test_float
; FAST: bb.0.entry:
; FAST: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; FAST: liveins: %edi, %xmm0, %xmm1
- ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
- ; FAST: [[COPY1:%[0-9]+]]:vecr(s32) = COPY %xmm0
- ; FAST: [[COPY2:%[0-9]+]]:vecr(s32) = COPY %xmm1
+ ; FAST: liveins: $edi, $xmm0, $xmm1
+ ; FAST: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
+ ; FAST: [[COPY1:%[0-9]+]]:vecr(s32) = COPY $xmm0
+ ; FAST: [[COPY2:%[0-9]+]]:vecr(s32) = COPY $xmm1
; FAST: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
; FAST: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
; FAST: G_BRCOND [[ICMP]](s1), %bb.1
@@ -1681,15 +1681,15 @@ body: |
; FAST: successors: %bb.3(0x80000000)
; FAST: bb.3.cond.end:
; FAST: [[PHI:%[0-9]+]]:vecr(s32) = G_PHI [[COPY1]](s32), %bb.1, [[COPY2]](s32), %bb.2
- ; FAST: %xmm0 = COPY [[PHI]](s32)
- ; FAST: RET 0, implicit %xmm0
+ ; FAST: $xmm0 = COPY [[PHI]](s32)
+ ; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_float
; GREEDY: bb.0.entry:
; GREEDY: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; GREEDY: liveins: %edi, %xmm0, %xmm1
- ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY %edi
- ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s32) = COPY %xmm0
- ; GREEDY: [[COPY2:%[0-9]+]]:vecr(s32) = COPY %xmm1
+ ; GREEDY: liveins: $edi, $xmm0, $xmm1
+ ; GREEDY: [[COPY:%[0-9]+]]:gpr(s32) = COPY $edi
+ ; GREEDY: [[COPY1:%[0-9]+]]:vecr(s32) = COPY $xmm0
+ ; GREEDY: [[COPY2:%[0-9]+]]:vecr(s32) = COPY $xmm1
; GREEDY: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 0
; GREEDY: [[ICMP:%[0-9]+]]:gpr(s1) = G_ICMP intpred(sgt), [[COPY]](s32), [[C]]
; GREEDY: G_BRCOND [[ICMP]](s1), %bb.1
@@ -1701,15 +1701,15 @@ body: |
; GREEDY: successors: %bb.3(0x80000000)
; GREEDY: bb.3.cond.end:
; GREEDY: [[PHI:%[0-9]+]]:vecr(s32) = G_PHI [[COPY1]](s32), %bb.1, [[COPY2]](s32), %bb.2
- ; GREEDY: %xmm0 = COPY [[PHI]](s32)
- ; GREEDY: RET 0, implicit %xmm0
+ ; GREEDY: $xmm0 = COPY [[PHI]](s32)
+ ; GREEDY: RET 0, implicit $xmm0
bb.0.entry:
successors: %bb.1(0x40000000), %bb.2(0x40000000)
- liveins: %edi, %xmm0, %xmm1
+ liveins: $edi, $xmm0, $xmm1
- %0(s32) = COPY %edi
- %1(s32) = COPY %xmm0
- %2(s32) = COPY %xmm1
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $xmm0
+ %2(s32) = COPY $xmm1
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
G_BRCOND %4(s1), %bb.1
@@ -1725,8 +1725,8 @@ body: |
bb.3.cond.end:
%5(s32) = G_PHI %1(s32), %bb.1, %2(s32), %bb.2
- %xmm0 = COPY %5(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %5(s32)
+ RET 0, implicit $xmm0
...
---
@@ -1739,22 +1739,22 @@ registers:
- { id: 1, class: _, preferred-register: '' }
body: |
bb.1.entry:
- liveins: %xmm0
+ liveins: $xmm0
; FAST-LABEL: name: test_fpext
- ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY %xmm0
+ ; FAST: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
; FAST: [[FPEXT:%[0-9]+]]:vecr(s64) = G_FPEXT [[COPY]](s32)
- ; FAST: %xmm0 = COPY [[FPEXT]](s64)
- ; FAST: RET 0, implicit %xmm0
+ ; FAST: $xmm0 = COPY [[FPEXT]](s64)
+ ; FAST: RET 0, implicit $xmm0
; GREEDY-LABEL: name: test_fpext
- ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY %xmm0
+ ; GREEDY: [[COPY:%[0-9]+]]:vecr(s32) = COPY $xmm0
; GREEDY: [[FPEXT:%[0-9]+]]:vecr(s64) = G_FPEXT [[COPY]](s32)
- ; GREEDY: %xmm0 = COPY [[FPEXT]](s64)
- ; GREEDY: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
+ ; GREEDY: $xmm0 = COPY [[FPEXT]](s64)
+ ; GREEDY: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
%1(s64) = G_FPEXT %0(s32)
- %xmm0 = COPY %1(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(s64)
+ RET 0, implicit $xmm0
...
---
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-GV.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-GV.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-GV.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-GV.mir Wed Jan 31 14:04:26 2018
@@ -41,27 +41,27 @@ registers:
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
# X64: %0:gr64 = IMPLICIT_DEF
-# X64-NEXT: %1:gr64 = LEA64r %noreg, 1, %noreg, @g_int, %noreg
-# X64-NEXT: MOV64mr %0, 1, %noreg, 0, %noreg, %1 :: (store 8 into `i32** undef`)
+# X64-NEXT: %1:gr64 = LEA64r $noreg, 1, $noreg, @g_int, $noreg
+# X64-NEXT: MOV64mr %0, 1, $noreg, 0, $noreg, %1 :: (store 8 into `i32** undef`)
# X64-NEXT: RET 0
#
# X64_DARWIN_PIC: %0:gr64 = IMPLICIT_DEF
-# X64_DARWIN_PIC-NEXT: %1:gr64 = LEA64r %rip, 1, %noreg, @g_int, %noreg
-# X64_DARWIN_PIC-NEXT: MOV64mr %0, 1, %noreg, 0, %noreg, %1 :: (store 8 into `i32** undef`)
+# X64_DARWIN_PIC-NEXT: %1:gr64 = LEA64r $rip, 1, $noreg, @g_int, $noreg
+# X64_DARWIN_PIC-NEXT: MOV64mr %0, 1, $noreg, 0, $noreg, %1 :: (store 8 into `i32** undef`)
# X64_DARWIN_PIC-NEXT: RET 0
#
# X32: %0:gr32 = IMPLICIT_DEF
-# X32-NEXT: %1:gr32 = LEA32r %noreg, 1, %noreg, @g_int, %noreg
-# X32-NEXT: MOV32mr %0, 1, %noreg, 0, %noreg, %1 :: (store 8 into `i32** undef`)
+# X32-NEXT: %1:gr32 = LEA32r $noreg, 1, $noreg, @g_int, $noreg
+# X32-NEXT: MOV32mr %0, 1, $noreg, 0, $noreg, %1 :: (store 8 into `i32** undef`)
# X32-NEXT: RET 0
#
# X32ABI: %0:low32_addr_access = IMPLICIT_DEF
-# X32ABI-NEXT: %1:gr32 = LEA64_32r %noreg, 1, %noreg, @g_int, %noreg
-# X32ABI-NEXT: MOV32mr %0, 1, %noreg, 0, %noreg, %1 :: (store 8 into `i32** undef`)
+# X32ABI-NEXT: %1:gr32 = LEA64_32r $noreg, 1, $noreg, @g_int, $noreg
+# X32ABI-NEXT: MOV32mr %0, 1, $noreg, 0, $noreg, %1 :: (store 8 into `i32** undef`)
# X32ABI-NEXT: RET 0
body: |
bb.1.entry:
- liveins: %rdi
+ liveins: $rdi
%0(p0) = IMPLICIT_DEF
%1(p0) = G_GLOBAL_VALUE @g_int
@@ -85,30 +85,30 @@ regBankSelected: true
registers:
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
-# X64: %1:gr64 = LEA64r %noreg, 1, %noreg, @g_int, %noreg
-# X64-NEXT: %0:gr32 = MOV32rm %1, 1, %noreg, 0, %noreg :: (load 4 from @g_int)
-# X64-NEXT: %eax = COPY %0
-# X64-NEXT: RET 0, implicit %eax
-#
-# X64_DARWIN_PIC: %1:gr64 = LEA64r %rip, 1, %noreg, @g_int, %noreg
-# X64_DARWIN_PIC-NEXT: %0:gr32 = MOV32rm %1, 1, %noreg, 0, %noreg :: (load 4 from @g_int)
-# X64_DARWIN_PIC-NEXT: %eax = COPY %0
-# X64_DARWIN_PIC-NEXT: RET 0, implicit %eax
-#
-# X32: %1:gr32 = LEA32r %noreg, 1, %noreg, @g_int, %noreg
-# X32-NEXT: %0:gr32 = MOV32rm %1, 1, %noreg, 0, %noreg :: (load 4 from @g_int)
-# X32-NEXT: %eax = COPY %0
-# X32-NEXT: RET 0, implicit %eax
-#
-# X32ABI: %1:gr32 = LEA64_32r %noreg, 1, %noreg, @g_int, %noreg
-# X32ABI-NEXT: %0:gr32 = MOV32rm %1, 1, %noreg, 0, %noreg :: (load 4 from @g_int)
-# X32ABI-NEXT: %eax = COPY %0
-# X32ABI-NEXT: RET 0, implicit %eax
+# X64: %1:gr64 = LEA64r $noreg, 1, $noreg, @g_int, $noreg
+# X64-NEXT: %0:gr32 = MOV32rm %1, 1, $noreg, 0, $noreg :: (load 4 from @g_int)
+# X64-NEXT: $eax = COPY %0
+# X64-NEXT: RET 0, implicit $eax
+#
+# X64_DARWIN_PIC: %1:gr64 = LEA64r $rip, 1, $noreg, @g_int, $noreg
+# X64_DARWIN_PIC-NEXT: %0:gr32 = MOV32rm %1, 1, $noreg, 0, $noreg :: (load 4 from @g_int)
+# X64_DARWIN_PIC-NEXT: $eax = COPY %0
+# X64_DARWIN_PIC-NEXT: RET 0, implicit $eax
+#
+# X32: %1:gr32 = LEA32r $noreg, 1, $noreg, @g_int, $noreg
+# X32-NEXT: %0:gr32 = MOV32rm %1, 1, $noreg, 0, $noreg :: (load 4 from @g_int)
+# X32-NEXT: $eax = COPY %0
+# X32-NEXT: RET 0, implicit $eax
+#
+# X32ABI: %1:gr32 = LEA64_32r $noreg, 1, $noreg, @g_int, $noreg
+# X32ABI-NEXT: %0:gr32 = MOV32rm %1, 1, $noreg, 0, $noreg :: (load 4 from @g_int)
+# X32ABI-NEXT: $eax = COPY %0
+# X32ABI-NEXT: RET 0, implicit $eax
body: |
bb.1.entry:
%1(p0) = G_GLOBAL_VALUE @g_int
%0(s32) = G_LOAD %1(p0) :: (load 4 from @g_int)
- %eax = COPY %0(s32)
- RET 0, implicit %eax
+ $eax = COPY %0(s32)
+ RET 0, implicit $eax
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-add-v128.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-add-v128.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-add-v128.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-add-v128.mir Wed Jan 31 14:04:26 2018
@@ -58,13 +58,13 @@ registers:
# AVX512BWVL: %2:vr128x = VPADDBZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<16 x s8>) = COPY %xmm0
- %1(<16 x s8>) = COPY %xmm1
+ %0(<16 x s8>) = COPY $xmm0
+ %1(<16 x s8>) = COPY $xmm1
%2(<16 x s8>) = G_ADD %0, %1
- %xmm0 = COPY %2(<16 x s8>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<16 x s8>)
+ RET 0, implicit $xmm0
...
---
@@ -100,13 +100,13 @@ registers:
# AVX512BWVL: %2:vr128x = VPADDWZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<8 x s16>) = COPY %xmm0
- %1(<8 x s16>) = COPY %xmm1
+ %0(<8 x s16>) = COPY $xmm0
+ %1(<8 x s16>) = COPY $xmm1
%2(<8 x s16>) = G_ADD %0, %1
- %xmm0 = COPY %2(<8 x s16>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<8 x s16>)
+ RET 0, implicit $xmm0
...
---
@@ -142,13 +142,13 @@ registers:
# AVX512BWVL: %2:vr128x = VPADDDZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<4 x s32>) = COPY %xmm0
- %1(<4 x s32>) = COPY %xmm1
+ %0(<4 x s32>) = COPY $xmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<4 x s32>) = G_ADD %0, %1
- %xmm0 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -184,12 +184,12 @@ registers:
# AVX512BWVL: %2:vr128x = VPADDQZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<2 x s64>) = COPY %xmm0
- %1(<2 x s64>) = COPY %xmm1
+ %0(<2 x s64>) = COPY $xmm0
+ %1(<2 x s64>) = COPY $xmm1
%2(<2 x s64>) = G_ADD %0, %1
- %xmm0 = COPY %2(<2 x s64>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<2 x s64>)
+ RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-add-v256.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-add-v256.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-add-v256.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-add-v256.mir Wed Jan 31 14:04:26 2018
@@ -54,13 +54,13 @@ registers:
# AVX512BWVL: %2:vr256x = VPADDBZ256rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<32 x s8>) = COPY %ymm0
- %1(<32 x s8>) = COPY %ymm1
+ %0(<32 x s8>) = COPY $ymm0
+ %1(<32 x s8>) = COPY $ymm1
%2(<32 x s8>) = G_ADD %0, %1
- %ymm0 = COPY %2(<32 x s8>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<32 x s8>)
+ RET 0, implicit $ymm0
...
---
@@ -94,13 +94,13 @@ registers:
# AVX512BWVL: %2:vr256x = VPADDWZ256rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<16 x s16>) = COPY %ymm0
- %1(<16 x s16>) = COPY %ymm1
+ %0(<16 x s16>) = COPY $ymm0
+ %1(<16 x s16>) = COPY $ymm1
%2(<16 x s16>) = G_ADD %0, %1
- %ymm0 = COPY %2(<16 x s16>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<16 x s16>)
+ RET 0, implicit $ymm0
...
---
@@ -134,13 +134,13 @@ registers:
# AVX512BWVL: %2:vr256x = VPADDDZ256rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<8 x s32>) = COPY %ymm0
- %1(<8 x s32>) = COPY %ymm1
+ %0(<8 x s32>) = COPY $ymm0
+ %1(<8 x s32>) = COPY $ymm1
%2(<8 x s32>) = G_ADD %0, %1
- %ymm0 = COPY %2(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<8 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -174,12 +174,12 @@ registers:
# AVX512BWVL: %2:vr256x = VPADDQZ256rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<4 x s64>) = COPY %ymm0
- %1(<4 x s64>) = COPY %ymm1
+ %0(<4 x s64>) = COPY $ymm0
+ %1(<4 x s64>) = COPY $ymm1
%2(<4 x s64>) = G_ADD %0, %1
- %ymm0 = COPY %2(<4 x s64>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<4 x s64>)
+ RET 0, implicit $ymm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-add-v512.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-add-v512.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-add-v512.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-add-v512.mir Wed Jan 31 14:04:26 2018
@@ -36,19 +36,19 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_add_v64i8
- ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+ ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
; ALL: [[VPADDBZrr:%[0-9]+]]:vr512 = VPADDBZrr [[COPY]], [[COPY1]]
- ; ALL: %zmm0 = COPY [[VPADDBZrr]]
- ; ALL: RET 0, implicit %zmm0
- %0(<64 x s8>) = COPY %zmm0
- %1(<64 x s8>) = COPY %zmm1
+ ; ALL: $zmm0 = COPY [[VPADDBZrr]]
+ ; ALL: RET 0, implicit $zmm0
+ %0(<64 x s8>) = COPY $zmm0
+ %1(<64 x s8>) = COPY $zmm1
%2(<64 x s8>) = G_ADD %0, %1
- %zmm0 = COPY %2(<64 x s8>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<64 x s8>)
+ RET 0, implicit $zmm0
...
---
@@ -62,19 +62,19 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_add_v32i16
- ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+ ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
; ALL: [[VPADDWZrr:%[0-9]+]]:vr512 = VPADDWZrr [[COPY]], [[COPY1]]
- ; ALL: %zmm0 = COPY [[VPADDWZrr]]
- ; ALL: RET 0, implicit %zmm0
- %0(<32 x s16>) = COPY %zmm0
- %1(<32 x s16>) = COPY %zmm1
+ ; ALL: $zmm0 = COPY [[VPADDWZrr]]
+ ; ALL: RET 0, implicit $zmm0
+ %0(<32 x s16>) = COPY $zmm0
+ %1(<32 x s16>) = COPY $zmm1
%2(<32 x s16>) = G_ADD %0, %1
- %zmm0 = COPY %2(<32 x s16>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<32 x s16>)
+ RET 0, implicit $zmm0
...
---
@@ -88,19 +88,19 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_add_v16i32
- ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+ ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
; ALL: [[VPADDDZrr:%[0-9]+]]:vr512 = VPADDDZrr [[COPY]], [[COPY1]]
- ; ALL: %zmm0 = COPY [[VPADDDZrr]]
- ; ALL: RET 0, implicit %zmm0
- %0(<16 x s32>) = COPY %zmm0
- %1(<16 x s32>) = COPY %zmm1
+ ; ALL: $zmm0 = COPY [[VPADDDZrr]]
+ ; ALL: RET 0, implicit $zmm0
+ %0(<16 x s32>) = COPY $zmm0
+ %1(<16 x s32>) = COPY $zmm1
%2(<16 x s32>) = G_ADD %0, %1
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $zmm0
...
---
@@ -114,18 +114,18 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_add_v8i64
- ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+ ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
; ALL: [[VPADDQZrr:%[0-9]+]]:vr512 = VPADDQZrr [[COPY]], [[COPY1]]
- ; ALL: %zmm0 = COPY [[VPADDQZrr]]
- ; ALL: RET 0, implicit %zmm0
- %0(<8 x s64>) = COPY %zmm0
- %1(<8 x s64>) = COPY %zmm1
+ ; ALL: $zmm0 = COPY [[VPADDQZrr]]
+ ; ALL: RET 0, implicit $zmm0
+ %0(<8 x s64>) = COPY $zmm0
+ %1(<8 x s64>) = COPY $zmm1
%2(<8 x s64>) = G_ADD %0, %1
- %zmm0 = COPY %2(<8 x s64>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<8 x s64>)
+ RET 0, implicit $zmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-add-x32.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-add-x32.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-add-x32.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-add-x32.mir Wed Jan 31 14:04:26 2018
@@ -30,14 +30,14 @@ body: |
; X32: [[DEF1:%[0-9]+]]:gr32 = IMPLICIT_DEF
; X32: [[DEF2:%[0-9]+]]:gr32 = IMPLICIT_DEF
; X32: [[DEF3:%[0-9]+]]:gr32 = IMPLICIT_DEF
- ; X32: [[ADD32rr:%[0-9]+]]:gr32 = ADD32rr [[DEF]], [[DEF2]], implicit-def %eflags
- ; X32: [[COPY:%[0-9]+]]:gr32 = COPY %eflags
- ; X32: %eflags = COPY [[COPY]]
- ; X32: [[ADC32rr:%[0-9]+]]:gr32 = ADC32rr [[DEF1]], [[DEF3]], implicit-def %eflags, implicit %eflags
- ; X32: [[COPY1:%[0-9]+]]:gr32 = COPY %eflags
- ; X32: %eax = COPY [[ADD32rr]]
- ; X32: %edx = COPY [[ADC32rr]]
- ; X32: RET 0, implicit %eax, implicit %edx
+ ; X32: [[ADD32rr:%[0-9]+]]:gr32 = ADD32rr [[DEF]], [[DEF2]], implicit-def $eflags
+ ; X32: [[COPY:%[0-9]+]]:gr32 = COPY $eflags
+ ; X32: $eflags = COPY [[COPY]]
+ ; X32: [[ADC32rr:%[0-9]+]]:gr32 = ADC32rr [[DEF1]], [[DEF3]], implicit-def $eflags, implicit $eflags
+ ; X32: [[COPY1:%[0-9]+]]:gr32 = COPY $eflags
+ ; X32: $eax = COPY [[ADD32rr]]
+ ; X32: $edx = COPY [[ADC32rr]]
+ ; X32: RET 0, implicit $eax, implicit $edx
%0(s32) = IMPLICIT_DEF
%1(s32) = IMPLICIT_DEF
%2(s32) = IMPLICIT_DEF
@@ -46,8 +46,8 @@ body: |
%4(s1) = G_TRUNC %9(s8)
%5(s32), %6(s1) = G_UADDE %0, %2, %4
%7(s32), %8(s1) = G_UADDE %1, %3, %6
- %eax = COPY %5(s32)
- %edx = COPY %7(s32)
- RET 0, implicit %eax, implicit %edx
+ $eax = COPY %5(s32)
+ $edx = COPY %7(s32)
+ RET 0, implicit $eax, implicit $edx
...
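
The wide-add test above shows how a 64-bit addition is selected on 32-bit x86: the low halves go through ADD32rr, the carry is threaded through EFLAGS, and the high halves go through ADC32rr. A small Python model of the arithmetic, with a hypothetical helper name, just to make the carry flow explicit:

    MASK32 = (1 << 32) - 1

    def add64_via_add_adc(a_lo, a_hi, b_lo, b_hi):
        # ADD32rr: add the low halves and record the carry-out (EFLAGS.CF).
        lo = (a_lo + b_lo) & MASK32
        carry = (a_lo + b_lo) >> 32
        # ADC32rr: add the high halves plus the incoming carry.
        hi = (a_hi + b_hi + carry) & MASK32
        return lo, hi

    assert add64_via_add_adc(MASK32, 0, 1, 0) == (0, 1)
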
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-add.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-add.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-add.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-add.mir Wed Jan 31 14:04:26 2018
@@ -44,17 +44,17 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# ALL: %0:gr64 = COPY %rdi
-# ALL-NEXT: %1:gr64 = COPY %rsi
+# ALL: %0:gr64 = COPY $rdi
+# ALL-NEXT: %1:gr64 = COPY $rsi
# ALL-NEXT: %2:gr64 = ADD64rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
- %0(s64) = COPY %rdi
- %1(s64) = COPY %rsi
+ %0(s64) = COPY $rdi
+ %1(s64) = COPY $rsi
%2(s64) = G_ADD %0, %1
- %rax = COPY %2(s64)
+ $rax = COPY %2(s64)
...
@@ -67,17 +67,17 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# ALL: %0:gr32 = COPY %edi
-# ALL-NEXT: %1:gr32 = COPY %esi
+# ALL: %0:gr32 = COPY $edi
+# ALL-NEXT: %1:gr32 = COPY $esi
# ALL-NEXT: %2:gr32 = ADD32rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s32) = G_ADD %0, %1
- %eax = COPY %2(s32)
+ $eax = COPY %2(s32)
...
---
@@ -91,18 +91,18 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# ALL: %0:gr16 = COPY %di
-# ALL: %1:gr16 = COPY %si
-# ALL: %2:gr16 = ADD16rr %0, %1, implicit-def %eflags
+# ALL: %0:gr16 = COPY $di
+# ALL: %1:gr16 = COPY $si
+# ALL: %2:gr16 = ADD16rr %0, %1, implicit-def $eflags
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
- %0(s16) = COPY %di
- %1(s16) = COPY %si
+ %0(s16) = COPY $di
+ %1(s16) = COPY $si
%2(s16) = G_ADD %0, %1
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
@@ -116,18 +116,18 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# ALL: %0:gr8 = COPY %dil
-# ALL: %1:gr8 = COPY %sil
-# ALL: %2:gr8 = ADD8rr %0, %1, implicit-def %eflags
+# ALL: %0:gr8 = COPY $dil
+# ALL: %1:gr8 = COPY $sil
+# ALL: %2:gr8 = ADD8rr %0, %1, implicit-def $eflags
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
- %0(s8) = COPY %dil
- %1(s8) = COPY %sil
+ %0(s8) = COPY $dil
+ %1(s8) = COPY $sil
%2(s8) = G_ADD %0, %1
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
---
@@ -142,23 +142,23 @@ registers:
- { id: 0, class: vecr }
- { id: 1, class: vecr }
- { id: 2, class: vecr }
-# NO_AVX512VL: %0:vr128 = COPY %xmm0
-# NO_AVX512VL: %1:vr128 = COPY %xmm1
+# NO_AVX512VL: %0:vr128 = COPY $xmm0
+# NO_AVX512VL: %1:vr128 = COPY $xmm1
# SSE-NEXT: %2:vr128 = PADDDrr %0, %1
# AVX-NEXT: %2:vr128 = VPADDDrr %0, %1
# AVX512F-NEXT: %2:vr128 = VPADDDrr %0, %1
-# AVX512VL: %0:vr128x = COPY %xmm0
-# AVX512VL: %1:vr128x = COPY %xmm1
+# AVX512VL: %0:vr128x = COPY $xmm0
+# AVX512VL: %1:vr128x = COPY $xmm1
# AVX512VL-NEXT: %2:vr128x = VPADDDZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<4 x s32>) = COPY %xmm0
- %1(<4 x s32>) = COPY %xmm1
+ %0(<4 x s32>) = COPY $xmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<4 x s32>) = G_ADD %0, %1
- %xmm0 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -173,26 +173,26 @@ registers:
- { id: 0, class: vecr }
- { id: 1, class: vecr }
- { id: 2, class: vecr }
-# SSE: %0:vr128 = COPY %xmm0
-# SSE-NEXT: %1:vr128 = COPY %xmm1
+# SSE: %0:vr128 = COPY $xmm0
+# SSE-NEXT: %1:vr128 = COPY $xmm1
# SSE-NEXT: %2:vr128 = ADDPSrr %0, %1
-# AVX: %0:vr128 = COPY %xmm0
-# AVX-NEXT: %1:vr128 = COPY %xmm1
+# AVX: %0:vr128 = COPY $xmm0
+# AVX-NEXT: %1:vr128 = COPY $xmm1
# AVX-NEXT: %2:vr128 = VADDPSrr %0, %1
-# AVX512F: %0:vr128 = COPY %xmm0
-# AVX512F-NEXT: 1:vr128 = COPY %xmm1
+# AVX512F: %0:vr128 = COPY $xmm0
+# AVX512F-NEXT: %1:vr128 = COPY $xmm1
# AVX512F-NEXT: %2:vr128 = VADDPSrr %0, %1
-# AVX512VL: %0:vr128x = COPY %xmm0
-# AVX512VL-NEXT: %1:vr128x = COPY %xmm1
+# AVX512VL: %0:vr128x = COPY $xmm0
+# AVX512VL-NEXT: %1:vr128x = COPY $xmm1
# AVX512VL-NEXT: %2:vr128x = VADDPSZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<4 x s32>) = COPY %xmm0
- %1(<4 x s32>) = COPY %xmm1
+ %0(<4 x s32>) = COPY $xmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<4 x s32>) = G_FADD %0, %1
- %xmm0 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-and-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-and-scalar.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-and-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-and-scalar.mir Wed Jan 31 14:04:26 2018
@@ -38,19 +38,19 @@ stack:
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; ALL-LABEL: name: test_and_i8
- ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY %dil
- ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY %sil
- ; ALL: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %al = COPY [[AND8rr]]
- ; ALL: RET 0, implicit %al
- %0(s8) = COPY %dil
- %1(s8) = COPY %sil
+ ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY $dil
+ ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY $sil
+ ; ALL: [[AND8rr:%[0-9]+]]:gr8 = AND8rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $al = COPY [[AND8rr]]
+ ; ALL: RET 0, implicit $al
+ %0(s8) = COPY $dil
+ %1(s8) = COPY $sil
%2(s8) = G_AND %0, %1
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
---
@@ -68,19 +68,19 @@ stack:
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; ALL-LABEL: name: test_and_i16
- ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY %di
- ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY %si
- ; ALL: [[AND16rr:%[0-9]+]]:gr16 = AND16rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %ax = COPY [[AND16rr]]
- ; ALL: RET 0, implicit %ax
- %0(s16) = COPY %di
- %1(s16) = COPY %si
+ ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY $di
+ ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY $si
+ ; ALL: [[AND16rr:%[0-9]+]]:gr16 = AND16rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $ax = COPY [[AND16rr]]
+ ; ALL: RET 0, implicit $ax
+ %0(s16) = COPY $di
+ %1(s16) = COPY $si
%2(s16) = G_AND %0, %1
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
@@ -98,19 +98,19 @@ stack:
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; ALL-LABEL: name: test_and_i32
- ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; ALL: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %eax = COPY [[AND32rr]]
- ; ALL: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; ALL: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $eax = COPY [[AND32rr]]
+ ; ALL: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s32) = G_AND %0, %1
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -128,18 +128,18 @@ stack:
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; ALL-LABEL: name: test_and_i64
- ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; ALL: [[AND64rr:%[0-9]+]]:gr64 = AND64rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %rax = COPY [[AND64rr]]
- ; ALL: RET 0, implicit %rax
- %0(s64) = COPY %rdi
- %1(s64) = COPY %rsi
+ ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; ALL: [[AND64rr:%[0-9]+]]:gr64 = AND64rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $rax = COPY [[AND64rr]]
+ ; ALL: RET 0, implicit $rax
+ %0(s64) = COPY $rdi
+ %1(s64) = COPY $rsi
%2(s64) = G_AND %0, %1
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-blsi.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-blsi.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-blsi.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-blsi.mir Wed Jan 31 14:04:26 2018
@@ -21,17 +21,17 @@ registers:
# G_SUB and G_AND both use %0 so we should match this.
body: |
bb.1:
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_blsi32rr
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[BLSI32rr:%[0-9]+]]:gr32 = BLSI32rr [[COPY]], implicit-def %eflags
- ; CHECK: %edi = COPY [[BLSI32rr]]
- %0(s32) = COPY %edi
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[BLSI32rr:%[0-9]+]]:gr32 = BLSI32rr [[COPY]], implicit-def $eflags
+ ; CHECK: $edi = COPY [[BLSI32rr]]
+ %0(s32) = COPY $edi
%1(s32) = G_CONSTANT i32 0
%2(s32) = G_SUB %1, %0
%3(s32) = G_AND %2, %0
- %edi = COPY %3
+ $edi = COPY %3
...
---
@@ -47,17 +47,17 @@ registers:
# G_SUB and G_AND use different operands so we shouldn't match this.
body: |
bb.1:
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_blsi32rr_nomatch
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def %eflags
- ; CHECK: [[SUB32ri:%[0-9]+]]:gr32 = SUB32ri [[MOV32r0_]], 0, implicit-def %eflags
- ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[SUB32ri]], [[COPY]], implicit-def %eflags
- ; CHECK: %edi = COPY [[AND32rr]]
- %0(s32) = COPY %edi
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
+ ; CHECK: [[SUB32ri:%[0-9]+]]:gr32 = SUB32ri [[MOV32r0_]], 0, implicit-def $eflags
+ ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[SUB32ri]], [[COPY]], implicit-def $eflags
+ ; CHECK: $edi = COPY [[AND32rr]]
+ %0(s32) = COPY $edi
%1(s32) = G_CONSTANT i32 0
%2(s32) = G_SUB %1, %1
%3(s32) = G_AND %2, %0
- %edi = COPY %3
+ $edi = COPY %3
...
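
For reference, the pattern these tests exercise is the BMI identity behind BLSI: the lowest set bit of x is (0 - x) & x, which is why the fold is only legal when G_SUB and G_AND see the same x, as the nomatch case demonstrates. A quick Python check with a hypothetical helper name:

    def blsi32(x):
        # BLSI32rr semantics: extract the lowest set bit of x.
        # Matches G_AND(G_SUB(0, x), x); both users must share x.
        return (0 - x) & x & 0xFFFFFFFF

    assert blsi32(0b10110100) == 0b00000100
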
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-blsr.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-blsr.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-blsr.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-blsr.mir Wed Jan 31 14:04:26 2018
@@ -18,17 +18,17 @@ registers:
# G_ADD and G_AND both use %0 so we should match this.
body: |
bb.1:
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_blsr32rr
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[BLSR32rr:%[0-9]+]]:gr32 = BLSR32rr [[COPY]], implicit-def %eflags
- ; CHECK: %edi = COPY [[BLSR32rr]]
- %0(s32) = COPY %edi
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[BLSR32rr:%[0-9]+]]:gr32 = BLSR32rr [[COPY]], implicit-def $eflags
+ ; CHECK: $edi = COPY [[BLSR32rr]]
+ %0(s32) = COPY $edi
%1(s32) = G_CONSTANT i32 -1
%2(s32) = G_ADD %0, %1
%3(s32) = G_AND %2, %0
- %edi = COPY %3
+ $edi = COPY %3
...
---
@@ -44,17 +44,17 @@ registers:
# G_ADD and G_AND use different operands so we shouldn't match this.
body: |
bb.1:
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: test_blsr32rr_nomatch
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
; CHECK: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 4294967295
- ; CHECK: [[DEC32r:%[0-9]+]]:gr32 = DEC32r [[MOV32ri]], implicit-def %eflags
- ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[DEC32r]], [[COPY]], implicit-def %eflags
- ; CHECK: %edi = COPY [[AND32rr]]
- %0(s32) = COPY %edi
+ ; CHECK: [[DEC32r:%[0-9]+]]:gr32 = DEC32r [[MOV32ri]], implicit-def $eflags
+ ; CHECK: [[AND32rr:%[0-9]+]]:gr32 = AND32rr [[DEC32r]], [[COPY]], implicit-def $eflags
+ ; CHECK: $edi = COPY [[AND32rr]]
+ %0(s32) = COPY $edi
%1(s32) = G_CONSTANT i32 -1
%2(s32) = G_ADD %1, %1
%3(s32) = G_AND %2, %0
- %edi = COPY %3
+ $edi = COPY %3
...
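
The same shape applies here with the complementary identity: BLSR clears the lowest set bit via (x + (-1)) & x, so again the match requires G_ADD and G_AND to share their x operand. A sketch with a hypothetical helper name:

    def blsr32(x):
        # BLSR32rr semantics: clear the lowest set bit of x.
        # x - 1 flips the lowest set bit and everything below it,
        # so ANDing with x clears exactly that one bit.
        return (x - 1) & x & 0xFFFFFFFF

    assert blsr32(0b10110100) == 0b10110000
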
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-brcond.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-brcond.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-brcond.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-brcond.mir Wed Jan 31 14:04:26 2018
@@ -27,28 +27,28 @@ registers:
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: gpr, preferred-register: '' }
- { id: 3, class: gpr, preferred-register: '' }
-# X64: %0:gr32 = COPY %edi
-# X32: %0:gr32_abcd = COPY %edi
-# CHECK-NEXT: %2:gr32 = MOV32r0 implicit-def %eflags
+# X64: %0:gr32 = COPY $edi
+# X32: %0:gr32_abcd = COPY $edi
+# CHECK-NEXT: %2:gr32 = MOV32r0 implicit-def $eflags
# CHECK-NEXT: %3:gr32 = MOV32ri 1
# CHECK-NEXT: %1:gr8 = COPY %0.sub_8bit
-# CHECK-NEXT: TEST8ri %1, 1, implicit-def %eflags
-# CHECK-NEXT: JNE_1 %[[TRUE:bb.[0-9]+]], implicit %eflags
+# CHECK-NEXT: TEST8ri %1, 1, implicit-def $eflags
+# CHECK-NEXT: JNE_1 %[[TRUE:bb.[0-9]+]], implicit $eflags
# CHECK-NEXT: JMP_1 %[[FALSE:bb.[0-9]+]]
# CHECK: [[TRUE]].{{[a-zA-Z0-9]+}}:
-# CHECK-NEXT: %eax = COPY %2
-# CHECK-NEXT: RET 0, implicit %eax
+# CHECK-NEXT: $eax = COPY %2
+# CHECK-NEXT: RET 0, implicit $eax
# CHECK: [[FALSE]].{{[a-zA-Z0-9]+}}:
-# CHECK-NEXT: %eax = COPY %3
-# CHECK-NEXT: RET 0, implicit %eax
+# CHECK-NEXT: $eax = COPY %3
+# CHECK-NEXT: RET 0, implicit $eax
body: |
bb.1.entry:
successors: %bb.2(0x40000000), %bb.3(0x40000000)
- liveins: %edi
+ liveins: $edi
- %0(s32) = COPY %edi
+ %0(s32) = COPY $edi
%2(s32) = G_CONSTANT i32 0
%3(s32) = G_CONSTANT i32 1
%1(s1) = G_TRUNC %0(s32)
@@ -56,11 +56,11 @@ body: |
G_BR %bb.3
bb.2.true:
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
bb.3.false:
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
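
The branch test above shows the selected shape for a conditional branch on a truncated value: only bit 0 is meaningful for an i1, so the selector emits TEST8ri %1, 1 followed by JNE_1 rather than comparing the whole register. In Python terms, with a hypothetical helper name:

    def brcond_result(cond):
        # TEST8ri ..., 1 / JNE_1: branch purely on the low bit; the true
        # block returns the constant 0, the false block the constant 1.
        return 0 if (cond & 1) else 1

    assert brcond_result(3) == 0 and brcond_result(2) == 1
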
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-cmp.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-cmp.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-cmp.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-cmp.mir Wed Jan 31 14:04:26 2018
@@ -93,23 +93,23 @@ registers:
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_icmp_eq_i8
- ; CHECK: [[COPY:%[0-9]+]]:gr8 = COPY %dil
- ; CHECK: [[COPY1:%[0-9]+]]:gr8 = COPY %sil
- ; CHECK: CMP8rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr8 = COPY $dil
+ ; CHECK: [[COPY1:%[0-9]+]]:gr8 = COPY $sil
+ ; CHECK: CMP8rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETEr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s8) = COPY %dil
- %1(s8) = COPY %sil
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s8) = COPY $dil
+ %1(s8) = COPY $sil
%2(s1) = G_ICMP intpred(eq), %0(s8), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -124,23 +124,23 @@ registers:
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_icmp_eq_i16
- ; CHECK: [[COPY:%[0-9]+]]:gr16 = COPY %di
- ; CHECK: [[COPY1:%[0-9]+]]:gr16 = COPY %si
- ; CHECK: CMP16rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr16 = COPY $di
+ ; CHECK: [[COPY1:%[0-9]+]]:gr16 = COPY $si
+ ; CHECK: CMP16rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETEr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s16) = COPY %di
- %1(s16) = COPY %si
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s16) = COPY $di
+ %1(s16) = COPY $si
%2(s1) = G_ICMP intpred(eq), %0(s16), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -155,23 +155,23 @@ registers:
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; CHECK-LABEL: name: test_icmp_eq_i64
- ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; CHECK: CMP64rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; CHECK: CMP64rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETEr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s64) = COPY %rdi
- %1(s64) = COPY %rsi
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s64) = COPY $rdi
+ %1(s64) = COPY $rsi
%2(s1) = G_ICMP intpred(eq), %0(s64), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -186,23 +186,23 @@ registers:
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_icmp_eq_i32
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETEr:%[0-9]+]]:gr8 = SETEr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETEr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s1) = G_ICMP intpred(eq), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -217,23 +217,23 @@ registers:
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_icmp_ne_i32
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETNEr:%[0-9]+]]:gr8 = SETNEr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETNEr:%[0-9]+]]:gr8 = SETNEr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETNEr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s1) = G_ICMP intpred(ne), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -248,23 +248,23 @@ registers:
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_icmp_ugt_i32
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETAr:%[0-9]+]]:gr8 = SETAr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETAr:%[0-9]+]]:gr8 = SETAr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETAr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s1) = G_ICMP intpred(ugt), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -279,23 +279,23 @@ registers:
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_icmp_uge_i32
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETAEr:%[0-9]+]]:gr8 = SETAEr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETAEr:%[0-9]+]]:gr8 = SETAEr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETAEr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s1) = G_ICMP intpred(uge), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -310,23 +310,23 @@ registers:
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_icmp_ult_i32
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETBr:%[0-9]+]]:gr8 = SETBr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETBr:%[0-9]+]]:gr8 = SETBr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETBr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s1) = G_ICMP intpred(ult), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -341,23 +341,23 @@ registers:
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_icmp_ule_i32
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETBEr:%[0-9]+]]:gr8 = SETBEr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETBEr:%[0-9]+]]:gr8 = SETBEr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETBEr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s1) = G_ICMP intpred(ule), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -372,23 +372,23 @@ registers:
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_icmp_sgt_i32
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETGr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s1) = G_ICMP intpred(sgt), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -403,23 +403,23 @@ registers:
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_icmp_sge_i32
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETGEr:%[0-9]+]]:gr8 = SETGEr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETGEr:%[0-9]+]]:gr8 = SETGEr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETGEr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s1) = G_ICMP intpred(sge), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -434,23 +434,23 @@ registers:
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_icmp_slt_i32
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETLr:%[0-9]+]]:gr8 = SETLr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETLr:%[0-9]+]]:gr8 = SETLr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETLr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s1) = G_ICMP intpred(slt), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
---
@@ -465,22 +465,22 @@ registers:
- { id: 3, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; CHECK-LABEL: name: test_icmp_sle_i32
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; CHECK: [[SETLEr:%[0-9]+]]:gr8 = SETLEr implicit %eflags
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; CHECK: CMP32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; CHECK: [[SETLEr:%[0-9]+]]:gr8 = SETLEr implicit $eflags
; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr32 = SUBREG_TO_REG 0, [[SETLEr]], %subreg.sub_8bit
- ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; CHECK: %eax = COPY [[AND32ri8_]]
- ; CHECK: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; CHECK: [[AND32ri8_:%[0-9]+]]:gr32 = AND32ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; CHECK: $eax = COPY [[AND32ri8_]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s1) = G_ICMP intpred(sle), %0(s32), %1
%3(s32) = G_ZEXT %2(s1)
- %eax = COPY %3(s32)
- RET 0, implicit %eax
+ $eax = COPY %3(s32)
+ RET 0, implicit $eax
...
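
All of the icmp variants above share one selected shape: CMPrr sets EFLAGS, a SETcc materializes the predicate into an 8-bit register, SUBREG_TO_REG widens it, and AND32ri8 ..., 1 masks the result down to the low bit so the G_ZEXT contract holds. Modeled in Python with a hypothetical helper name:

    def icmp_eq_zext32(a, b):
        # SETEr yields 0 or 1 in a gr8; the final AND ..., 1 guarantees
        # the widened gr32 value is exactly the zero-extended i1.
        flag = 1 if a == b else 0   # CMP32rr + SETEr
        return flag & 1             # SUBREG_TO_REG + AND32ri8

    assert icmp_eq_zext32(7, 7) == 1
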
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-constant.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-constant.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-constant.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-constant.mir Wed Jan 31 14:04:26 2018
@@ -47,11 +47,11 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: const_i8
; CHECK: [[MOV8ri:%[0-9]+]]:gr8 = MOV8ri 2
- ; CHECK: %al = COPY [[MOV8ri]]
- ; CHECK: RET 0, implicit %al
+ ; CHECK: $al = COPY [[MOV8ri]]
+ ; CHECK: RET 0, implicit $al
%0(s8) = G_CONSTANT i8 2
- %al = COPY %0(s8)
- RET 0, implicit %al
+ $al = COPY %0(s8)
+ RET 0, implicit $al
...
---
@@ -65,11 +65,11 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: const_i16
; CHECK: [[MOV16ri:%[0-9]+]]:gr16 = MOV16ri 3
- ; CHECK: %ax = COPY [[MOV16ri]]
- ; CHECK: RET 0, implicit %ax
+ ; CHECK: $ax = COPY [[MOV16ri]]
+ ; CHECK: RET 0, implicit $ax
%0(s16) = G_CONSTANT i16 3
- %ax = COPY %0(s16)
- RET 0, implicit %ax
+ $ax = COPY %0(s16)
+ RET 0, implicit $ax
...
---
@@ -83,11 +83,11 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: const_i32
; CHECK: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 4
- ; CHECK: %eax = COPY [[MOV32ri]]
- ; CHECK: RET 0, implicit %eax
+ ; CHECK: $eax = COPY [[MOV32ri]]
+ ; CHECK: RET 0, implicit $eax
%0(s32) = G_CONSTANT i32 4
- %eax = COPY %0(s32)
- RET 0, implicit %eax
+ $eax = COPY %0(s32)
+ RET 0, implicit $eax
...
---
@@ -99,12 +99,12 @@ registers:
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: const_i32_0
- ; CHECK: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def %eflags
- ; CHECK: %eax = COPY [[MOV32r0_]]
- ; CHECK: RET 0, implicit %eax
+ ; CHECK: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
+ ; CHECK: $eax = COPY [[MOV32r0_]]
+ ; CHECK: RET 0, implicit $eax
%0(s32) = G_CONSTANT i32 0
- %eax = COPY %0(s32)
- RET 0, implicit %eax
+ $eax = COPY %0(s32)
+ RET 0, implicit $eax
...
---
@@ -118,11 +118,11 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: const_i64
; CHECK: [[MOV64ri:%[0-9]+]]:gr64 = MOV64ri 68719476720
- ; CHECK: %rax = COPY [[MOV64ri]]
- ; CHECK: RET 0, implicit %rax
+ ; CHECK: $rax = COPY [[MOV64ri]]
+ ; CHECK: RET 0, implicit $rax
%0(s64) = G_CONSTANT i64 68719476720
- %rax = COPY %0(s64)
- RET 0, implicit %rax
+ $rax = COPY %0(s64)
+ RET 0, implicit $rax
...
---
@@ -137,11 +137,11 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: const_i64_u32
; CHECK: [[MOV64ri32_:%[0-9]+]]:gr64 = MOV64ri32 1879048192
- ; CHECK: %rax = COPY [[MOV64ri32_]]
- ; CHECK: RET 0, implicit %rax
+ ; CHECK: $rax = COPY [[MOV64ri32_]]
+ ; CHECK: RET 0, implicit $rax
%0(s64) = G_CONSTANT i64 1879048192
- %rax = COPY %0(s64)
- RET 0, implicit %rax
+ $rax = COPY %0(s64)
+ RET 0, implicit $rax
...
---
@@ -155,11 +155,11 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: const_i64_i32
; CHECK: [[MOV64ri32_:%[0-9]+]]:gr64 = MOV64ri32 -1
- ; CHECK: %rax = COPY [[MOV64ri32_]]
- ; CHECK: RET 0, implicit %rax
+ ; CHECK: $rax = COPY [[MOV64ri32_]]
+ ; CHECK: RET 0, implicit $rax
%0(s64) = G_CONSTANT i64 -1
- %rax = COPY %0(s64)
- RET 0, implicit %rax
+ $rax = COPY %0(s64)
+ RET 0, implicit $rax
...
---
@@ -172,14 +172,14 @@ registers:
- { id: 1, class: gpr, preferred-register: '' }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; CHECK-LABEL: name: main
- ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; CHECK: [[MOV64ri32_:%[0-9]+]]:gr64 = MOV64ri32 0
- ; CHECK: MOV64mr [[COPY]], 1, %noreg, 0, %noreg, [[MOV64ri32_]] :: (store 8 into %ir.data)
+ ; CHECK: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[MOV64ri32_]] :: (store 8 into %ir.data)
; CHECK: RET 0
- %0(p0) = COPY %rdi
+ %0(p0) = COPY $rdi
%1(p0) = G_CONSTANT i64 0
G_STORE %1(p0), %0(p0) :: (store 8 into %ir.data)
RET 0
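
The constant tests above also document the immediate-size choice: 0 gets MOV32r0, a 64-bit value that fits a sign-extended 32-bit immediate (1879048192, and -1) gets MOV64ri32, and anything wider (68719476720) needs the full MOV64ri. The selection predicate is just a signed-range check; 'fits_simm32' is a hypothetical helper name:

    def fits_simm32(v):
        # MOV64ri32 sign-extends its 32-bit immediate to 64 bits.
        return -(1 << 31) <= v < (1 << 31)

    assert not fits_simm32(68719476720)   # -> MOV64ri
    assert fits_simm32(1879048192)        # -> MOV64ri32
    assert fits_simm32(-1)                # -> MOV64ri32 -1
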
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-copy.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-copy.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-copy.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-copy.mir Wed Jan 31 14:04:26 2018
@@ -40,18 +40,18 @@ regBankSelected: true
registers:
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
-# ALL %0:gr8 = COPY %al
+# ALL %0:gr8 = COPY $al
# ALL-NEXT %1:gr32 = MOVZX32rr8 %0
-# ALL-NEXT %eax = COPY %1
-# ALL-NEXT RET 0, implicit %eax
+# ALL-NEXT $eax = COPY %1
+# ALL-NEXT RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %eax
+ liveins: $eax
- %0(s8) = COPY %al
+ %0(s8) = COPY $al
%1(s32) = G_ZEXT %0(s8)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -66,18 +66,18 @@ regBankSelected: true
registers:
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
-# ALL: %0:gr8 = COPY %al
+# ALL: %0:gr8 = COPY $al
# ALL-NEXT: %1:gr32 = MOVZX32rr8 %0
-# ALL-NEXT: %eax = COPY %1
-# ALL-NEXT: RET 0, implicit %eax
+# ALL-NEXT: $eax = COPY %1
+# ALL-NEXT: RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %eax
+ liveins: $eax
- %0(s8) = COPY %al
+ %0(s8) = COPY $al
%1(s32) = G_ZEXT %0(s8)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -94,20 +94,20 @@ registers:
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: gpr, preferred-register: '' }
-# ALL %0:gr16 = COPY %ax
+# ALL %0:gr16 = COPY $ax
# ALL-NEXT %1:gr8 = COPY %0.sub_8bit
# ALL-NEXT %2:gr32 = MOVZX32rr8 %1
-# ALL-NEXT %eax = COPY %2
-# ALL-NEXT RET 0, implicit %eax
+# ALL-NEXT $eax = COPY %2
+# ALL-NEXT RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %eax
+ liveins: $eax
- %0(s16) = COPY %ax
+ %0(s16) = COPY $ax
%1(s8) = G_TRUNC %0(s16)
%2(s32) = G_ZEXT %1(s8)
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -124,20 +124,20 @@ registers:
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: gpr, preferred-register: '' }
-# ALL %0:gr32 = COPY %eax
+# ALL %0:gr32 = COPY $eax
# ALL-NEXT %1:gr16 = COPY %0.sub_16bit
# ALL-NEXT %2:gr32 = MOVZX32rr16 %1
-# ALL-NEXT %eax = COPY %2
-# ALL-NEXT RET 0, implicit %eax
+# ALL-NEXT $eax = COPY %2
+# ALL-NEXT RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %eax
+ liveins: $eax
- %0(s32) = COPY %eax
+ %0(s32) = COPY $eax
%1(s16) = G_TRUNC %0(s32)
%2(s32) = G_ZEXT %1(s16)
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -154,20 +154,20 @@ registers:
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: gpr, preferred-register: '' }
-# ALL %0:gr32[[ABCD]] = COPY %edx
+# ALL %0:gr32[[ABCD]] = COPY $edx
# ALL-NEXT %1:gr8 = COPY %0.sub_8bit
# ALL-NEXT %2:gr32 = MOVZX32rr8 %1
-# ALL-NEXT %eax = COPY %2
-# ALL-NEXT RET 0, implicit %eax
+# ALL-NEXT $eax = COPY %2
+# ALL-NEXT RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %eax,%edx
+ liveins: $eax,$edx
- %0(s32) = COPY %edx
+ %0(s32) = COPY $edx
%1(s8) = G_TRUNC %0(s32)
%2(s32) = G_ANYEXT %1(s8)
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -184,20 +184,20 @@ registers:
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: gpr, preferred-register: '' }
-# ALL %0:gr32 = COPY %edx
+# ALL %0:gr32 = COPY $edx
# ALL-NEXT %1:gr16 = COPY %0.sub_16bit
# ALL-NEXT %2:gr32 = SUBREG_TO_REG 0, %1, %subreg.sub_16bit
-# ALL-NEXT %eax = COPY %2
-# ALL-NEXT RET 0, implicit %eax
+# ALL-NEXT $eax = COPY %2
+# ALL-NEXT RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %eax,%edx
+ liveins: $eax,$edx
- %0(s32) = COPY %edx
+ %0(s32) = COPY $edx
%1(s16) = G_TRUNC %0(s32)
%2(s32) = G_ANYEXT %1(s16)
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
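
These copy tests exercise the trunc-then-extend shape: the truncation itself is free, just a sub-register COPY of the incoming physical register, and only the extend costs an instruction, MOVZX for zext and SUBREG_TO_REG for anyext. Value-level, with a hypothetical helper name:

    def trunc_zext_i16_to_i32(x):
        # COPY %0.sub_16bit then MOVZX32rr16: take the low 16 bits and
        # zero-extend them back to 32.
        return x & 0xFFFF

    assert trunc_zext_i16_to_i32(0xDEADBEEF) == 0xBEEF
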
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir Wed Jan 31 14:04:26 2018
@@ -34,19 +34,19 @@ registers:
- { id: 2, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; ALL-LABEL: name: test_zext_i1
- ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY %dil
+ ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY $dil
; ALL: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, [[COPY]], %subreg.sub_8bit
- ; ALL: [[AND64ri8_:%[0-9]+]]:gr64 = AND64ri8 [[SUBREG_TO_REG]], 1, implicit-def %eflags
- ; ALL: %rax = COPY [[AND64ri8_]]
- ; ALL: RET 0, implicit %rax
- %0(s8) = COPY %dil
+ ; ALL: [[AND64ri8_:%[0-9]+]]:gr64 = AND64ri8 [[SUBREG_TO_REG]], 1, implicit-def $eflags
+ ; ALL: $rax = COPY [[AND64ri8_]]
+ ; ALL: RET 0, implicit $rax
+ %0(s8) = COPY $dil
%1(s1) = G_TRUNC %0(s8)
%2(s64) = G_ZEXT %1(s1)
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
---
@@ -59,17 +59,17 @@ registers:
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; ALL-LABEL: name: test_sext_i8
- ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY %dil
+ ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY $dil
; ALL: [[MOVSX64rr8_:%[0-9]+]]:gr64 = MOVSX64rr8 [[COPY]]
- ; ALL: %rax = COPY [[MOVSX64rr8_]]
- ; ALL: RET 0, implicit %rax
- %0(s8) = COPY %dil
+ ; ALL: $rax = COPY [[MOVSX64rr8_]]
+ ; ALL: RET 0, implicit $rax
+ %0(s8) = COPY $dil
%1(s64) = G_SEXT %0(s8)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -82,17 +82,17 @@ registers:
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; ALL-LABEL: name: test_sext_i16
- ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY %di
+ ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY $di
; ALL: [[MOVSX64rr16_:%[0-9]+]]:gr64 = MOVSX64rr16 [[COPY]]
- ; ALL: %rax = COPY [[MOVSX64rr16_]]
- ; ALL: RET 0, implicit %rax
- %0(s16) = COPY %di
+ ; ALL: $rax = COPY [[MOVSX64rr16_]]
+ ; ALL: RET 0, implicit $rax
+ %0(s16) = COPY $di
%1(s64) = G_SEXT %0(s16)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -106,19 +106,19 @@ registers:
- { id: 2, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; ALL-LABEL: name: anyext_s64_from_s1
- ; ALL: [[COPY:%[0-9]+]]:gr64_with_sub_8bit = COPY %rdi
+ ; ALL: [[COPY:%[0-9]+]]:gr64_with_sub_8bit = COPY $rdi
; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit
; ALL: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, [[COPY1]], %subreg.sub_8bit
- ; ALL: %rax = COPY [[SUBREG_TO_REG]]
- ; ALL: RET 0, implicit %rax
- %0(s64) = COPY %rdi
+ ; ALL: $rax = COPY [[SUBREG_TO_REG]]
+ ; ALL: RET 0, implicit $rax
+ %0(s64) = COPY $rdi
%1(s1) = G_TRUNC %0(s64)
%2(s64) = G_ANYEXT %1(s1)
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
---
name: anyext_s64_from_s8
@@ -131,19 +131,19 @@ registers:
- { id: 2, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; ALL-LABEL: name: anyext_s64_from_s8
- ; ALL: [[COPY:%[0-9]+]]:gr64_with_sub_8bit = COPY %rdi
+ ; ALL: [[COPY:%[0-9]+]]:gr64_with_sub_8bit = COPY $rdi
; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit
; ALL: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, [[COPY1]], %subreg.sub_8bit
- ; ALL: %rax = COPY [[SUBREG_TO_REG]]
- ; ALL: RET 0, implicit %rax
- %0(s64) = COPY %rdi
+ ; ALL: $rax = COPY [[SUBREG_TO_REG]]
+ ; ALL: RET 0, implicit $rax
+ %0(s64) = COPY $rdi
%1(s8) = G_TRUNC %0(s64)
%2(s64) = G_ANYEXT %1(s8)
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
---
name: anyext_s64_from_s16
@@ -156,19 +156,19 @@ registers:
- { id: 2, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; ALL-LABEL: name: anyext_s64_from_s16
- ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
+ ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit
; ALL: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, [[COPY1]], %subreg.sub_16bit
- ; ALL: %rax = COPY [[SUBREG_TO_REG]]
- ; ALL: RET 0, implicit %rax
- %0(s64) = COPY %rdi
+ ; ALL: $rax = COPY [[SUBREG_TO_REG]]
+ ; ALL: RET 0, implicit $rax
+ %0(s64) = COPY $rdi
%1(s16) = G_TRUNC %0(s64)
%2(s64) = G_ANYEXT %1(s16)
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
---
name: anyext_s64_from_s32
@@ -181,17 +181,17 @@ registers:
- { id: 2, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; ALL-LABEL: name: anyext_s64_from_s32
- ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
+ ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY [[COPY]].sub_32bit
; ALL: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, [[COPY1]], %subreg.sub_32bit
- ; ALL: %rax = COPY [[SUBREG_TO_REG]]
- ; ALL: RET 0, implicit %rax
- %0(s64) = COPY %rdi
+ ; ALL: $rax = COPY [[SUBREG_TO_REG]]
+ ; ALL: RET 0, implicit $rax
+ %0(s64) = COPY $rdi
%1(s32) = G_TRUNC %0(s64)
%2(s64) = G_ANYEXT %1(s32)
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
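
The extension tests above pair each G_*EXT with its x86 idiom: zext of an i1 becomes an AND with 1, sext goes through MOVSX, and anyext is just a SUBREG_TO_REG around whatever is already in the narrow register. The value-level semantics for comparison, with hypothetical helper names:

    def zext_i1_to_i64(x):
        # AND64ri8 ..., 1: only bit 0 of the truncated source survives.
        return x & 1

    def sext_i8_to_i64(x):
        # MOVSX64rr8: replicate bit 7 of the source through bits 8..63.
        b = x & 0xFF
        return b - 0x100 if b & 0x80 else b

    assert sext_i8_to_i64(0x80) == -128 and zext_i1_to_i64(0xFF) == 1
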
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-ext.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-ext.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-ext.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-ext.mir Wed Jan 31 14:04:26 2018
@@ -64,21 +64,21 @@ registers:
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: gpr, preferred-register: '' }
-# X32: %0:gr32_abcd = COPY %edi
-# X64: %0:gr32 = COPY %edi
+# X32: %0:gr32_abcd = COPY $edi
+# X64: %0:gr32 = COPY $edi
# ALL_NEXT: %1:gr8 = COPY %0.sub_8bit
-# ALL_NEXT: %2:gr8 = AND8ri %1, 1, implicit-def %eflags
-# ALL_NEXT: %al = COPY %2
-# ALL_NEXT: RET 0, implicit %al
+# ALL_NEXT: %2:gr8 = AND8ri %1, 1, implicit-def $eflags
+# ALL_NEXT: $al = COPY %2
+# ALL_NEXT: RET 0, implicit $al
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s32) = COPY %edi
+ %0(s32) = COPY $edi
%1(s1) = G_TRUNC %0(s32)
%2(s8) = G_ZEXT %1(s1)
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
---
@@ -102,22 +102,22 @@ registers:
- { id: 0, class: gpr, preferred-register: '' }
- { id: 1, class: gpr, preferred-register: '' }
- { id: 2, class: gpr, preferred-register: '' }
-# X32: %0:gr32_abcd = COPY %edi
-# X64: %0:gr32 = COPY %edi
+# X32: %0:gr32_abcd = COPY $edi
+# X64: %0:gr32 = COPY $edi
# ALL_NEXT: %1:gr8 = COPY %0.sub_8bit
# ALL_NEXT: %3:gr16 = SUBREG_TO_REG 0, %1, %subreg.sub_8bit
-# ALL_NEXT: %2:gr16 = AND16ri8 %3, 1, implicit-def %eflags
-# ALL_NEXT: %ax = COPY %2
-# ALL_NEXT: RET 0, implicit %ax
+# ALL_NEXT: %2:gr16 = AND16ri8 %3, 1, implicit-def $eflags
+# ALL_NEXT: $ax = COPY %2
+# ALL_NEXT: RET 0, implicit $ax
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s32) = COPY %edi
+ %0(s32) = COPY $edi
%1(s1) = G_TRUNC %0(s32)
%2(s16) = G_ZEXT %1(s1)
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
@@ -141,22 +141,22 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# X32: %0:gr32_abcd = COPY %edi
-# X64: %0:gr32 = COPY %edi
+# X32: %0:gr32_abcd = COPY $edi
+# X64: %0:gr32 = COPY $edi
# ALL_NEXT: %1:gr8 = COPY %0.sub_8bit
# ALL_NEXT: %3:gr32 = SUBREG_TO_REG 0, %1, %subreg.sub_8bit
-# ALL_NEXT: %2:gr32 = AND32ri8 %3, 1, implicit-def %eflags
-# ALL_NEXT: %eax = COPY %2
-# ALL_NEXT: RET 0, implicit %eax
+# ALL_NEXT: %2:gr32 = AND32ri8 %3, 1, implicit-def $eflags
+# ALL_NEXT: $eax = COPY %2
+# ALL_NEXT: RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s32) = COPY %edi
+ %0(s32) = COPY $edi
%1(s1) = G_TRUNC %0(s32)
%2(s32) = G_ZEXT %1(s1)
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -171,18 +171,18 @@ regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
-# ALL: %0:gr8 = COPY %dil
+# ALL: %0:gr8 = COPY $dil
# ALL-NEXT: %1:gr32 = MOVZX32rr8 %0
-# ALL-NEXT: %eax = COPY %1
-# ALL-NEXT: RET 0, implicit %eax
+# ALL-NEXT: $eax = COPY %1
+# ALL-NEXT: RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s8) = COPY %dil
+ %0(s8) = COPY $dil
%1(s32) = G_ZEXT %0(s8)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -197,18 +197,18 @@ regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
-# ALL: %0:gr16 = COPY %di
+# ALL: %0:gr16 = COPY $di
# ALL-NEXT: %1:gr32 = MOVZX32rr16 %0
-# ALL-NEXT: %eax = COPY %1
-# ALL-NEXT: RET 0, implicit %eax
+# ALL-NEXT: $eax = COPY %1
+# ALL-NEXT: RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s16) = COPY %di
+ %0(s16) = COPY $di
%1(s32) = G_ZEXT %0(s16)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -223,18 +223,18 @@ regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
-# ALL: %0:gr8 = COPY %dil
+# ALL: %0:gr8 = COPY $dil
# ALL-NEXT: %1:gr32 = MOVSX32rr8 %0
-# ALL-NEXT: %eax = COPY %1
-# ALL-NEXT: RET 0, implicit %eax
+# ALL-NEXT: $eax = COPY %1
+# ALL-NEXT: RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s8) = COPY %dil
+ %0(s8) = COPY $dil
%1(s32) = G_SEXT %0(s8)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -249,18 +249,18 @@ regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
-# ALL: %0:gr16 = COPY %di
+# ALL: %0:gr16 = COPY $di
# ALL-NEXT: %1:gr32 = MOVSX32rr16 %0
-# ALL-NEXT: %eax = COPY %1
-# ALL-NEXT: RET 0, implicit %eax
+# ALL-NEXT: $eax = COPY %1
+# ALL-NEXT: RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s16) = COPY %di
+ %0(s16) = COPY $di
%1(s32) = G_SEXT %0(s16)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -282,20 +282,20 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# X32: %0:gr32_abcd = COPY %edi
-# X64: %0:gr32 = COPY %edi
+# X32: %0:gr32_abcd = COPY $edi
+# X64: %0:gr32 = COPY $edi
# ALL-NEXT: %1:gr8 = COPY %0.sub_8bit
-# ALL-NEXT: %al = COPY %1
-# ALL-NEXT: RET 0, implicit %al
+# ALL-NEXT: $al = COPY %1
+# ALL-NEXT: RET 0, implicit $al
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s32) = COPY %edi
+ %0(s32) = COPY $edi
%1(s1) = G_TRUNC %0(s32)
%2(s8) = G_ANYEXT %1(s1)
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
---
name: test_anyext_i1toi16
@@ -316,21 +316,21 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# X32: %0:gr32_abcd = COPY %edi
-# X64: %0:gr32 = COPY %edi
+# X32: %0:gr32_abcd = COPY $edi
+# X64: %0:gr32 = COPY $edi
# ALL-NEXT: %1:gr8 = COPY %0.sub_8bit
# ALL-NEXT: %2:gr16 = SUBREG_TO_REG 0, %1, %subreg.sub_8bit
-# ALL-NEXT: %ax = COPY %2
-# ALL-NEXT: RET 0, implicit %ax
+# ALL-NEXT: $ax = COPY %2
+# ALL-NEXT: RET 0, implicit $ax
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s32) = COPY %edi
+ %0(s32) = COPY $edi
%1(s1) = G_TRUNC %0(s32)
%2(s16) = G_ANYEXT %1(s1)
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
name: test_anyext_i1toi32
@@ -351,21 +351,21 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# X32: %0:gr32_abcd = COPY %edi
-# X64: %0:gr32 = COPY %edi
+# X32: %0:gr32_abcd = COPY $edi
+# X64: %0:gr32 = COPY $edi
# ALL-NEXT: %1:gr8 = COPY %0.sub_8bit
# ALL-NEXT: %2:gr32 = SUBREG_TO_REG 0, %1, %subreg.sub_8bit
-# ALL-NEXT: %eax = COPY %2
-# ALL-NEXT: RET 0, implicit %eax
+# ALL-NEXT: $eax = COPY %2
+# ALL-NEXT: RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s32) = COPY %edi
+ %0(s32) = COPY $edi
%1(s1) = G_TRUNC %0(s32)
%2(s32) = G_ANYEXT %1(s1)
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
name: test_anyext_i8toi16
@@ -386,21 +386,21 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# X32: %0:gr32_abcd = COPY %edi
-# X64: %0:gr32 = COPY %edi
+# X32: %0:gr32_abcd = COPY $edi
+# X64: %0:gr32 = COPY $edi
# ALL-NEXT: %1:gr8 = COPY %0.sub_8bit
# ALL-NEXT: %2:gr16 = SUBREG_TO_REG 0, %1, %subreg.sub_8bit
-# ALL-NEXT: %ax = COPY %2
-# ALL-NEXT: RET 0, implicit %ax
+# ALL-NEXT: $ax = COPY %2
+# ALL-NEXT: RET 0, implicit $ax
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s32) = COPY %edi
+ %0(s32) = COPY $edi
%1(s8) = G_TRUNC %0(s32)
%2(s16) = G_ANYEXT %1(s8)
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
name: test_anyext_i8toi32
@@ -421,21 +421,21 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# X32: %0:gr32_abcd = COPY %edi
-# X64: %0:gr32 = COPY %edi
+# X32: %0:gr32_abcd = COPY $edi
+# X64: %0:gr32 = COPY $edi
# ALL-NEXT: %1:gr8 = COPY %0.sub_8bit
# ALL-NEXT: %2:gr32 = MOVZX32rr8 %1
-# ALL-NEXT: %eax = COPY %2
-# ALL-NEXT: RET 0, implicit %eax
+# ALL-NEXT: $eax = COPY %2
+# ALL-NEXT: RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s32) = COPY %edi
+ %0(s32) = COPY $edi
%1(s8) = G_TRUNC %0(s32)
%2(s32) = G_ANYEXT %1(s8)
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
name: test_anyext_i16toi32
@@ -451,18 +451,18 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# ALL: %0:gr32 = COPY %edi
+# ALL: %0:gr32 = COPY $edi
# ALL-NEXT: %1:gr16 = COPY %0.sub_16bit
# ALL-NEXT: %2:gr32 = SUBREG_TO_REG 0, %1, %subreg.sub_16bit
-# ALL-NEXT: %eax = COPY %2
-# ALL-NEXT: RET 0, implicit %eax
+# ALL-NEXT: $eax = COPY %2
+# ALL-NEXT: RET 0, implicit $eax
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
- %0(s32) = COPY %edi
+ %0(s32) = COPY $edi
%1(s16) = G_TRUNC %0(s32)
%2(s32) = G_ANYEXT %1(s16)
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
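
For readers skimming the rest of these X86 GlobalISel hunks: the rewrite is mechanical. Physical registers switch sigil from '%' to '$', while virtual registers (%0, %1, ...) and other '%'-namespaced entities (%const.0, %fixed-stack.0, %subreg.sub_8bit, %ir-block.0) keep '%'. A rough sketch of the per-line substitution follows (this is not the script used for the commit, and the register list is a hand-picked stand-in for the target's real register info):

  import re

  # Hypothetical subset of X86 physical register names; a real tool
  # would query the target's register info instead of hard-coding.
  PHYSREGS = {'al', 'ax', 'eax', 'rax', 'di', 'dil', 'edi', 'rdi',
              'rip', 'esp', 'eflags', 'noreg',
              'xmm0', 'xmm1', 'ymm0', 'ymm1', 'zmm0', 'zmm1'}

  SIGIL = re.compile(r'%([a-z][a-z0-9_]*)')

  def to_dollar_sigil(line):
      # '%eax' -> '$eax'; every other '%' token is left untouched, so
      # '%0', '%const.0', and '%fixed-stack.0' come through unchanged.
      return SIGIL.sub(
          lambda m: '$' + m.group(1) if m.group(1) in PHYSREGS
                    else m.group(0),
          line)

  print(to_dollar_sigil('%0(s32) = COPY %edi'))   # %0(s32) = COPY $edi
  print(to_dollar_sigil('RET 0, implicit %eax'))  # RET 0, implicit $eax

Keying the rewrite on a register-name allowlist is what keeps namespaced '%' tokens intact, which is why a bare s/%/$/ would not have produced the hunks shown here.
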
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-extract-vec256.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-extract-vec256.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-extract-vec256.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-extract-vec256.mir Wed Jan 31 14:04:26 2018
@@ -27,20 +27,20 @@ regBankSelected: true
registers:
- { id: 0, class: vecr }
- { id: 1, class: vecr }
-# AVX: %0:vr256 = COPY %ymm1
+# AVX: %0:vr256 = COPY $ymm1
# AVX-NEXT: %1:vr128 = COPY %0.sub_xmm
-# AVX512VL: %0:vr256x = COPY %ymm1
+# AVX512VL: %0:vr256x = COPY $ymm1
# AVX512VL-NEXT: %1:vr128x = COPY %0.sub_xmm
-# ALL-NEXT: %xmm0 = COPY %1
-# ALL-NEXT: RET 0, implicit %xmm0
+# ALL-NEXT: $xmm0 = COPY %1
+# ALL-NEXT: RET 0, implicit $xmm0
body: |
bb.1 (%ir-block.0):
- liveins: %ymm1
+ liveins: $ymm1
- %0(<8 x s32>) = COPY %ymm1
+ %0(<8 x s32>) = COPY $ymm1
%1(<4 x s32>) = G_EXTRACT %0(<8 x s32>), 0
- %xmm0 = COPY %1(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -59,22 +59,22 @@ regBankSelected: true
registers:
- { id: 0, class: vecr }
- { id: 1, class: vecr }
-# AVX: %0:vr256 = COPY %ymm1
+# AVX: %0:vr256 = COPY $ymm1
# AVX-NEXT: %1:vr128 = VEXTRACTF128rr %0, 1
-# AVX-NEXT: %xmm0 = COPY %1
-# AVX-NEXT: RET 0, implicit %xmm0
+# AVX-NEXT: $xmm0 = COPY %1
+# AVX-NEXT: RET 0, implicit $xmm0
#
-# AVX512VL: %0:vr256x = COPY %ymm1
+# AVX512VL: %0:vr256x = COPY $ymm1
# AVX512VL-NEXT: %1:vr128x = VEXTRACTF32x4Z256rr %0, 1
-# AVX512VL-NEXT: %xmm0 = COPY %1
-# AVX512VL-NEXT: RET 0, implicit %xmm0
+# AVX512VL-NEXT: $xmm0 = COPY %1
+# AVX512VL-NEXT: RET 0, implicit $xmm0
body: |
bb.1 (%ir-block.0):
- liveins: %ymm1
+ liveins: $ymm1
- %0(<8 x s32>) = COPY %ymm1
+ %0(<8 x s32>) = COPY $ymm1
%1(<4 x s32>) = G_EXTRACT %0(<8 x s32>), 128
- %xmm0 = COPY %1(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(<4 x s32>)
+ RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-extract-vec512.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-extract-vec512.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-extract-vec512.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-extract-vec512.mir Wed Jan 31 14:04:26 2018
@@ -32,18 +32,18 @@ regBankSelected: true
registers:
- { id: 0, class: vecr }
- { id: 1, class: vecr }
-# ALL: %0:vr512 = COPY %zmm1
+# ALL: %0:vr512 = COPY $zmm1
# ALL-NEXT: %1:vr128x = COPY %0.sub_xmm
-# ALL-NEXT: %xmm0 = COPY %1
-# ALL-NEXT: RET 0, implicit %xmm0
+# ALL-NEXT: $xmm0 = COPY %1
+# ALL-NEXT: RET 0, implicit $xmm0
body: |
bb.1 (%ir-block.0):
- liveins: %zmm1
+ liveins: $zmm1
- %0(<16 x s32>) = COPY %zmm1
+ %0(<16 x s32>) = COPY $zmm1
%1(<4 x s32>) = G_EXTRACT %0(<16 x s32>), 0
- %xmm0 = COPY %1(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -58,18 +58,18 @@ regBankSelected: true
registers:
- { id: 0, class: vecr }
- { id: 1, class: vecr }
-# ALL: %0:vr512 = COPY %zmm1
+# ALL: %0:vr512 = COPY $zmm1
# ALL-NEXT: %1:vr128x = VEXTRACTF32x4Zrr %0, 1
-# ALL-NEXT: %xmm0 = COPY %1
-# ALL-NEXT: RET 0, implicit %xmm0
+# ALL-NEXT: $xmm0 = COPY %1
+# ALL-NEXT: RET 0, implicit $xmm0
body: |
bb.1 (%ir-block.0):
- liveins: %zmm1
+ liveins: $zmm1
- %0(<16 x s32>) = COPY %zmm1
+ %0(<16 x s32>) = COPY $zmm1
%1(<4 x s32>) = G_EXTRACT %0(<16 x s32>), 128
- %xmm0 = COPY %1(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -84,18 +84,18 @@ regBankSelected: true
registers:
- { id: 0, class: vecr }
- { id: 1, class: vecr }
-# ALL: %0:vr512 = COPY %zmm1
+# ALL: %0:vr512 = COPY $zmm1
# ALL-NEXT: %1:vr256x = COPY %0.sub_ymm
-# ALL-NEXT: %ymm0 = COPY %1
-# ALL-NEXT: RET 0, implicit %ymm0
+# ALL-NEXT: $ymm0 = COPY %1
+# ALL-NEXT: RET 0, implicit $ymm0
body: |
bb.1 (%ir-block.0):
- liveins: %zmm1
+ liveins: $zmm1
- %0(<16 x s32>) = COPY %zmm1
+ %0(<16 x s32>) = COPY $zmm1
%1(<8 x s32>) = G_EXTRACT %0(<16 x s32>), 0
- %ymm0 = COPY %1(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %1(<8 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -110,17 +110,17 @@ regBankSelected: true
registers:
- { id: 0, class: vecr }
- { id: 1, class: vecr }
-# ALL: %0:vr512 = COPY %zmm1
+# ALL: %0:vr512 = COPY $zmm1
# ALL-NEXT: %1:vr256x = VEXTRACTF64x4Zrr %0, 1
-# ALL-NEXT: %ymm0 = COPY %1
-# ALL-NEXT: RET 0, implicit %ymm0
+# ALL-NEXT: $ymm0 = COPY %1
+# ALL-NEXT: RET 0, implicit $ymm0
body: |
bb.1 (%ir-block.0):
- liveins: %zmm1
+ liveins: $zmm1
- %0(<16 x s32>) = COPY %zmm1
+ %0(<16 x s32>) = COPY $zmm1
%1(<8 x s32>) = G_EXTRACT %0(<16 x s32>), 256
- %ymm0 = COPY %1(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %1(<8 x s32>)
+ RET 0, implicit $ymm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-fadd-scalar.mir Wed Jan 31 14:04:26 2018
@@ -34,37 +34,37 @@ constants:
#
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; SSE-LABEL: name: test_fadd_float
- ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
- ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY %xmm1
+ ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1
; SSE: [[ADDSSrr:%[0-9]+]]:fr32 = ADDSSrr [[COPY]], [[COPY1]]
- ; SSE: %xmm0 = COPY [[ADDSSrr]]
- ; SSE: RET 0, implicit %xmm0
+ ; SSE: $xmm0 = COPY [[ADDSSrr]]
+ ; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fadd_float
- ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
- ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY %xmm1
+ ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1
; AVX: [[VADDSSrr:%[0-9]+]]:fr32 = VADDSSrr [[COPY]], [[COPY1]]
- ; AVX: %xmm0 = COPY [[VADDSSrr]]
- ; AVX: RET 0, implicit %xmm0
+ ; AVX: $xmm0 = COPY [[VADDSSrr]]
+ ; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fadd_float
- ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY %xmm1
+ ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1
; AVX512F: [[VADDSSZrr:%[0-9]+]]:fr32x = VADDSSZrr [[COPY]], [[COPY1]]
- ; AVX512F: %xmm0 = COPY [[VADDSSZrr]]
- ; AVX512F: RET 0, implicit %xmm0
+ ; AVX512F: $xmm0 = COPY [[VADDSSZrr]]
+ ; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fadd_float
- ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY %xmm1
+ ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1
; AVX512VL: [[VADDSSZrr:%[0-9]+]]:fr32x = VADDSSZrr [[COPY]], [[COPY1]]
- ; AVX512VL: %xmm0 = COPY [[VADDSSZrr]]
- ; AVX512VL: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
- %1(s32) = COPY %xmm1
+ ; AVX512VL: $xmm0 = COPY [[VADDSSZrr]]
+ ; AVX512VL: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
+ %1(s32) = COPY $xmm1
%2(s32) = G_FADD %0, %1
- %xmm0 = COPY %2(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s32)
+ RET 0, implicit $xmm0
...
---
@@ -85,36 +85,36 @@ constants:
#
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; SSE-LABEL: name: test_fadd_double
- ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0
- ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY %xmm1
+ ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1
; SSE: [[ADDSDrr:%[0-9]+]]:fr64 = ADDSDrr [[COPY]], [[COPY1]]
- ; SSE: %xmm0 = COPY [[ADDSDrr]]
- ; SSE: RET 0, implicit %xmm0
+ ; SSE: $xmm0 = COPY [[ADDSDrr]]
+ ; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fadd_double
- ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0
- ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY %xmm1
+ ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1
; AVX: [[VADDSDrr:%[0-9]+]]:fr64 = VADDSDrr [[COPY]], [[COPY1]]
- ; AVX: %xmm0 = COPY [[VADDSDrr]]
- ; AVX: RET 0, implicit %xmm0
+ ; AVX: $xmm0 = COPY [[VADDSDrr]]
+ ; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fadd_double
- ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY %xmm1
+ ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1
; AVX512F: [[VADDSDZrr:%[0-9]+]]:fr64x = VADDSDZrr [[COPY]], [[COPY1]]
- ; AVX512F: %xmm0 = COPY [[VADDSDZrr]]
- ; AVX512F: RET 0, implicit %xmm0
+ ; AVX512F: $xmm0 = COPY [[VADDSDZrr]]
+ ; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fadd_double
- ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY %xmm1
+ ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1
; AVX512VL: [[VADDSDZrr:%[0-9]+]]:fr64x = VADDSDZrr [[COPY]], [[COPY1]]
- ; AVX512VL: %xmm0 = COPY [[VADDSDZrr]]
- ; AVX512VL: RET 0, implicit %xmm0
- %0(s64) = COPY %xmm0
- %1(s64) = COPY %xmm1
+ ; AVX512VL: $xmm0 = COPY [[VADDSDZrr]]
+ ; AVX512VL: RET 0, implicit $xmm0
+ %0(s64) = COPY $xmm0
+ %1(s64) = COPY $xmm1
%2(s64) = G_FADD %0, %1
- %xmm0 = COPY %2(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s64)
+ RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-fconstant.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-fconstant.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-fconstant.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-fconstant.mir Wed Jan 31 14:04:26 2018
@@ -29,29 +29,29 @@ registers:
body: |
bb.1.entry:
; CHECK_NOPIC64-LABEL: name: test_float
- ; CHECK_NOPIC64: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm %rip, 1, %noreg, %const.0, %noreg
- ; CHECK_NOPIC64: %xmm0 = COPY [[MOVSSrm]]
- ; CHECK_NOPIC64: RET 0, implicit %xmm0
+ ; CHECK_NOPIC64: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm $rip, 1, $noreg, %const.0, $noreg
+ ; CHECK_NOPIC64: $xmm0 = COPY [[MOVSSrm]]
+ ; CHECK_NOPIC64: RET 0, implicit $xmm0
; CHECK_LARGE64-LABEL: name: test_float
; CHECK_LARGE64: [[MOV64ri:%[0-9]+]]:gr64 = MOV64ri %const.0
- ; CHECK_LARGE64: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm [[MOV64ri]], 1, %noreg, 0, %noreg :: (load 8 from constant-pool, align 32)
- ; CHECK_LARGE64: %xmm0 = COPY [[MOVSSrm]]
- ; CHECK_LARGE64: RET 0, implicit %xmm0
+ ; CHECK_LARGE64: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm [[MOV64ri]], 1, $noreg, 0, $noreg :: (load 8 from constant-pool, align 32)
+ ; CHECK_LARGE64: $xmm0 = COPY [[MOVSSrm]]
+ ; CHECK_LARGE64: RET 0, implicit $xmm0
; CHECK_SMALL32-LABEL: name: test_float
- ; CHECK_SMALL32: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm %noreg, 1, %noreg, %const.0, %noreg
- ; CHECK_SMALL32: %xmm0 = COPY [[MOVSSrm]]
- ; CHECK_SMALL32: RET 0, implicit %xmm0
+ ; CHECK_SMALL32: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm $noreg, 1, $noreg, %const.0, $noreg
+ ; CHECK_SMALL32: $xmm0 = COPY [[MOVSSrm]]
+ ; CHECK_SMALL32: RET 0, implicit $xmm0
; CHECK_LARGE32-LABEL: name: test_float
- ; CHECK_LARGE32: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm %noreg, 1, %noreg, %const.0, %noreg
- ; CHECK_LARGE32: %xmm0 = COPY [[MOVSSrm]]
- ; CHECK_LARGE32: RET 0, implicit %xmm0
+ ; CHECK_LARGE32: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm $noreg, 1, $noreg, %const.0, $noreg
+ ; CHECK_LARGE32: $xmm0 = COPY [[MOVSSrm]]
+ ; CHECK_LARGE32: RET 0, implicit $xmm0
; CHECK_PIC64-LABEL: name: test_float
- ; CHECK_PIC64: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm %rip, 1, %noreg, %const.0, %noreg
- ; CHECK_PIC64: %xmm0 = COPY [[MOVSSrm]]
- ; CHECK_PIC64: RET 0, implicit %xmm0
+ ; CHECK_PIC64: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm $rip, 1, $noreg, %const.0, $noreg
+ ; CHECK_PIC64: $xmm0 = COPY [[MOVSSrm]]
+ ; CHECK_PIC64: RET 0, implicit $xmm0
%0(s32) = G_FCONSTANT float 5.500000e+00
- %xmm0 = COPY %0(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %0(s32)
+ RET 0, implicit $xmm0
...
---
@@ -70,28 +70,28 @@ registers:
body: |
bb.1.entry:
; CHECK_NOPIC64-LABEL: name: test_double
- ; CHECK_NOPIC64: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm %rip, 1, %noreg, %const.0, %noreg
- ; CHECK_NOPIC64: %xmm0 = COPY [[MOVSDrm]]
- ; CHECK_NOPIC64: RET 0, implicit %xmm0
+ ; CHECK_NOPIC64: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm $rip, 1, $noreg, %const.0, $noreg
+ ; CHECK_NOPIC64: $xmm0 = COPY [[MOVSDrm]]
+ ; CHECK_NOPIC64: RET 0, implicit $xmm0
; CHECK_LARGE64-LABEL: name: test_double
; CHECK_LARGE64: [[MOV64ri:%[0-9]+]]:gr64 = MOV64ri %const.0
- ; CHECK_LARGE64: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm [[MOV64ri]], 1, %noreg, 0, %noreg :: (load 8 from constant-pool, align 64)
- ; CHECK_LARGE64: %xmm0 = COPY [[MOVSDrm]]
- ; CHECK_LARGE64: RET 0, implicit %xmm0
+ ; CHECK_LARGE64: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm [[MOV64ri]], 1, $noreg, 0, $noreg :: (load 8 from constant-pool, align 64)
+ ; CHECK_LARGE64: $xmm0 = COPY [[MOVSDrm]]
+ ; CHECK_LARGE64: RET 0, implicit $xmm0
; CHECK_SMALL32-LABEL: name: test_double
- ; CHECK_SMALL32: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm %noreg, 1, %noreg, %const.0, %noreg
- ; CHECK_SMALL32: %xmm0 = COPY [[MOVSDrm]]
- ; CHECK_SMALL32: RET 0, implicit %xmm0
+ ; CHECK_SMALL32: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm $noreg, 1, $noreg, %const.0, $noreg
+ ; CHECK_SMALL32: $xmm0 = COPY [[MOVSDrm]]
+ ; CHECK_SMALL32: RET 0, implicit $xmm0
; CHECK_LARGE32-LABEL: name: test_double
- ; CHECK_LARGE32: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm %noreg, 1, %noreg, %const.0, %noreg
- ; CHECK_LARGE32: %xmm0 = COPY [[MOVSDrm]]
- ; CHECK_LARGE32: RET 0, implicit %xmm0
+ ; CHECK_LARGE32: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm $noreg, 1, $noreg, %const.0, $noreg
+ ; CHECK_LARGE32: $xmm0 = COPY [[MOVSDrm]]
+ ; CHECK_LARGE32: RET 0, implicit $xmm0
; CHECK_PIC64-LABEL: name: test_double
- ; CHECK_PIC64: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm %rip, 1, %noreg, %const.0, %noreg
- ; CHECK_PIC64: %xmm0 = COPY [[MOVSDrm]]
- ; CHECK_PIC64: RET 0, implicit %xmm0
+ ; CHECK_PIC64: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm $rip, 1, $noreg, %const.0, $noreg
+ ; CHECK_PIC64: $xmm0 = COPY [[MOVSDrm]]
+ ; CHECK_PIC64: RET 0, implicit $xmm0
%0(s64) = G_FCONSTANT double 5.500000e+00
- %xmm0 = COPY %0(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %0(s64)
+ RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-fdiv-scalar.mir Wed Jan 31 14:04:26 2018
@@ -34,37 +34,37 @@ constants:
#
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; SSE-LABEL: name: test_fdiv_float
- ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
- ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY %xmm1
+ ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1
; SSE: [[DIVSSrr:%[0-9]+]]:fr32 = DIVSSrr [[COPY]], [[COPY1]]
- ; SSE: %xmm0 = COPY [[DIVSSrr]]
- ; SSE: RET 0, implicit %xmm0
+ ; SSE: $xmm0 = COPY [[DIVSSrr]]
+ ; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fdiv_float
- ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
- ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY %xmm1
+ ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1
; AVX: [[VDIVSSrr:%[0-9]+]]:fr32 = VDIVSSrr [[COPY]], [[COPY1]]
- ; AVX: %xmm0 = COPY [[VDIVSSrr]]
- ; AVX: RET 0, implicit %xmm0
+ ; AVX: $xmm0 = COPY [[VDIVSSrr]]
+ ; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fdiv_float
- ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY %xmm1
+ ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1
; AVX512F: [[VDIVSSZrr:%[0-9]+]]:fr32x = VDIVSSZrr [[COPY]], [[COPY1]]
- ; AVX512F: %xmm0 = COPY [[VDIVSSZrr]]
- ; AVX512F: RET 0, implicit %xmm0
+ ; AVX512F: $xmm0 = COPY [[VDIVSSZrr]]
+ ; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fdiv_float
- ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY %xmm1
+ ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1
; AVX512VL: [[VDIVSSZrr:%[0-9]+]]:fr32x = VDIVSSZrr [[COPY]], [[COPY1]]
- ; AVX512VL: %xmm0 = COPY [[VDIVSSZrr]]
- ; AVX512VL: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
- %1(s32) = COPY %xmm1
+ ; AVX512VL: $xmm0 = COPY [[VDIVSSZrr]]
+ ; AVX512VL: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
+ %1(s32) = COPY $xmm1
%2(s32) = G_FDIV %0, %1
- %xmm0 = COPY %2(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s32)
+ RET 0, implicit $xmm0
...
---
@@ -85,36 +85,36 @@ constants:
#
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; SSE-LABEL: name: test_fdiv_double
- ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0
- ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY %xmm1
+ ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1
; SSE: [[DIVSDrr:%[0-9]+]]:fr64 = DIVSDrr [[COPY]], [[COPY1]]
- ; SSE: %xmm0 = COPY [[DIVSDrr]]
- ; SSE: RET 0, implicit %xmm0
+ ; SSE: $xmm0 = COPY [[DIVSDrr]]
+ ; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fdiv_double
- ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0
- ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY %xmm1
+ ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1
; AVX: [[VDIVSDrr:%[0-9]+]]:fr64 = VDIVSDrr [[COPY]], [[COPY1]]
- ; AVX: %xmm0 = COPY [[VDIVSDrr]]
- ; AVX: RET 0, implicit %xmm0
+ ; AVX: $xmm0 = COPY [[VDIVSDrr]]
+ ; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fdiv_double
- ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY %xmm1
+ ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1
; AVX512F: [[VDIVSDZrr:%[0-9]+]]:fr64x = VDIVSDZrr [[COPY]], [[COPY1]]
- ; AVX512F: %xmm0 = COPY [[VDIVSDZrr]]
- ; AVX512F: RET 0, implicit %xmm0
+ ; AVX512F: $xmm0 = COPY [[VDIVSDZrr]]
+ ; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fdiv_double
- ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY %xmm1
+ ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1
; AVX512VL: [[VDIVSDZrr:%[0-9]+]]:fr64x = VDIVSDZrr [[COPY]], [[COPY1]]
- ; AVX512VL: %xmm0 = COPY [[VDIVSDZrr]]
- ; AVX512VL: RET 0, implicit %xmm0
- %0(s64) = COPY %xmm0
- %1(s64) = COPY %xmm1
+ ; AVX512VL: $xmm0 = COPY [[VDIVSDZrr]]
+ ; AVX512VL: RET 0, implicit $xmm0
+ %0(s64) = COPY $xmm0
+ %1(s64) = COPY $xmm1
%2(s64) = G_FDIV %0, %1
- %xmm0 = COPY %2(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s64)
+ RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-fmul-scalar.mir Wed Jan 31 14:04:26 2018
@@ -34,37 +34,37 @@ constants:
#
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; SSE-LABEL: name: test_fmul_float
- ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
- ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY %xmm1
+ ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1
; SSE: [[MULSSrr:%[0-9]+]]:fr32 = MULSSrr [[COPY]], [[COPY1]]
- ; SSE: %xmm0 = COPY [[MULSSrr]]
- ; SSE: RET 0, implicit %xmm0
+ ; SSE: $xmm0 = COPY [[MULSSrr]]
+ ; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fmul_float
- ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
- ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY %xmm1
+ ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1
; AVX: [[VMULSSrr:%[0-9]+]]:fr32 = VMULSSrr [[COPY]], [[COPY1]]
- ; AVX: %xmm0 = COPY [[VMULSSrr]]
- ; AVX: RET 0, implicit %xmm0
+ ; AVX: $xmm0 = COPY [[VMULSSrr]]
+ ; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fmul_float
- ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY %xmm1
+ ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1
; AVX512F: [[VMULSSZrr:%[0-9]+]]:fr32x = VMULSSZrr [[COPY]], [[COPY1]]
- ; AVX512F: %xmm0 = COPY [[VMULSSZrr]]
- ; AVX512F: RET 0, implicit %xmm0
+ ; AVX512F: $xmm0 = COPY [[VMULSSZrr]]
+ ; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fmul_float
- ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY %xmm1
+ ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1
; AVX512VL: [[VMULSSZrr:%[0-9]+]]:fr32x = VMULSSZrr [[COPY]], [[COPY1]]
- ; AVX512VL: %xmm0 = COPY [[VMULSSZrr]]
- ; AVX512VL: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
- %1(s32) = COPY %xmm1
+ ; AVX512VL: $xmm0 = COPY [[VMULSSZrr]]
+ ; AVX512VL: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
+ %1(s32) = COPY $xmm1
%2(s32) = G_FMUL %0, %1
- %xmm0 = COPY %2(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s32)
+ RET 0, implicit $xmm0
...
---
@@ -85,36 +85,36 @@ constants:
#
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; SSE-LABEL: name: test_fmul_double
- ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0
- ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY %xmm1
+ ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1
; SSE: [[MULSDrr:%[0-9]+]]:fr64 = MULSDrr [[COPY]], [[COPY1]]
- ; SSE: %xmm0 = COPY [[MULSDrr]]
- ; SSE: RET 0, implicit %xmm0
+ ; SSE: $xmm0 = COPY [[MULSDrr]]
+ ; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fmul_double
- ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0
- ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY %xmm1
+ ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1
; AVX: [[VMULSDrr:%[0-9]+]]:fr64 = VMULSDrr [[COPY]], [[COPY1]]
- ; AVX: %xmm0 = COPY [[VMULSDrr]]
- ; AVX: RET 0, implicit %xmm0
+ ; AVX: $xmm0 = COPY [[VMULSDrr]]
+ ; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fmul_double
- ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY %xmm1
+ ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1
; AVX512F: [[VMULSDZrr:%[0-9]+]]:fr64x = VMULSDZrr [[COPY]], [[COPY1]]
- ; AVX512F: %xmm0 = COPY [[VMULSDZrr]]
- ; AVX512F: RET 0, implicit %xmm0
+ ; AVX512F: $xmm0 = COPY [[VMULSDZrr]]
+ ; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fmul_double
- ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY %xmm1
+ ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1
; AVX512VL: [[VMULSDZrr:%[0-9]+]]:fr64x = VMULSDZrr [[COPY]], [[COPY1]]
- ; AVX512VL: %xmm0 = COPY [[VMULSDZrr]]
- ; AVX512VL: RET 0, implicit %xmm0
- %0(s64) = COPY %xmm0
- %1(s64) = COPY %xmm1
+ ; AVX512VL: $xmm0 = COPY [[VMULSDZrr]]
+ ; AVX512VL: RET 0, implicit $xmm0
+ %0(s64) = COPY $xmm0
+ %1(s64) = COPY $xmm1
%2(s64) = G_FMUL %0, %1
- %xmm0 = COPY %2(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s64)
+ RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-fpext-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-fpext-scalar.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-fpext-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-fpext-scalar.mir Wed Jan 31 14:04:26 2018
@@ -23,16 +23,16 @@ stack:
constants:
body: |
bb.1.entry:
- liveins: %xmm0
+ liveins: $xmm0
; ALL-LABEL: name: test
- ; ALL: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
+ ; ALL: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
; ALL: [[CVTSS2SDrr:%[0-9]+]]:fr64 = CVTSS2SDrr [[COPY]]
- ; ALL: %xmm0 = COPY [[CVTSS2SDrr]]
- ; ALL: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
+ ; ALL: $xmm0 = COPY [[CVTSS2SDrr]]
+ ; ALL: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
%1(s64) = G_FPEXT %0(s32)
- %xmm0 = COPY %1(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(s64)
+ RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-fsub-scalar.mir Wed Jan 31 14:04:26 2018
@@ -34,37 +34,37 @@ constants:
#
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; SSE-LABEL: name: test_fsub_float
- ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
- ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY %xmm1
+ ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1
; SSE: [[SUBSSrr:%[0-9]+]]:fr32 = SUBSSrr [[COPY]], [[COPY1]]
- ; SSE: %xmm0 = COPY [[SUBSSrr]]
- ; SSE: RET 0, implicit %xmm0
+ ; SSE: $xmm0 = COPY [[SUBSSrr]]
+ ; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fsub_float
- ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
- ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY %xmm1
+ ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr32 = COPY $xmm1
; AVX: [[VSUBSSrr:%[0-9]+]]:fr32 = VSUBSSrr [[COPY]], [[COPY1]]
- ; AVX: %xmm0 = COPY [[VSUBSSrr]]
- ; AVX: RET 0, implicit %xmm0
+ ; AVX: $xmm0 = COPY [[VSUBSSrr]]
+ ; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fsub_float
- ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY %xmm1
+ ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1
; AVX512F: [[VSUBSSZrr:%[0-9]+]]:fr32x = VSUBSSZrr [[COPY]], [[COPY1]]
- ; AVX512F: %xmm0 = COPY [[VSUBSSZrr]]
- ; AVX512F: RET 0, implicit %xmm0
+ ; AVX512F: $xmm0 = COPY [[VSUBSSZrr]]
+ ; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fsub_float
- ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY %xmm1
+ ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr32x = COPY $xmm1
; AVX512VL: [[VSUBSSZrr:%[0-9]+]]:fr32x = VSUBSSZrr [[COPY]], [[COPY1]]
- ; AVX512VL: %xmm0 = COPY [[VSUBSSZrr]]
- ; AVX512VL: RET 0, implicit %xmm0
- %0(s32) = COPY %xmm0
- %1(s32) = COPY %xmm1
+ ; AVX512VL: $xmm0 = COPY [[VSUBSSZrr]]
+ ; AVX512VL: RET 0, implicit $xmm0
+ %0(s32) = COPY $xmm0
+ %1(s32) = COPY $xmm1
%2(s32) = G_FSUB %0, %1
- %xmm0 = COPY %2(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s32)
+ RET 0, implicit $xmm0
...
---
@@ -85,36 +85,36 @@ constants:
#
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; SSE-LABEL: name: test_fsub_double
- ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0
- ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY %xmm1
+ ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1
; SSE: [[SUBSDrr:%[0-9]+]]:fr64 = SUBSDrr [[COPY]], [[COPY1]]
- ; SSE: %xmm0 = COPY [[SUBSDrr]]
- ; SSE: RET 0, implicit %xmm0
+ ; SSE: $xmm0 = COPY [[SUBSDrr]]
+ ; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_fsub_double
- ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0
- ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY %xmm1
+ ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:fr64 = COPY $xmm1
; AVX: [[VSUBSDrr:%[0-9]+]]:fr64 = VSUBSDrr [[COPY]], [[COPY1]]
- ; AVX: %xmm0 = COPY [[VSUBSDrr]]
- ; AVX: RET 0, implicit %xmm0
+ ; AVX: $xmm0 = COPY [[VSUBSDrr]]
+ ; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_fsub_double
- ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY %xmm1
+ ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1
; AVX512F: [[VSUBSDZrr:%[0-9]+]]:fr64x = VSUBSDZrr [[COPY]], [[COPY1]]
- ; AVX512F: %xmm0 = COPY [[VSUBSDZrr]]
- ; AVX512F: RET 0, implicit %xmm0
+ ; AVX512F: $xmm0 = COPY [[VSUBSDZrr]]
+ ; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_fsub_double
- ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY %xmm1
+ ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:fr64x = COPY $xmm1
; AVX512VL: [[VSUBSDZrr:%[0-9]+]]:fr64x = VSUBSDZrr [[COPY]], [[COPY1]]
- ; AVX512VL: %xmm0 = COPY [[VSUBSDZrr]]
- ; AVX512VL: RET 0, implicit %xmm0
- %0(s64) = COPY %xmm0
- %1(s64) = COPY %xmm1
+ ; AVX512VL: $xmm0 = COPY [[VSUBSDZrr]]
+ ; AVX512VL: RET 0, implicit $xmm0
+ %0(s64) = COPY $xmm0
+ %1(s64) = COPY $xmm1
%2(s64) = G_FSUB %0, %1
- %xmm0 = COPY %2(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(s64)
+ RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-gep.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-gep.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-gep.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-gep.mir Wed Jan 31 14:04:26 2018
@@ -19,18 +19,18 @@ registers:
- { id: 2, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; CHECK-LABEL: name: test_gep_i32
- ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; CHECK: [[MOV64ri32_:%[0-9]+]]:gr64_nosp = MOV64ri32 20
- ; CHECK: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri32_]], 0, %noreg
- ; CHECK: %rax = COPY [[LEA64r]]
- ; CHECK: RET 0, implicit %rax
- %0(p0) = COPY %rdi
+ ; CHECK: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri32_]], 0, $noreg
+ ; CHECK: $rax = COPY [[LEA64r]]
+ ; CHECK: RET 0, implicit $rax
+ %0(p0) = COPY $rdi
%1(s64) = G_CONSTANT i64 20
%2(p0) = G_GEP %0, %1(s64)
- %rax = COPY %2(p0)
- RET 0, implicit %rax
+ $rax = COPY %2(p0)
+ RET 0, implicit $rax
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-inc.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-inc.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-inc.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-inc.mir Wed Jan 31 14:04:26 2018
@@ -21,16 +21,16 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# ALL: %0:gr8 = COPY %al
+# ALL: %0:gr8 = COPY $al
# INC-NEXT: %2:gr8 = INC8r %0
# ADD-NEXT: %2:gr8 = ADD8ri %0, 1
body: |
bb.1 (%ir-block.0):
- liveins: %al
+ liveins: $al
- %0(s8) = COPY %al
+ %0(s8) = COPY $al
%1(s8) = G_CONSTANT i8 1
%2(s8) = G_ADD %0, %1
- %al = COPY %2(s8)
+ $al = COPY %2(s8)
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-insert-vec256.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-insert-vec256.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-insert-vec256.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-insert-vec256.mir Wed Jan 31 14:04:26 2018
@@ -28,26 +28,26 @@ registers:
- { id: 0, class: vecr }
- { id: 1, class: vecr }
- { id: 2, class: vecr }
-# AVX: %0:vr256 = COPY %ymm0
-# AVX-NEXT: %1:vr128 = COPY %xmm1
+# AVX: %0:vr256 = COPY $ymm0
+# AVX-NEXT: %1:vr128 = COPY $xmm1
# AVX-NEXT: %2:vr256 = VINSERTF128rr %0, %1, 0
-# AVX-NEXT: %ymm0 = COPY %2
-# AVX-NEXT: RET 0, implicit %ymm0
+# AVX-NEXT: $ymm0 = COPY %2
+# AVX-NEXT: RET 0, implicit $ymm0
#
-# AVX512VL: %0:vr256x = COPY %ymm0
-# AVX512VL-NEXT: %1:vr128x = COPY %xmm1
+# AVX512VL: %0:vr256x = COPY $ymm0
+# AVX512VL-NEXT: %1:vr128x = COPY $xmm1
# AVX512VL-NEXT: %2:vr256x = VINSERTF32x4Z256rr %0, %1, 0
-# AVX512VL-NEXT: %ymm0 = COPY %2
-# AVX512VL-NEXT: RET 0, implicit %ymm0
+# AVX512VL-NEXT: $ymm0 = COPY %2
+# AVX512VL-NEXT: RET 0, implicit $ymm0
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<8 x s32>) = COPY %ymm0
- %1(<4 x s32>) = COPY %xmm1
+ %0(<8 x s32>) = COPY $ymm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<8 x s32>) = G_INSERT %0(<8 x s32>), %1(<4 x s32>), 0
- %ymm0 = COPY %2(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<8 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -60,24 +60,24 @@ registers:
- { id: 0, class: vecr }
- { id: 1, class: vecr }
- { id: 2, class: vecr }
-# AVX: %1:vr128 = COPY %xmm1
+# AVX: %1:vr128 = COPY $xmm1
# AVX-NEXT: undef %2.sub_xmm:vr256 = COPY %1
-# AVX-NEXT: %ymm0 = COPY %2
-# AVX-NEXT: RET 0, implicit %ymm0
+# AVX-NEXT: $ymm0 = COPY %2
+# AVX-NEXT: RET 0, implicit $ymm0
#
-# AVX512VL: %1:vr128x = COPY %xmm1
+# AVX512VL: %1:vr128x = COPY $xmm1
# AVX512VL-NEXT: undef %2.sub_xmm:vr256x = COPY %1
-# AVX512VL-NEXT: %ymm0 = COPY %2
-# AVX512VL-NEXT: RET 0, implicit %ymm0
+# AVX512VL-NEXT: $ymm0 = COPY %2
+# AVX512VL-NEXT: RET 0, implicit $ymm0
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
%0(<8 x s32>) = IMPLICIT_DEF
- %1(<4 x s32>) = COPY %xmm1
+ %1(<4 x s32>) = COPY $xmm1
%2(<8 x s32>) = G_INSERT %0(<8 x s32>), %1(<4 x s32>), 0
- %ymm0 = COPY %2(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<8 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -90,26 +90,26 @@ registers:
- { id: 0, class: vecr }
- { id: 1, class: vecr }
- { id: 2, class: vecr }
-# AVX: %0:vr256 = COPY %ymm0
-# AVX-NEXT: %1:vr128 = COPY %xmm1
+# AVX: %0:vr256 = COPY $ymm0
+# AVX-NEXT: %1:vr128 = COPY $xmm1
# AVX-NEXT: %2:vr256 = VINSERTF128rr %0, %1, 1
-# AVX-NEXT: %ymm0 = COPY %2
-# AVX-NEXT: RET 0, implicit %ymm0
+# AVX-NEXT: $ymm0 = COPY %2
+# AVX-NEXT: RET 0, implicit $ymm0
#
-# AVX512VL: %0:vr256x = COPY %ymm0
-# AVX512VL-NEXT: %1:vr128x = COPY %xmm1
+# AVX512VL: %0:vr256x = COPY $ymm0
+# AVX512VL-NEXT: %1:vr128x = COPY $xmm1
# AVX512VL-NEXT: %2:vr256x = VINSERTF32x4Z256rr %0, %1, 1
-# AVX512VL-NEXT: %ymm0 = COPY %2
-# AVX512VL-NEXT: RET 0, implicit %ymm0
+# AVX512VL-NEXT: $ymm0 = COPY %2
+# AVX512VL-NEXT: RET 0, implicit $ymm0
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<8 x s32>) = COPY %ymm0
- %1(<4 x s32>) = COPY %xmm1
+ %0(<8 x s32>) = COPY $ymm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<8 x s32>) = G_INSERT %0(<8 x s32>), %1(<4 x s32>), 128
- %ymm0 = COPY %2(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<8 x s32>)
+ RET 0, implicit $ymm0
...
---
name: test_insert_128_idx1_undef
@@ -122,23 +122,23 @@ registers:
- { id: 1, class: vecr }
- { id: 2, class: vecr }
# AVX: %0:vr256 = IMPLICIT_DEF
-# AVX-NEXT: %1:vr128 = COPY %xmm1
+# AVX-NEXT: %1:vr128 = COPY $xmm1
# AVX-NEXT: %2:vr256 = VINSERTF128rr %0, %1, 1
-# AVX-NEXT: %ymm0 = COPY %2
-# AVX-NEXT: RET 0, implicit %ymm0
+# AVX-NEXT: $ymm0 = COPY %2
+# AVX-NEXT: RET 0, implicit $ymm0
#
# AVX512VL: %0:vr256x = IMPLICIT_DEF
-# AVX512VL-NEXT: %1:vr128x = COPY %xmm1
+# AVX512VL-NEXT: %1:vr128x = COPY $xmm1
# AVX512VL-NEXT: %2:vr256x = VINSERTF32x4Z256rr %0, %1, 1
-# AVX512VL-NEXT: %ymm0 = COPY %2
-# AVX512VL-NEXT: RET 0, implicit %ymm0
+# AVX512VL-NEXT: $ymm0 = COPY %2
+# AVX512VL-NEXT: RET 0, implicit $ymm0
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
%0(<8 x s32>) = IMPLICIT_DEF
- %1(<4 x s32>) = COPY %xmm1
+ %1(<4 x s32>) = COPY $xmm1
%2(<8 x s32>) = G_INSERT %0(<8 x s32>), %1(<4 x s32>), 128
- %ymm0 = COPY %2(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<8 x s32>)
+ RET 0, implicit $ymm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-insert-vec512.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-insert-vec512.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-insert-vec512.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-insert-vec512.mir Wed Jan 31 14:04:26 2018
@@ -46,19 +46,19 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %ymm1
+ liveins: $zmm0, $ymm1
; ALL-LABEL: name: test_insert_128_idx0
- ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:vr128x = COPY %xmm1
+ ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
; ALL: [[VINSERTF32x4Zrr:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[COPY]], [[COPY1]], 0
- ; ALL: %zmm0 = COPY [[VINSERTF32x4Zrr]]
- ; ALL: RET 0, implicit %ymm0
- %0(<16 x s32>) = COPY %zmm0
- %1(<4 x s32>) = COPY %xmm1
+ ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrr]]
+ ; ALL: RET 0, implicit $ymm0
+ %0(<16 x s32>) = COPY $zmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<4 x s32>), 0
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %ymm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -72,18 +72,18 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_insert_128_idx0_undef
- ; ALL: [[COPY:%[0-9]+]]:vr128x = COPY %xmm1
+ ; ALL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm1
; ALL: undef %2.sub_xmm:vr512 = COPY [[COPY]]
- ; ALL: %zmm0 = COPY %2
- ; ALL: RET 0, implicit %ymm0
+ ; ALL: $zmm0 = COPY %2
+ ; ALL: RET 0, implicit $ymm0
%0(<16 x s32>) = IMPLICIT_DEF
- %1(<4 x s32>) = COPY %xmm1
+ %1(<4 x s32>) = COPY $xmm1
%2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<4 x s32>), 0
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %ymm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -97,19 +97,19 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_insert_128_idx1
- ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:vr128x = COPY %xmm1
+ ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
; ALL: [[VINSERTF32x4Zrr:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[COPY]], [[COPY1]], 1
- ; ALL: %zmm0 = COPY [[VINSERTF32x4Zrr]]
- ; ALL: RET 0, implicit %ymm0
- %0(<16 x s32>) = COPY %zmm0
- %1(<4 x s32>) = COPY %xmm1
+ ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrr]]
+ ; ALL: RET 0, implicit $ymm0
+ %0(<16 x s32>) = COPY $zmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<4 x s32>), 128
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %ymm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $ymm0
...
---
name: test_insert_128_idx1_undef
@@ -122,19 +122,19 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_insert_128_idx1_undef
; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF
- ; ALL: [[COPY:%[0-9]+]]:vr128x = COPY %xmm1
+ ; ALL: [[COPY:%[0-9]+]]:vr128x = COPY $xmm1
; ALL: [[VINSERTF32x4Zrr:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[DEF]], [[COPY]], 1
- ; ALL: %zmm0 = COPY [[VINSERTF32x4Zrr]]
- ; ALL: RET 0, implicit %ymm0
+ ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrr]]
+ ; ALL: RET 0, implicit $ymm0
%0(<16 x s32>) = IMPLICIT_DEF
- %1(<4 x s32>) = COPY %xmm1
+ %1(<4 x s32>) = COPY $xmm1
%2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<4 x s32>), 128
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %ymm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $ymm0
...
---
name: test_insert_256_idx0
@@ -147,19 +147,19 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %ymm1
+ liveins: $zmm0, $ymm1
; ALL-LABEL: name: test_insert_256_idx0
- ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:vr256x = COPY %ymm1
+ ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
; ALL: [[VINSERTF64x4Zrr:%[0-9]+]]:vr512 = VINSERTF64x4Zrr [[COPY]], [[COPY1]], 0
- ; ALL: %zmm0 = COPY [[VINSERTF64x4Zrr]]
- ; ALL: RET 0, implicit %ymm0
- %0(<16 x s32>) = COPY %zmm0
- %1(<8 x s32>) = COPY %ymm1
+ ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrr]]
+ ; ALL: RET 0, implicit $ymm0
+ %0(<16 x s32>) = COPY $zmm0
+ %1(<8 x s32>) = COPY $ymm1
%2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<8 x s32>), 0
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %ymm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -173,18 +173,18 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_insert_256_idx0_undef
- ; ALL: [[COPY:%[0-9]+]]:vr256x = COPY %ymm1
+ ; ALL: [[COPY:%[0-9]+]]:vr256x = COPY $ymm1
; ALL: undef %2.sub_ymm:vr512 = COPY [[COPY]]
- ; ALL: %zmm0 = COPY %2
- ; ALL: RET 0, implicit %ymm0
+ ; ALL: $zmm0 = COPY %2
+ ; ALL: RET 0, implicit $ymm0
%0(<16 x s32>) = IMPLICIT_DEF
- %1(<8 x s32>) = COPY %ymm1
+ %1(<8 x s32>) = COPY $ymm1
%2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<8 x s32>), 0
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %ymm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -198,19 +198,19 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_insert_256_idx1
- ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:vr256x = COPY %ymm1
+ ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
; ALL: [[VINSERTF64x4Zrr:%[0-9]+]]:vr512 = VINSERTF64x4Zrr [[COPY]], [[COPY1]], 1
- ; ALL: %zmm0 = COPY [[VINSERTF64x4Zrr]]
- ; ALL: RET 0, implicit %ymm0
- %0(<16 x s32>) = COPY %zmm0
- %1(<8 x s32>) = COPY %ymm1
+ ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrr]]
+ ; ALL: RET 0, implicit $ymm0
+ %0(<16 x s32>) = COPY $zmm0
+ %1(<8 x s32>) = COPY $ymm1
%2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<8 x s32>), 256
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %ymm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $ymm0
...
---
name: test_insert_256_idx1_undef
@@ -223,17 +223,17 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; ALL-LABEL: name: test_insert_256_idx1_undef
; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF
- ; ALL: [[COPY:%[0-9]+]]:vr256x = COPY %ymm1
+ ; ALL: [[COPY:%[0-9]+]]:vr256x = COPY $ymm1
; ALL: [[VINSERTF64x4Zrr:%[0-9]+]]:vr512 = VINSERTF64x4Zrr [[DEF]], [[COPY]], 1
- ; ALL: %zmm0 = COPY [[VINSERTF64x4Zrr]]
- ; ALL: RET 0, implicit %ymm0
+ ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrr]]
+ ; ALL: RET 0, implicit $ymm0
%0(<16 x s32>) = IMPLICIT_DEF
- %1(<8 x s32>) = COPY %ymm1
+ %1(<8 x s32>) = COPY $ymm1
%2(<16 x s32>) = G_INSERT %0(<16 x s32>), %1(<8 x s32>), 256
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %ymm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $ymm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-intrinsic-x86-flags-read-u32.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-intrinsic-x86-flags-read-u32.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-intrinsic-x86-flags-read-u32.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-intrinsic-x86-flags-read-u32.mir Wed Jan 31 14:04:26 2018
@@ -9,8 +9,8 @@
define void @read_flags() { ret void }
; CHECK-LABEL: name: read_flags
; CHECK: bb.0:
- ; CHECK: [[RDFLAGS32_:%[0-9]+]]:gr32 = RDFLAGS32 implicit-def %esp, implicit %esp
- ; CHECK: %eax = COPY [[RDFLAGS32_]]
+ ; CHECK: [[RDFLAGS32_:%[0-9]+]]:gr32 = RDFLAGS32 implicit-def $esp, implicit $esp
+ ; CHECK: $eax = COPY [[RDFLAGS32_]]
...
---
@@ -24,5 +24,5 @@ registers:
body: |
bb.0:
%0(s32) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.x86.flags.read.u32)
- %eax = COPY %0(s32)
+ $eax = COPY %0(s32)
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-leaf-constant.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-leaf-constant.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-leaf-constant.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-leaf-constant.mir Wed Jan 31 14:04:26 2018
@@ -34,11 +34,11 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: const_i32_1
; CHECK: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 1
- ; CHECK: %eax = COPY [[MOV32ri]]
- ; CHECK: RET 0, implicit %eax
+ ; CHECK: $eax = COPY [[MOV32ri]]
+ ; CHECK: RET 0, implicit $eax
%0(s32) = G_CONSTANT i32 1
- %eax = COPY %0(s32)
- RET 0, implicit %eax
+ $eax = COPY %0(s32)
+ RET 0, implicit $eax
...
---
name: const_i32_1_optsize
@@ -50,12 +50,12 @@ registers:
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: const_i32_1_optsize
- ; CHECK: [[MOV32r1_:%[0-9]+]]:gr32 = MOV32r1 implicit-def %eflags
- ; CHECK: %eax = COPY [[MOV32r1_]]
- ; CHECK: RET 0, implicit %eax
+ ; CHECK: [[MOV32r1_:%[0-9]+]]:gr32 = MOV32r1 implicit-def $eflags
+ ; CHECK: $eax = COPY [[MOV32r1_]]
+ ; CHECK: RET 0, implicit $eax
%0(s32) = G_CONSTANT i32 1
- %eax = COPY %0(s32)
- RET 0, implicit %eax
+ $eax = COPY %0(s32)
+ RET 0, implicit $eax
...
---
name: const_i32_1b
@@ -68,11 +68,11 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: const_i32_1b
; CHECK: [[MOV32ri:%[0-9]+]]:gr32 = MOV32ri 1
- ; CHECK: %eax = COPY [[MOV32ri]]
- ; CHECK: RET 0, implicit %eax
+ ; CHECK: $eax = COPY [[MOV32ri]]
+ ; CHECK: RET 0, implicit $eax
%0(s32) = G_CONSTANT i32 1
- %eax = COPY %0(s32)
- RET 0, implicit %eax
+ $eax = COPY %0(s32)
+ RET 0, implicit $eax
...
---
name: const_i32_1_optsizeb
@@ -84,10 +84,10 @@ registers:
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: const_i32_1_optsizeb
- ; CHECK: [[MOV32r1_:%[0-9]+]]:gr32 = MOV32r1 implicit-def %eflags
- ; CHECK: %eax = COPY [[MOV32r1_]]
- ; CHECK: RET 0, implicit %eax
+ ; CHECK: [[MOV32r1_:%[0-9]+]]:gr32 = MOV32r1 implicit-def $eflags
+ ; CHECK: $eax = COPY [[MOV32r1_]]
+ ; CHECK: RET 0, implicit $eax
%0(s32) = G_CONSTANT i32 1
- %eax = COPY %0(s32)
- RET 0, implicit %eax
+ $eax = COPY %0(s32)
+ RET 0, implicit $eax
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir Wed Jan 31 14:04:26 2018
@@ -57,15 +57,15 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_load_i8
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.0, align 0)
- ; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[MOV32rm]], 1, %noreg, 0, %noreg :: (load 1 from %ir.p1)
- ; ALL: %al = COPY [[MOV8rm]]
- ; ALL: RET 0, implicit %al
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[MOV32rm]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1)
+ ; ALL: $al = COPY [[MOV8rm]]
+ ; ALL: RET 0, implicit $al
%1(p0) = G_FRAME_INDEX %fixed-stack.0
%0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
%2(s8) = G_LOAD %0(p0) :: (load 1 from %ir.p1)
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
---
@@ -82,15 +82,15 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_load_i16
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.0, align 0)
- ; ALL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[MOV32rm]], 1, %noreg, 0, %noreg :: (load 2 from %ir.p1)
- ; ALL: %ax = COPY [[MOV16rm]]
- ; ALL: RET 0, implicit %ax
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[MOV32rm]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1)
+ ; ALL: $ax = COPY [[MOV16rm]]
+ ; ALL: RET 0, implicit $ax
%1(p0) = G_FRAME_INDEX %fixed-stack.0
%0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
%2(s16) = G_LOAD %0(p0) :: (load 2 from %ir.p1)
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
@@ -107,15 +107,15 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_load_i32
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm [[MOV32rm]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; ALL: %eax = COPY [[MOV32rm1]]
- ; ALL: RET 0, implicit %eax
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm [[MOV32rm]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; ALL: $eax = COPY [[MOV32rm1]]
+ ; ALL: RET 0, implicit $eax
%1(p0) = G_FRAME_INDEX %fixed-stack.0
%0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
%2(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -134,18 +134,18 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_store_i8
- ; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 1 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.1, align 0)
- ; ALL: MOV8mr [[MOV32rm]], 1, %noreg, 0, %noreg, [[MOV8rm]] :: (store 1 into %ir.p1)
- ; ALL: %eax = COPY [[MOV32rm]]
- ; ALL: RET 0, implicit %eax
+ ; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; ALL: MOV8mr [[MOV32rm]], 1, $noreg, 0, $noreg, [[MOV8rm]] :: (store 1 into %ir.p1)
+ ; ALL: $eax = COPY [[MOV32rm]]
+ ; ALL: RET 0, implicit $eax
%2(p0) = G_FRAME_INDEX %fixed-stack.1
%0(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 0)
%3(p0) = G_FRAME_INDEX %fixed-stack.0
%1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
G_STORE %0(s8), %1(p0) :: (store 1 into %ir.p1)
- %eax = COPY %1(p0)
- RET 0, implicit %eax
+ $eax = COPY %1(p0)
+ RET 0, implicit $eax
...
---
@@ -164,18 +164,18 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_store_i16
- ; ALL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 2 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.1, align 0)
- ; ALL: MOV16mr [[MOV32rm]], 1, %noreg, 0, %noreg, [[MOV16rm]] :: (store 2 into %ir.p1)
- ; ALL: %eax = COPY [[MOV32rm]]
- ; ALL: RET 0, implicit %eax
+ ; ALL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; ALL: MOV16mr [[MOV32rm]], 1, $noreg, 0, $noreg, [[MOV16rm]] :: (store 2 into %ir.p1)
+ ; ALL: $eax = COPY [[MOV32rm]]
+ ; ALL: RET 0, implicit $eax
%2(p0) = G_FRAME_INDEX %fixed-stack.1
%0(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 0)
%3(p0) = G_FRAME_INDEX %fixed-stack.0
%1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
G_STORE %0(s16), %1(p0) :: (store 2 into %ir.p1)
- %eax = COPY %1(p0)
- RET 0, implicit %eax
+ $eax = COPY %1(p0)
+ RET 0, implicit $eax
...
---
@@ -194,18 +194,18 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_store_i32
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.1, align 0)
- ; ALL: MOV32mr [[MOV32rm1]], 1, %noreg, 0, %noreg, [[MOV32rm]] :: (store 4 into %ir.p1)
- ; ALL: %eax = COPY [[MOV32rm1]]
- ; ALL: RET 0, implicit %eax
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; ALL: MOV32mr [[MOV32rm1]], 1, $noreg, 0, $noreg, [[MOV32rm]] :: (store 4 into %ir.p1)
+ ; ALL: $eax = COPY [[MOV32rm1]]
+ ; ALL: RET 0, implicit $eax
%2(p0) = G_FRAME_INDEX %fixed-stack.1
%0(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
%3(p0) = G_FRAME_INDEX %fixed-stack.0
%1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1)
- %eax = COPY %1(p0)
- RET 0, implicit %eax
+ $eax = COPY %1(p0)
+ RET 0, implicit $eax
...
---
@@ -222,15 +222,15 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_load_ptr
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm [[MOV32rm]], 1, %noreg, 0, %noreg :: (load 4 from %ir.ptr1)
- ; ALL: %eax = COPY [[MOV32rm1]]
- ; ALL: RET 0, implicit %eax
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm [[MOV32rm]], 1, $noreg, 0, $noreg :: (load 4 from %ir.ptr1)
+ ; ALL: $eax = COPY [[MOV32rm1]]
+ ; ALL: RET 0, implicit $eax
%1(p0) = G_FRAME_INDEX %fixed-stack.0
%0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
%2(p0) = G_LOAD %0(p0) :: (load 4 from %ir.ptr1)
- %eax = COPY %2(p0)
- RET 0, implicit %eax
+ $eax = COPY %2(p0)
+ RET 0, implicit $eax
...
---
@@ -249,9 +249,9 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_store_ptr
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, %noreg, 0, %noreg :: (invariant load 4 from %fixed-stack.1, align 0)
- ; ALL: MOV32mr [[MOV32rm]], 1, %noreg, 0, %noreg, [[MOV32rm1]] :: (store 4 into %ir.ptr1)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; ALL: MOV32mr [[MOV32rm]], 1, $noreg, 0, $noreg, [[MOV32rm1]] :: (store 4 into %ir.ptr1)
; ALL: RET 0
%2(p0) = G_FRAME_INDEX %fixed-stack.1
%0(p0) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar.mir Wed Jan 31 14:04:26 2018
@@ -109,32 +109,32 @@ registers:
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; SSE-LABEL: name: test_load_i8
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, %noreg, 0, %noreg :: (load 1 from %ir.p1)
- ; SSE: %al = COPY [[MOV8rm]]
- ; SSE: RET 0, implicit %al
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1)
+ ; SSE: $al = COPY [[MOV8rm]]
+ ; SSE: RET 0, implicit $al
; AVX-LABEL: name: test_load_i8
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, %noreg, 0, %noreg :: (load 1 from %ir.p1)
- ; AVX: %al = COPY [[MOV8rm]]
- ; AVX: RET 0, implicit %al
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1)
+ ; AVX: $al = COPY [[MOV8rm]]
+ ; AVX: RET 0, implicit $al
; AVX512F-LABEL: name: test_load_i8
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, %noreg, 0, %noreg :: (load 1 from %ir.p1)
- ; AVX512F: %al = COPY [[MOV8rm]]
- ; AVX512F: RET 0, implicit %al
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1)
+ ; AVX512F: $al = COPY [[MOV8rm]]
+ ; AVX512F: RET 0, implicit $al
; AVX512VL-LABEL: name: test_load_i8
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, %noreg, 0, %noreg :: (load 1 from %ir.p1)
- ; AVX512VL: %al = COPY [[MOV8rm]]
- ; AVX512VL: RET 0, implicit %al
- %0(p0) = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[COPY]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1)
+ ; AVX512VL: $al = COPY [[MOV8rm]]
+ ; AVX512VL: RET 0, implicit $al
+ %0(p0) = COPY $rdi
%1(s8) = G_LOAD %0(p0) :: (load 1 from %ir.p1)
- %al = COPY %1(s8)
- RET 0, implicit %al
+ $al = COPY %1(s8)
+ RET 0, implicit $al
...
---
@@ -147,32 +147,32 @@ registers:
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; SSE-LABEL: name: test_load_i16
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, %noreg, 0, %noreg :: (load 2 from %ir.p1)
- ; SSE: %ax = COPY [[MOV16rm]]
- ; SSE: RET 0, implicit %ax
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1)
+ ; SSE: $ax = COPY [[MOV16rm]]
+ ; SSE: RET 0, implicit $ax
; AVX-LABEL: name: test_load_i16
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, %noreg, 0, %noreg :: (load 2 from %ir.p1)
- ; AVX: %ax = COPY [[MOV16rm]]
- ; AVX: RET 0, implicit %ax
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1)
+ ; AVX: $ax = COPY [[MOV16rm]]
+ ; AVX: RET 0, implicit $ax
; AVX512F-LABEL: name: test_load_i16
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, %noreg, 0, %noreg :: (load 2 from %ir.p1)
- ; AVX512F: %ax = COPY [[MOV16rm]]
- ; AVX512F: RET 0, implicit %ax
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1)
+ ; AVX512F: $ax = COPY [[MOV16rm]]
+ ; AVX512F: RET 0, implicit $ax
; AVX512VL-LABEL: name: test_load_i16
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, %noreg, 0, %noreg :: (load 2 from %ir.p1)
- ; AVX512VL: %ax = COPY [[MOV16rm]]
- ; AVX512VL: RET 0, implicit %ax
- %0(p0) = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[COPY]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1)
+ ; AVX512VL: $ax = COPY [[MOV16rm]]
+ ; AVX512VL: RET 0, implicit $ax
+ %0(p0) = COPY $rdi
%1(s16) = G_LOAD %0(p0) :: (load 2 from %ir.p1)
- %ax = COPY %1(s16)
- RET 0, implicit %ax
+ $ax = COPY %1(s16)
+ RET 0, implicit $ax
...
---
@@ -185,32 +185,32 @@ registers:
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; SSE-LABEL: name: test_load_i32
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; SSE: %eax = COPY [[MOV32rm]]
- ; SSE: RET 0, implicit %eax
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; SSE: $eax = COPY [[MOV32rm]]
+ ; SSE: RET 0, implicit $eax
; AVX-LABEL: name: test_load_i32
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; AVX: %eax = COPY [[MOV32rm]]
- ; AVX: RET 0, implicit %eax
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; AVX: $eax = COPY [[MOV32rm]]
+ ; AVX: RET 0, implicit $eax
; AVX512F-LABEL: name: test_load_i32
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; AVX512F: %eax = COPY [[MOV32rm]]
- ; AVX512F: RET 0, implicit %eax
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; AVX512F: $eax = COPY [[MOV32rm]]
+ ; AVX512F: RET 0, implicit $eax
; AVX512VL-LABEL: name: test_load_i32
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; AVX512VL: %eax = COPY [[MOV32rm]]
- ; AVX512VL: RET 0, implicit %eax
- %0(p0) = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; AVX512VL: $eax = COPY [[MOV32rm]]
+ ; AVX512VL: RET 0, implicit $eax
+ %0(p0) = COPY $rdi
%1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
---
@@ -223,32 +223,32 @@ registers:
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; SSE-LABEL: name: test_load_i64
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
- ; SSE: %rax = COPY [[MOV64rm]]
- ; SSE: RET 0, implicit %rax
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; SSE: $rax = COPY [[MOV64rm]]
+ ; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_load_i64
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
- ; AVX: %rax = COPY [[MOV64rm]]
- ; AVX: RET 0, implicit %rax
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; AVX: $rax = COPY [[MOV64rm]]
+ ; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_load_i64
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
- ; AVX512F: %rax = COPY [[MOV64rm]]
- ; AVX512F: RET 0, implicit %rax
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; AVX512F: $rax = COPY [[MOV64rm]]
+ ; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_load_i64
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
- ; AVX512VL: %rax = COPY [[MOV64rm]]
- ; AVX512VL: RET 0, implicit %rax
- %0(p0) = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; AVX512VL: $rax = COPY [[MOV64rm]]
+ ; AVX512VL: RET 0, implicit $rax
+ %0(p0) = COPY $rdi
%1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
- %rax = COPY %1(s64)
- RET 0, implicit %rax
+ $rax = COPY %1(s64)
+ RET 0, implicit $rax
...
---
@@ -261,32 +261,32 @@ registers:
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; SSE-LABEL: name: test_load_float
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; SSE: %xmm0 = COPY [[MOV32rm]]
- ; SSE: RET 0, implicit %xmm0
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; SSE: $xmm0 = COPY [[MOV32rm]]
+ ; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_load_float
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; AVX: %xmm0 = COPY [[MOV32rm]]
- ; AVX: RET 0, implicit %xmm0
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; AVX: $xmm0 = COPY [[MOV32rm]]
+ ; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_load_float
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; AVX512F: %xmm0 = COPY [[MOV32rm]]
- ; AVX512F: RET 0, implicit %xmm0
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; AVX512F: $xmm0 = COPY [[MOV32rm]]
+ ; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_load_float
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; AVX512VL: %xmm0 = COPY [[MOV32rm]]
- ; AVX512VL: RET 0, implicit %xmm0
- %0(p0) = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; AVX512VL: $xmm0 = COPY [[MOV32rm]]
+ ; AVX512VL: RET 0, implicit $xmm0
+ %0(p0) = COPY $rdi
%1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
- %xmm0 = COPY %1(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(s32)
+ RET 0, implicit $xmm0
...
---
@@ -299,32 +299,32 @@ registers:
- { id: 1, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; SSE-LABEL: name: test_load_float_vecreg
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; SSE: %xmm0 = COPY [[MOVSSrm]]
- ; SSE: RET 0, implicit %xmm0
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[MOVSSrm:%[0-9]+]]:fr32 = MOVSSrm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; SSE: $xmm0 = COPY [[MOVSSrm]]
+ ; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_load_float_vecreg
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[VMOVSSrm:%[0-9]+]]:fr32 = VMOVSSrm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; AVX: %xmm0 = COPY [[VMOVSSrm]]
- ; AVX: RET 0, implicit %xmm0
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[VMOVSSrm:%[0-9]+]]:fr32 = VMOVSSrm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; AVX: $xmm0 = COPY [[VMOVSSrm]]
+ ; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_load_float_vecreg
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[VMOVSSZrm:%[0-9]+]]:fr32x = VMOVSSZrm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; AVX512F: %xmm0 = COPY [[VMOVSSZrm]]
- ; AVX512F: RET 0, implicit %xmm0
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[VMOVSSZrm:%[0-9]+]]:fr32x = VMOVSSZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; AVX512F: $xmm0 = COPY [[VMOVSSZrm]]
+ ; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_load_float_vecreg
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[VMOVSSZrm:%[0-9]+]]:fr32x = VMOVSSZrm [[COPY]], 1, %noreg, 0, %noreg :: (load 4 from %ir.p1)
- ; AVX512VL: %xmm0 = COPY [[VMOVSSZrm]]
- ; AVX512VL: RET 0, implicit %xmm0
- %0(p0) = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[VMOVSSZrm:%[0-9]+]]:fr32x = VMOVSSZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
+ ; AVX512VL: $xmm0 = COPY [[VMOVSSZrm]]
+ ; AVX512VL: RET 0, implicit $xmm0
+ %0(p0) = COPY $rdi
%1(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
- %xmm0 = COPY %1(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(s32)
+ RET 0, implicit $xmm0
...
---
@@ -337,32 +337,32 @@ registers:
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; SSE-LABEL: name: test_load_double
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
- ; SSE: %xmm0 = COPY [[MOV64rm]]
- ; SSE: RET 0, implicit %xmm0
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; SSE: $xmm0 = COPY [[MOV64rm]]
+ ; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_load_double
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
- ; AVX: %xmm0 = COPY [[MOV64rm]]
- ; AVX: RET 0, implicit %xmm0
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; AVX: $xmm0 = COPY [[MOV64rm]]
+ ; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_load_double
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
- ; AVX512F: %xmm0 = COPY [[MOV64rm]]
- ; AVX512F: RET 0, implicit %xmm0
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; AVX512F: $xmm0 = COPY [[MOV64rm]]
+ ; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_load_double
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
- ; AVX512VL: %xmm0 = COPY [[MOV64rm]]
- ; AVX512VL: RET 0, implicit %xmm0
- %0(p0) = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; AVX512VL: $xmm0 = COPY [[MOV64rm]]
+ ; AVX512VL: RET 0, implicit $xmm0
+ %0(p0) = COPY $rdi
%1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
- %xmm0 = COPY %1(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(s64)
+ RET 0, implicit $xmm0
...
---
@@ -375,32 +375,32 @@ registers:
- { id: 1, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; SSE-LABEL: name: test_load_double_vecreg
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
- ; SSE: %xmm0 = COPY [[MOVSDrm]]
- ; SSE: RET 0, implicit %xmm0
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[MOVSDrm:%[0-9]+]]:fr64 = MOVSDrm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; SSE: $xmm0 = COPY [[MOVSDrm]]
+ ; SSE: RET 0, implicit $xmm0
; AVX-LABEL: name: test_load_double_vecreg
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[VMOVSDrm:%[0-9]+]]:fr64 = VMOVSDrm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
- ; AVX: %xmm0 = COPY [[VMOVSDrm]]
- ; AVX: RET 0, implicit %xmm0
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[VMOVSDrm:%[0-9]+]]:fr64 = VMOVSDrm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; AVX: $xmm0 = COPY [[VMOVSDrm]]
+ ; AVX: RET 0, implicit $xmm0
; AVX512F-LABEL: name: test_load_double_vecreg
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[VMOVSDZrm:%[0-9]+]]:fr64x = VMOVSDZrm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
- ; AVX512F: %xmm0 = COPY [[VMOVSDZrm]]
- ; AVX512F: RET 0, implicit %xmm0
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[VMOVSDZrm:%[0-9]+]]:fr64x = VMOVSDZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; AVX512F: $xmm0 = COPY [[VMOVSDZrm]]
+ ; AVX512F: RET 0, implicit $xmm0
; AVX512VL-LABEL: name: test_load_double_vecreg
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[VMOVSDZrm:%[0-9]+]]:fr64x = VMOVSDZrm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.p1)
- ; AVX512VL: %xmm0 = COPY [[VMOVSDZrm]]
- ; AVX512VL: RET 0, implicit %xmm0
- %0(p0) = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[VMOVSDZrm:%[0-9]+]]:fr64x = VMOVSDZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.p1)
+ ; AVX512VL: $xmm0 = COPY [[VMOVSDZrm]]
+ ; AVX512VL: RET 0, implicit $xmm0
+ %0(p0) = COPY $rdi
%1(s64) = G_LOAD %0(p0) :: (load 8 from %ir.p1)
- %xmm0 = COPY %1(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(s64)
+ RET 0, implicit $xmm0
...
---
@@ -413,37 +413,37 @@ registers:
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %rsi
+ liveins: $edi, $rsi
; SSE-LABEL: name: test_store_i32
- ; SSE: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; SSE: MOV32mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 4 into %ir.p1)
- ; SSE: %rax = COPY [[COPY1]]
- ; SSE: RET 0, implicit %rax
+ ; SSE: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; SSE: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
+ ; SSE: $rax = COPY [[COPY1]]
+ ; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_store_i32
- ; AVX: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; AVX: MOV32mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 4 into %ir.p1)
- ; AVX: %rax = COPY [[COPY1]]
- ; AVX: RET 0, implicit %rax
+ ; AVX: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; AVX: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
+ ; AVX: $rax = COPY [[COPY1]]
+ ; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_store_i32
- ; AVX512F: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; AVX512F: MOV32mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 4 into %ir.p1)
- ; AVX512F: %rax = COPY [[COPY1]]
- ; AVX512F: RET 0, implicit %rax
+ ; AVX512F: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; AVX512F: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
+ ; AVX512F: $rax = COPY [[COPY1]]
+ ; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_store_i32
- ; AVX512VL: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; AVX512VL: MOV32mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 4 into %ir.p1)
- ; AVX512VL: %rax = COPY [[COPY1]]
- ; AVX512VL: RET 0, implicit %rax
- %0(s32) = COPY %edi
- %1(p0) = COPY %rsi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; AVX512VL: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
+ ; AVX512VL: $rax = COPY [[COPY1]]
+ ; AVX512VL: RET 0, implicit $rax
+ %0(s32) = COPY $edi
+ %1(p0) = COPY $rsi
G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
---
@@ -456,37 +456,37 @@ registers:
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; SSE-LABEL: name: test_store_i64
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; SSE: MOV64mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 8 into %ir.p1)
- ; SSE: %rax = COPY [[COPY1]]
- ; SSE: RET 0, implicit %rax
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; SSE: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
+ ; SSE: $rax = COPY [[COPY1]]
+ ; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_store_i64
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; AVX: MOV64mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 8 into %ir.p1)
- ; AVX: %rax = COPY [[COPY1]]
- ; AVX: RET 0, implicit %rax
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; AVX: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
+ ; AVX: $rax = COPY [[COPY1]]
+ ; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_store_i64
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; AVX512F: MOV64mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 8 into %ir.p1)
- ; AVX512F: %rax = COPY [[COPY1]]
- ; AVX512F: RET 0, implicit %rax
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; AVX512F: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
+ ; AVX512F: $rax = COPY [[COPY1]]
+ ; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_store_i64
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; AVX512VL: MOV64mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 8 into %ir.p1)
- ; AVX512VL: %rax = COPY [[COPY1]]
- ; AVX512VL: RET 0, implicit %rax
- %0(s64) = COPY %rdi
- %1(p0) = COPY %rsi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; AVX512VL: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
+ ; AVX512VL: $rax = COPY [[COPY1]]
+ ; AVX512VL: RET 0, implicit $rax
+ %0(s64) = COPY $rdi
+ %1(p0) = COPY $rsi
G_STORE %0(s64), %1(p0) :: (store 8 into %ir.p1)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
---
@@ -500,42 +500,42 @@ registers:
- { id: 2, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %xmm0
+ liveins: $rdi, $xmm0
; SSE-LABEL: name: test_store_float
- ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
- ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
+ ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
; SSE: [[COPY2:%[0-9]+]]:gr32 = COPY [[COPY]]
- ; SSE: MOV32mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY2]] :: (store 4 into %ir.p1)
- ; SSE: %rax = COPY [[COPY1]]
- ; SSE: RET 0, implicit %rax
+ ; SSE: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 4 into %ir.p1)
+ ; SSE: $rax = COPY [[COPY1]]
+ ; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_store_float
- ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
- ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
+ ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
; AVX: [[COPY2:%[0-9]+]]:gr32 = COPY [[COPY]]
- ; AVX: MOV32mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY2]] :: (store 4 into %ir.p1)
- ; AVX: %rax = COPY [[COPY1]]
- ; AVX: RET 0, implicit %rax
+ ; AVX: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 4 into %ir.p1)
+ ; AVX: $rax = COPY [[COPY1]]
+ ; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_store_float
- ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
+ ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
; AVX512F: [[COPY2:%[0-9]+]]:gr32 = COPY [[COPY]]
- ; AVX512F: MOV32mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY2]] :: (store 4 into %ir.p1)
- ; AVX512F: %rax = COPY [[COPY1]]
- ; AVX512F: RET 0, implicit %rax
+ ; AVX512F: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 4 into %ir.p1)
+ ; AVX512F: $rax = COPY [[COPY1]]
+ ; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_store_float
- ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
; AVX512VL: [[COPY2:%[0-9]+]]:gr32 = COPY [[COPY]]
- ; AVX512VL: MOV32mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY2]] :: (store 4 into %ir.p1)
- ; AVX512VL: %rax = COPY [[COPY1]]
- ; AVX512VL: RET 0, implicit %rax
- %0(s32) = COPY %xmm0
- %1(p0) = COPY %rdi
+ ; AVX512VL: MOV32mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 4 into %ir.p1)
+ ; AVX512VL: $rax = COPY [[COPY1]]
+ ; AVX512VL: RET 0, implicit $rax
+ %0(s32) = COPY $xmm0
+ %1(p0) = COPY $rdi
%2(s32) = COPY %0(s32)
G_STORE %2(s32), %1(p0) :: (store 4 into %ir.p1)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
---
@@ -548,37 +548,37 @@ registers:
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %xmm0
+ liveins: $rdi, $xmm0
; SSE-LABEL: name: test_store_float_vec
- ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
- ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: MOVSSmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 4 into %ir.p1)
- ; SSE: %rax = COPY [[COPY1]]
- ; SSE: RET 0, implicit %rax
+ ; SSE: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: MOVSSmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
+ ; SSE: $rax = COPY [[COPY1]]
+ ; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_store_float_vec
- ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY %xmm0
- ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: VMOVSSmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 4 into %ir.p1)
- ; AVX: %rax = COPY [[COPY1]]
- ; AVX: RET 0, implicit %rax
+ ; AVX: [[COPY:%[0-9]+]]:fr32 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: VMOVSSmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
+ ; AVX: $rax = COPY [[COPY1]]
+ ; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_store_float_vec
- ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: VMOVSSZmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 4 into %ir.p1)
- ; AVX512F: %rax = COPY [[COPY1]]
- ; AVX512F: RET 0, implicit %rax
+ ; AVX512F: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: VMOVSSZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
+ ; AVX512F: $rax = COPY [[COPY1]]
+ ; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_store_float_vec
- ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY %xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: VMOVSSZmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 4 into %ir.p1)
- ; AVX512VL: %rax = COPY [[COPY1]]
- ; AVX512VL: RET 0, implicit %rax
- %0(s32) = COPY %xmm0
- %1(p0) = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:fr32x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: VMOVSSZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 4 into %ir.p1)
+ ; AVX512VL: $rax = COPY [[COPY1]]
+ ; AVX512VL: RET 0, implicit $rax
+ %0(s32) = COPY $xmm0
+ %1(p0) = COPY $rdi
G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
---
@@ -590,45 +590,45 @@ registers:
- { id: 0, class: vecr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# NO_AVX512X: %0:fr64 = COPY %xmm0
+# NO_AVX512X: %0:fr64 = COPY $xmm0
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %xmm0
+ liveins: $rdi, $xmm0
; SSE-LABEL: name: test_store_double
- ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0
- ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
+ ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
; SSE: [[COPY2:%[0-9]+]]:gr64 = COPY [[COPY]]
- ; SSE: MOV64mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY2]] :: (store 8 into %ir.p1)
- ; SSE: %rax = COPY [[COPY1]]
- ; SSE: RET 0, implicit %rax
+ ; SSE: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 8 into %ir.p1)
+ ; SSE: $rax = COPY [[COPY1]]
+ ; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_store_double
- ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0
- ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
+ ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
; AVX: [[COPY2:%[0-9]+]]:gr64 = COPY [[COPY]]
- ; AVX: MOV64mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY2]] :: (store 8 into %ir.p1)
- ; AVX: %rax = COPY [[COPY1]]
- ; AVX: RET 0, implicit %rax
+ ; AVX: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 8 into %ir.p1)
+ ; AVX: $rax = COPY [[COPY1]]
+ ; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_store_double
- ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
+ ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
; AVX512F: [[COPY2:%[0-9]+]]:gr64 = COPY [[COPY]]
- ; AVX512F: MOV64mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY2]] :: (store 8 into %ir.p1)
- ; AVX512F: %rax = COPY [[COPY1]]
- ; AVX512F: RET 0, implicit %rax
+ ; AVX512F: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 8 into %ir.p1)
+ ; AVX512F: $rax = COPY [[COPY1]]
+ ; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_store_double
- ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
; AVX512VL: [[COPY2:%[0-9]+]]:gr64 = COPY [[COPY]]
- ; AVX512VL: MOV64mr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY2]] :: (store 8 into %ir.p1)
- ; AVX512VL: %rax = COPY [[COPY1]]
- ; AVX512VL: RET 0, implicit %rax
- %0(s64) = COPY %xmm0
- %1(p0) = COPY %rdi
+ ; AVX512VL: MOV64mr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY2]] :: (store 8 into %ir.p1)
+ ; AVX512VL: $rax = COPY [[COPY1]]
+ ; AVX512VL: RET 0, implicit $rax
+ %0(s64) = COPY $xmm0
+ %1(p0) = COPY $rdi
%2(s64) = COPY %0(s64)
G_STORE %2(s64), %1(p0) :: (store 8 into %ir.p1)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
---
@@ -641,37 +641,37 @@ registers:
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %xmm0
+ liveins: $rdi, $xmm0
; SSE-LABEL: name: test_store_double_vec
- ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0
- ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: MOVSDmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 8 into %ir.p1)
- ; SSE: %rax = COPY [[COPY1]]
- ; SSE: RET 0, implicit %rax
+ ; SSE: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
+ ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: MOVSDmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
+ ; SSE: $rax = COPY [[COPY1]]
+ ; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_store_double_vec
- ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY %xmm0
- ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: VMOVSDmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 8 into %ir.p1)
- ; AVX: %rax = COPY [[COPY1]]
- ; AVX: RET 0, implicit %rax
+ ; AVX: [[COPY:%[0-9]+]]:fr64 = COPY $xmm0
+ ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: VMOVSDmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
+ ; AVX: $rax = COPY [[COPY1]]
+ ; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_store_double_vec
- ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: VMOVSDZmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 8 into %ir.p1)
- ; AVX512F: %rax = COPY [[COPY1]]
- ; AVX512F: RET 0, implicit %rax
+ ; AVX512F: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: VMOVSDZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
+ ; AVX512F: $rax = COPY [[COPY1]]
+ ; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_store_double_vec
- ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY %xmm0
- ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: VMOVSDZmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 8 into %ir.p1)
- ; AVX512VL: %rax = COPY [[COPY1]]
- ; AVX512VL: RET 0, implicit %rax
- %0(s64) = COPY %xmm0
- %1(p0) = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:fr64x = COPY $xmm0
+ ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: VMOVSDZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 8 into %ir.p1)
+ ; AVX512VL: $rax = COPY [[COPY1]]
+ ; AVX512VL: RET 0, implicit $rax
+ %0(s64) = COPY $xmm0
+ %1(p0) = COPY $rdi
G_STORE %0(s64), %1(p0) :: (store 8 into %ir.p1)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
---
@@ -685,32 +685,32 @@ registers:
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; SSE-LABEL: name: test_load_ptr
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.ptr1)
- ; SSE: %rax = COPY [[MOV64rm]]
- ; SSE: RET 0, implicit %rax
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.ptr1)
+ ; SSE: $rax = COPY [[MOV64rm]]
+ ; SSE: RET 0, implicit $rax
; AVX-LABEL: name: test_load_ptr
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.ptr1)
- ; AVX: %rax = COPY [[MOV64rm]]
- ; AVX: RET 0, implicit %rax
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.ptr1)
+ ; AVX: $rax = COPY [[MOV64rm]]
+ ; AVX: RET 0, implicit $rax
; AVX512F-LABEL: name: test_load_ptr
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.ptr1)
- ; AVX512F: %rax = COPY [[MOV64rm]]
- ; AVX512F: RET 0, implicit %rax
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.ptr1)
+ ; AVX512F: $rax = COPY [[MOV64rm]]
+ ; AVX512F: RET 0, implicit $rax
; AVX512VL-LABEL: name: test_load_ptr
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, %noreg, 0, %noreg :: (load 8 from %ir.ptr1)
- ; AVX512VL: %rax = COPY [[MOV64rm]]
- ; AVX512VL: RET 0, implicit %rax
- %0(p0) = COPY %rdi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[MOV64rm:%[0-9]+]]:gr64 = MOV64rm [[COPY]], 1, $noreg, 0, $noreg :: (load 8 from %ir.ptr1)
+ ; AVX512VL: $rax = COPY [[MOV64rm]]
+ ; AVX512VL: RET 0, implicit $rax
+ %0(p0) = COPY $rdi
%1(p0) = G_LOAD %0(p0) :: (load 8 from %ir.ptr1)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
---
@@ -724,30 +724,30 @@ registers:
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; SSE-LABEL: name: test_store_ptr
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; SSE: MOV64mr [[COPY]], 1, %noreg, 0, %noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; SSE: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
; SSE: RET 0
; AVX-LABEL: name: test_store_ptr
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; AVX: MOV64mr [[COPY]], 1, %noreg, 0, %noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; AVX: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
; AVX: RET 0
; AVX512F-LABEL: name: test_store_ptr
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; AVX512F: MOV64mr [[COPY]], 1, %noreg, 0, %noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; AVX512F: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
; AVX512F: RET 0
; AVX512VL-LABEL: name: test_store_ptr
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; AVX512VL: MOV64mr [[COPY]], 1, %noreg, 0, %noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; AVX512VL: MOV64mr [[COPY]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 8 into %ir.ptr1)
; AVX512VL: RET 0
- %0(p0) = COPY %rdi
- %1(p0) = COPY %rsi
+ %0(p0) = COPY $rdi
+ %1(p0) = COPY $rsi
G_STORE %1(p0), %0(p0) :: (store 8 into %ir.ptr1)
RET 0
@@ -765,44 +765,44 @@ registers:
- { id: 4, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %esi, %rdi
+ liveins: $esi, $rdi
; SSE-LABEL: name: test_gep_folding
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; SSE: MOV32mr [[COPY]], 1, %noreg, 20, %noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
- ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 20, %noreg :: (load 4 from %ir.arrayidx)
- ; SSE: %eax = COPY [[MOV32rm]]
- ; SSE: RET 0, implicit %eax
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; SSE: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
+ ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load 4 from %ir.arrayidx)
+ ; SSE: $eax = COPY [[MOV32rm]]
+ ; SSE: RET 0, implicit $eax
; AVX-LABEL: name: test_gep_folding
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; AVX: MOV32mr [[COPY]], 1, %noreg, 20, %noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
- ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 20, %noreg :: (load 4 from %ir.arrayidx)
- ; AVX: %eax = COPY [[MOV32rm]]
- ; AVX: RET 0, implicit %eax
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; AVX: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
+ ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load 4 from %ir.arrayidx)
+ ; AVX: $eax = COPY [[MOV32rm]]
+ ; AVX: RET 0, implicit $eax
; AVX512F-LABEL: name: test_gep_folding
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; AVX512F: MOV32mr [[COPY]], 1, %noreg, 20, %noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
- ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 20, %noreg :: (load 4 from %ir.arrayidx)
- ; AVX512F: %eax = COPY [[MOV32rm]]
- ; AVX512F: RET 0, implicit %eax
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; AVX512F: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
+ ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load 4 from %ir.arrayidx)
+ ; AVX512F: $eax = COPY [[MOV32rm]]
+ ; AVX512F: RET 0, implicit $eax
; AVX512VL-LABEL: name: test_gep_folding
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; AVX512VL: MOV32mr [[COPY]], 1, %noreg, 20, %noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
- ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, %noreg, 20, %noreg :: (load 4 from %ir.arrayidx)
- ; AVX512VL: %eax = COPY [[MOV32rm]]
- ; AVX512VL: RET 0, implicit %eax
- %0(p0) = COPY %rdi
- %1(s32) = COPY %esi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; AVX512VL: MOV32mr [[COPY]], 1, $noreg, 20, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
+ ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY]], 1, $noreg, 20, $noreg :: (load 4 from %ir.arrayidx)
+ ; AVX512VL: $eax = COPY [[MOV32rm]]
+ ; AVX512VL: RET 0, implicit $eax
+ %0(p0) = COPY $rdi
+ %1(s32) = COPY $esi
%2(s64) = G_CONSTANT i64 20
%3(p0) = G_GEP %0, %2(s64)
G_STORE %1(s32), %3(p0) :: (store 4 into %ir.arrayidx)
%4(s32) = G_LOAD %3(p0) :: (load 4 from %ir.arrayidx)
- %eax = COPY %4(s32)
- RET 0, implicit %eax
+ $eax = COPY %4(s32)
+ RET 0, implicit $eax
...
---
@@ -818,51 +818,51 @@ registers:
- { id: 4, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %esi, %rdi
+ liveins: $esi, $rdi
; SSE-LABEL: name: test_gep_folding_largeGepIndex
- ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; SSE: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
+ ; SSE: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; SSE: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; SSE: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 228719476720
- ; SSE: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, %noreg
- ; SSE: MOV32mr [[LEA64r]], 1, %noreg, 0, %noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
- ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, %noreg, 0, %noreg :: (load 4 from %ir.arrayidx)
- ; SSE: %eax = COPY [[MOV32rm]]
- ; SSE: RET 0, implicit %eax
+ ; SSE: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
+ ; SSE: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
+ ; SSE: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load 4 from %ir.arrayidx)
+ ; SSE: $eax = COPY [[MOV32rm]]
+ ; SSE: RET 0, implicit $eax
; AVX-LABEL: name: test_gep_folding_largeGepIndex
- ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
+ ; AVX: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; AVX: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 228719476720
- ; AVX: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, %noreg
- ; AVX: MOV32mr [[LEA64r]], 1, %noreg, 0, %noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
- ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, %noreg, 0, %noreg :: (load 4 from %ir.arrayidx)
- ; AVX: %eax = COPY [[MOV32rm]]
- ; AVX: RET 0, implicit %eax
+ ; AVX: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
+ ; AVX: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
+ ; AVX: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load 4 from %ir.arrayidx)
+ ; AVX: $eax = COPY [[MOV32rm]]
+ ; AVX: RET 0, implicit $eax
; AVX512F-LABEL: name: test_gep_folding_largeGepIndex
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; AVX512F: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 228719476720
- ; AVX512F: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, %noreg
- ; AVX512F: MOV32mr [[LEA64r]], 1, %noreg, 0, %noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
- ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, %noreg, 0, %noreg :: (load 4 from %ir.arrayidx)
- ; AVX512F: %eax = COPY [[MOV32rm]]
- ; AVX512F: RET 0, implicit %eax
+ ; AVX512F: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
+ ; AVX512F: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
+ ; AVX512F: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load 4 from %ir.arrayidx)
+ ; AVX512F: $eax = COPY [[MOV32rm]]
+ ; AVX512F: RET 0, implicit $eax
; AVX512VL-LABEL: name: test_gep_folding_largeGepIndex
- ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512VL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
+ ; AVX512VL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512VL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; AVX512VL: [[MOV64ri:%[0-9]+]]:gr64_nosp = MOV64ri 228719476720
- ; AVX512VL: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, %noreg
- ; AVX512VL: MOV32mr [[LEA64r]], 1, %noreg, 0, %noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
- ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, %noreg, 0, %noreg :: (load 4 from %ir.arrayidx)
- ; AVX512VL: %eax = COPY [[MOV32rm]]
- ; AVX512VL: RET 0, implicit %eax
- %0(p0) = COPY %rdi
- %1(s32) = COPY %esi
+ ; AVX512VL: [[LEA64r:%[0-9]+]]:gr64 = LEA64r [[COPY]], 1, [[MOV64ri]], 0, $noreg
+ ; AVX512VL: MOV32mr [[LEA64r]], 1, $noreg, 0, $noreg, [[COPY1]] :: (store 4 into %ir.arrayidx)
+ ; AVX512VL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[LEA64r]], 1, $noreg, 0, $noreg :: (load 4 from %ir.arrayidx)
+ ; AVX512VL: $eax = COPY [[MOV32rm]]
+ ; AVX512VL: RET 0, implicit $eax
+ %0(p0) = COPY $rdi
+ %1(s32) = COPY $esi
%2(s64) = G_CONSTANT i64 228719476720
%3(p0) = G_GEP %0, %2(s64)
G_STORE %1(s32), %3(p0) :: (store 4 into %ir.arrayidx)
%4(s32) = G_LOAD %3(p0) :: (load 4 from %ir.arrayidx)
- %eax = COPY %4(s32)
- RET 0, implicit %eax
+ $eax = COPY %4(s32)
+ RET 0, implicit $eax
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-v128.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-v128.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-v128.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-v128.mir Wed Jan 31 14:04:26 2018
@@ -34,20 +34,20 @@ regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: vecr }
-# ALL: %0:gr64 = COPY %rdi
-# SSE: %1:vr128 = MOVUPSrm %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1, align 1)
-# AVX: %1:vr128 = VMOVUPSrm %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1, align 1)
-# AVX512F: %1:vr128x = VMOVUPSZ128rm_NOVLX %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1, align 1)
-# AVX512VL: %1:vr128x = VMOVUPSZ128rm %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1, align 1)
-# ALL: %xmm0 = COPY %1
+# ALL: %0:gr64 = COPY $rdi
+# SSE: %1:vr128 = MOVUPSrm %0, 1, $noreg, 0, $noreg :: (load 16 from %ir.p1, align 1)
+# AVX: %1:vr128 = VMOVUPSrm %0, 1, $noreg, 0, $noreg :: (load 16 from %ir.p1, align 1)
+# AVX512F: %1:vr128x = VMOVUPSZ128rm_NOVLX %0, 1, $noreg, 0, $noreg :: (load 16 from %ir.p1, align 1)
+# AVX512VL: %1:vr128x = VMOVUPSZ128rm %0, 1, $noreg, 0, $noreg :: (load 16 from %ir.p1, align 1)
+# ALL: $xmm0 = COPY %1
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
- %0(p0) = COPY %rdi
+ %0(p0) = COPY $rdi
%1(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.p1, align 1)
- %xmm0 = COPY %1(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -59,20 +59,20 @@ regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: vecr }
-# ALL: %0:gr64 = COPY %rdi
-# SSE: %1:vr128 = MOVAPSrm %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1)
-# AVX: %1:vr128 = VMOVAPSrm %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1)
-# AVX512F: %1:vr128x = VMOVAPSZ128rm_NOVLX %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1)
-# AVX512VL: %1:vr128x = VMOVAPSZ128rm %0, 1, %noreg, 0, %noreg :: (load 16 from %ir.p1)
-# ALL: %xmm0 = COPY %1
+# ALL: %0:gr64 = COPY $rdi
+# SSE: %1:vr128 = MOVAPSrm %0, 1, $noreg, 0, $noreg :: (load 16 from %ir.p1)
+# AVX: %1:vr128 = VMOVAPSrm %0, 1, $noreg, 0, $noreg :: (load 16 from %ir.p1)
+# AVX512F: %1:vr128x = VMOVAPSZ128rm_NOVLX %0, 1, $noreg, 0, $noreg :: (load 16 from %ir.p1)
+# AVX512VL: %1:vr128x = VMOVAPSZ128rm %0, 1, $noreg, 0, $noreg :: (load 16 from %ir.p1)
+# ALL: $xmm0 = COPY %1
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
- %0(p0) = COPY %rdi
+ %0(p0) = COPY $rdi
%1(<4 x s32>) = G_LOAD %0(p0) :: (load 16 from %ir.p1)
- %xmm0 = COPY %1(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -84,23 +84,23 @@ regBankSelected: true
registers:
- { id: 0, class: vecr }
- { id: 1, class: gpr }
-# NO_AVX512F: %0:vr128 = COPY %xmm0
-# AVX512ALL: %0:vr128x = COPY %xmm0
-# ALL: %1:gr64 = COPY %rdi
-# SSE: MOVAPSmr %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1)
-# AVX: VMOVAPSmr %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1)
-# AVX512F: VMOVAPSZ128mr_NOVLX %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1)
-# AVX512VL: VMOVAPSZ128mr %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1)
-# ALL: %rax = COPY %1
+# NO_AVX512F: %0:vr128 = COPY $xmm0
+# AVX512ALL: %0:vr128x = COPY $xmm0
+# ALL: %1:gr64 = COPY $rdi
+# SSE: MOVAPSmr %1, 1, $noreg, 0, $noreg, %0 :: (store 16 into %ir.p1)
+# AVX: VMOVAPSmr %1, 1, $noreg, 0, $noreg, %0 :: (store 16 into %ir.p1)
+# AVX512F: VMOVAPSZ128mr_NOVLX %1, 1, $noreg, 0, $noreg, %0 :: (store 16 into %ir.p1)
+# AVX512VL: VMOVAPSZ128mr %1, 1, $noreg, 0, $noreg, %0 :: (store 16 into %ir.p1)
+# ALL: $rax = COPY %1
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %xmm0
+ liveins: $rdi, $xmm0
- %0(<4 x s32>) = COPY %xmm0
- %1(p0) = COPY %rdi
+ %0(<4 x s32>) = COPY $xmm0
+ %1(p0) = COPY $rdi
G_STORE %0(<4 x s32>), %1(p0) :: (store 16 into %ir.p1, align 16)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
---
@@ -112,22 +112,22 @@ regBankSelected: true
registers:
- { id: 0, class: vecr }
- { id: 1, class: gpr }
-# NO_AVX512F: %0:vr128 = COPY %xmm0
-# AVX512ALL: %0:vr128x = COPY %xmm0
-# ALL: %1:gr64 = COPY %rdi
-# SSE: MOVUPSmr %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1, align 1)
-# AVX: VMOVUPSmr %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1, align 1)
-# AVX512F: VMOVUPSZ128mr_NOVLX %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1, align 1)
-# AVX512VL: VMOVUPSZ128mr %1, 1, %noreg, 0, %noreg, %0 :: (store 16 into %ir.p1, align 1)
-# ALL: %rax = COPY %1
+# NO_AVX512F: %0:vr128 = COPY $xmm0
+# AVX512ALL: %0:vr128x = COPY $xmm0
+# ALL: %1:gr64 = COPY $rdi
+# SSE: MOVUPSmr %1, 1, $noreg, 0, $noreg, %0 :: (store 16 into %ir.p1, align 1)
+# AVX: VMOVUPSmr %1, 1, $noreg, 0, $noreg, %0 :: (store 16 into %ir.p1, align 1)
+# AVX512F: VMOVUPSZ128mr_NOVLX %1, 1, $noreg, 0, $noreg, %0 :: (store 16 into %ir.p1, align 1)
+# AVX512VL: VMOVUPSZ128mr %1, 1, $noreg, 0, $noreg, %0 :: (store 16 into %ir.p1, align 1)
+# ALL: $rax = COPY %1
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %xmm0
+ liveins: $rdi, $xmm0
- %0(<4 x s32>) = COPY %xmm0
- %1(p0) = COPY %rdi
+ %0(<4 x s32>) = COPY $xmm0
+ %1(p0) = COPY $rdi
G_STORE %0(<4 x s32>), %1(p0) :: (store 16 into %ir.p1, align 1)
- %rax = COPY %1(p0)
- RET 0, implicit %rax
+ $rax = COPY %1(p0)
+ RET 0, implicit $rax
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-v256.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-v256.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-v256.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-v256.mir Wed Jan 31 14:04:26 2018
@@ -42,28 +42,28 @@ regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: vecr }
-# NO_AVX512F: %0:gr64 = COPY %rdi
-# NO_AVX512F-NEXT: %1:vr256 = VMOVUPSYrm %0, 1, %noreg, 0, %noreg :: (load 32 from %ir.p1, align 1)
-# NO_AVX512F-NEXT: %ymm0 = COPY %1
-# NO_AVX512F-NEXT: RET 0, implicit %ymm0
-#
-# AVX512F: %0:gr64 = COPY %rdi
-# AVX512F-NEXT: %1:vr256x = VMOVUPSZ256rm_NOVLX %0, 1, %noreg, 0, %noreg :: (load 32 from %ir.p1, align 1)
-# AVX512F-NEXT: %ymm0 = COPY %1
-# AVX512F-NEXT: RET 0, implicit %ymm0
-#
-# AVX512VL: %0:gr64 = COPY %rdi
-# AVX512VL-NEXT: %1:vr256x = VMOVUPSZ256rm %0, 1, %noreg, 0, %noreg :: (load 32 from %ir.p1, align 1)
-# AVX512VL-NEXT: %ymm0 = COPY %1
-# AVX512VL-NEXT: RET 0, implicit %ymm0
+# NO_AVX512F: %0:gr64 = COPY $rdi
+# NO_AVX512F-NEXT: %1:vr256 = VMOVUPSYrm %0, 1, $noreg, 0, $noreg :: (load 32 from %ir.p1, align 1)
+# NO_AVX512F-NEXT: $ymm0 = COPY %1
+# NO_AVX512F-NEXT: RET 0, implicit $ymm0
+#
+# AVX512F: %0:gr64 = COPY $rdi
+# AVX512F-NEXT: %1:vr256x = VMOVUPSZ256rm_NOVLX %0, 1, $noreg, 0, $noreg :: (load 32 from %ir.p1, align 1)
+# AVX512F-NEXT: $ymm0 = COPY %1
+# AVX512F-NEXT: RET 0, implicit $ymm0
+#
+# AVX512VL: %0:gr64 = COPY $rdi
+# AVX512VL-NEXT: %1:vr256x = VMOVUPSZ256rm %0, 1, $noreg, 0, $noreg :: (load 32 from %ir.p1, align 1)
+# AVX512VL-NEXT: $ymm0 = COPY %1
+# AVX512VL-NEXT: RET 0, implicit $ymm0
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
- %0(p0) = COPY %rdi
+ %0(p0) = COPY $rdi
%1(<8 x s32>) = G_LOAD %0(p0) :: (load 32 from %ir.p1, align 1)
- %ymm0 = COPY %1(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %1(<8 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -75,28 +75,28 @@ regBankSelected: true
registers:
- { id: 0, class: gpr }
- { id: 1, class: vecr }
-# NO_AVX512F: %0:gr64 = COPY %rdi
-# NO_AVX512F-NEXT: %1:vr256 = VMOVAPSYrm %0, 1, %noreg, 0, %noreg :: (load 32 from %ir.p1)
-# NO_AVX512F-NEXT: %ymm0 = COPY %1
-# NO_AVX512F-NEXT: RET 0, implicit %ymm0
-#
-# AVX512F: %0:gr64 = COPY %rdi
-# AVX512F-NEXT: %1:vr256x = VMOVAPSZ256rm_NOVLX %0, 1, %noreg, 0, %noreg :: (load 32 from %ir.p1)
-# AVX512F-NEXT: %ymm0 = COPY %1
-# AVX512F-NEXT: RET 0, implicit %ymm0
-#
-# AVX512VL: %0:gr64 = COPY %rdi
-# AVX512VL-NEXT: %1:vr256x = VMOVAPSZ256rm %0, 1, %noreg, 0, %noreg :: (load 32 from %ir.p1)
-# AVX512VL-NEXT: %ymm0 = COPY %1
-# AVX512VL-NEXT: RET 0, implicit %ymm0
+# NO_AVX512F: %0:gr64 = COPY $rdi
+# NO_AVX512F-NEXT: %1:vr256 = VMOVAPSYrm %0, 1, $noreg, 0, $noreg :: (load 32 from %ir.p1)
+# NO_AVX512F-NEXT: $ymm0 = COPY %1
+# NO_AVX512F-NEXT: RET 0, implicit $ymm0
+#
+# AVX512F: %0:gr64 = COPY $rdi
+# AVX512F-NEXT: %1:vr256x = VMOVAPSZ256rm_NOVLX %0, 1, $noreg, 0, $noreg :: (load 32 from %ir.p1)
+# AVX512F-NEXT: $ymm0 = COPY %1
+# AVX512F-NEXT: RET 0, implicit $ymm0
+#
+# AVX512VL: %0:gr64 = COPY $rdi
+# AVX512VL-NEXT: %1:vr256x = VMOVAPSZ256rm %0, 1, $noreg, 0, $noreg :: (load 32 from %ir.p1)
+# AVX512VL-NEXT: $ymm0 = COPY %1
+# AVX512VL-NEXT: RET 0, implicit $ymm0
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
- %0(p0) = COPY %rdi
+ %0(p0) = COPY $rdi
%1(<8 x s32>) = G_LOAD %0(p0) :: (load 32 from %ir.p1)
- %ymm0 = COPY %1(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %1(<8 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -115,26 +115,26 @@ regBankSelected: true
registers:
- { id: 0, class: vecr }
- { id: 1, class: gpr }
-# NO_AVX512F: %0:vr256 = COPY %ymm0
-# NO_AVX512F-NEXT: %1:gr64 = COPY %rdi
-# NO_AVX512F-NEXT: VMOVUPSYmr %1, 1, %noreg, 0, %noreg, %0 :: (store 32 into %ir.p1, align 1)
+# NO_AVX512F: %0:vr256 = COPY $ymm0
+# NO_AVX512F-NEXT: %1:gr64 = COPY $rdi
+# NO_AVX512F-NEXT: VMOVUPSYmr %1, 1, $noreg, 0, $noreg, %0 :: (store 32 into %ir.p1, align 1)
# NO_AVX512F-NEXT: RET 0
#
-# AVX512F: %0:vr256x = COPY %ymm0
-# AVX512F-NEXT: %1:gr64 = COPY %rdi
-# AVX512F-NEXT: VMOVUPSZ256mr_NOVLX %1, 1, %noreg, 0, %noreg, %0 :: (store 32 into %ir.p1, align 1)
+# AVX512F: %0:vr256x = COPY $ymm0
+# AVX512F-NEXT: %1:gr64 = COPY $rdi
+# AVX512F-NEXT: VMOVUPSZ256mr_NOVLX %1, 1, $noreg, 0, $noreg, %0 :: (store 32 into %ir.p1, align 1)
# AVX512F-NEXT: RET 0
#
-# AVX512VL: %0:vr256x = COPY %ymm0
-# AVX512VL-NEXT: %1:gr64 = COPY %rdi
-# AVX512VL-NEXT: VMOVUPSZ256mr %1, 1, %noreg, 0, %noreg, %0 :: (store 32 into %ir.p1, align 1)
+# AVX512VL: %0:vr256x = COPY $ymm0
+# AVX512VL-NEXT: %1:gr64 = COPY $rdi
+# AVX512VL-NEXT: VMOVUPSZ256mr %1, 1, $noreg, 0, $noreg, %0 :: (store 32 into %ir.p1, align 1)
# AVX512VL-NEXT: RET 0
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %ymm0
+ liveins: $rdi, $ymm0
- %0(<8 x s32>) = COPY %ymm0
- %1(p0) = COPY %rdi
+ %0(<8 x s32>) = COPY $ymm0
+ %1(p0) = COPY $rdi
G_STORE %0(<8 x s32>), %1(p0) :: (store 32 into %ir.p1, align 1)
RET 0
@@ -155,26 +155,26 @@ regBankSelected: true
registers:
- { id: 0, class: vecr }
- { id: 1, class: gpr }
-# NO_AVX512F: %0:vr256 = COPY %ymm0
-# NO_AVX512F-NEXT: %1:gr64 = COPY %rdi
-# NO_AVX512F-NEXT: VMOVAPSYmr %1, 1, %noreg, 0, %noreg, %0 :: (store 32 into %ir.p1)
+# NO_AVX512F: %0:vr256 = COPY $ymm0
+# NO_AVX512F-NEXT: %1:gr64 = COPY $rdi
+# NO_AVX512F-NEXT: VMOVAPSYmr %1, 1, $noreg, 0, $noreg, %0 :: (store 32 into %ir.p1)
# NO_AVX512F-NEXT: RET 0
#
-# AVX512F: %0:vr256x = COPY %ymm0
-# AVX512F-NEXT: %1:gr64 = COPY %rdi
-# AVX512F-NEXT: VMOVAPSZ256mr_NOVLX %1, 1, %noreg, 0, %noreg, %0 :: (store 32 into %ir.p1)
+# AVX512F: %0:vr256x = COPY $ymm0
+# AVX512F-NEXT: %1:gr64 = COPY $rdi
+# AVX512F-NEXT: VMOVAPSZ256mr_NOVLX %1, 1, $noreg, 0, $noreg, %0 :: (store 32 into %ir.p1)
# AVX512F-NEXT: RET 0
#
-# AVX512VL: %0:vr256x = COPY %ymm0
-# AVX512VL-NEXT: %1:gr64 = COPY %rdi
-# AVX512VL-NEXT: VMOVAPSZ256mr %1, 1, %noreg, 0, %noreg, %0 :: (store 32 into %ir.p1)
+# AVX512VL: %0:vr256x = COPY $ymm0
+# AVX512VL-NEXT: %1:gr64 = COPY $rdi
+# AVX512VL-NEXT: VMOVAPSZ256mr %1, 1, $noreg, 0, $noreg, %0 :: (store 32 into %ir.p1)
# AVX512VL-NEXT: RET 0
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %ymm0
+ liveins: $rdi, $ymm0
- %0(<8 x s32>) = COPY %ymm0
- %1(p0) = COPY %rdi
+ %0(<8 x s32>) = COPY $ymm0
+ %1(p0) = COPY $rdi
G_STORE %0(<8 x s32>), %1(p0) :: (store 32 into %ir.p1)
RET 0
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-v512.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-v512.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-v512.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-v512.mir Wed Jan 31 14:04:26 2018
@@ -32,17 +32,17 @@ registers:
- { id: 1, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; AVX512F-LABEL: name: test_load_v16i32_noalign
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[VMOVUPSZrm:%[0-9]+]]:vr512 = VMOVUPSZrm [[COPY]], 1, %noreg, 0, %noreg :: (load 64 from %ir.p1, align 1)
- ; AVX512F: %zmm0 = COPY [[VMOVUPSZrm]]
- ; AVX512F: RET 0, implicit %zmm0
- %0(p0) = COPY %rdi
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[VMOVUPSZrm:%[0-9]+]]:vr512 = VMOVUPSZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 64 from %ir.p1, align 1)
+ ; AVX512F: $zmm0 = COPY [[VMOVUPSZrm]]
+ ; AVX512F: RET 0, implicit $zmm0
+ %0(p0) = COPY $rdi
%1(<16 x s32>) = G_LOAD %0(p0) :: (load 64 from %ir.p1, align 1)
- %zmm0 = COPY %1(<16 x s32>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %1(<16 x s32>)
+ RET 0, implicit $zmm0
...
---
@@ -55,17 +55,17 @@ registers:
- { id: 1, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; AVX512F-LABEL: name: test_load_v16i32_align
- ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: [[VMOVUPSZrm:%[0-9]+]]:vr512 = VMOVUPSZrm [[COPY]], 1, %noreg, 0, %noreg :: (load 64 from %ir.p1, align 32)
- ; AVX512F: %zmm0 = COPY [[VMOVUPSZrm]]
- ; AVX512F: RET 0, implicit %zmm0
- %0(p0) = COPY %rdi
+ ; AVX512F: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: [[VMOVUPSZrm:%[0-9]+]]:vr512 = VMOVUPSZrm [[COPY]], 1, $noreg, 0, $noreg :: (load 64 from %ir.p1, align 32)
+ ; AVX512F: $zmm0 = COPY [[VMOVUPSZrm]]
+ ; AVX512F: RET 0, implicit $zmm0
+ %0(p0) = COPY $rdi
%1(<16 x s32>) = G_LOAD %0(p0) :: (load 64 from %ir.p1, align 32)
- %zmm0 = COPY %1(<16 x s32>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %1(<16 x s32>)
+ RET 0, implicit $zmm0
...
---
@@ -78,15 +78,15 @@ registers:
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %zmm0
+ liveins: $rdi, $zmm0
; AVX512F-LABEL: name: test_store_v16i32_noalign
- ; AVX512F: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: VMOVUPSZmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 64 into %ir.p1, align 1)
+ ; AVX512F: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: VMOVUPSZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 64 into %ir.p1, align 1)
; AVX512F: RET 0
- %0(<16 x s32>) = COPY %zmm0
- %1(p0) = COPY %rdi
+ %0(<16 x s32>) = COPY $zmm0
+ %1(p0) = COPY $rdi
G_STORE %0(<16 x s32>), %1(p0) :: (store 64 into %ir.p1, align 1)
RET 0
@@ -101,15 +101,15 @@ registers:
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %zmm0
+ liveins: $rdi, $zmm0
; AVX512F-LABEL: name: test_store_v16i32_align
- ; AVX512F: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY %rdi
- ; AVX512F: VMOVUPSZmr [[COPY1]], 1, %noreg, 0, %noreg, [[COPY]] :: (store 64 into %ir.p1, align 32)
+ ; AVX512F: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; AVX512F: [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
+ ; AVX512F: VMOVUPSZmr [[COPY1]], 1, $noreg, 0, $noreg, [[COPY]] :: (store 64 into %ir.p1, align 32)
; AVX512F: RET 0
- %0(<16 x s32>) = COPY %zmm0
- %1(p0) = COPY %rdi
+ %0(<16 x s32>) = COPY $zmm0
+ %1(p0) = COPY $rdi
G_STORE %0(<16 x s32>), %1(p0) :: (store 64 into %ir.p1, align 32)
RET 0
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-merge-vec256.mir Wed Jan 31 14:04:26 2018
@@ -24,18 +24,18 @@ body: |
; AVX: [[DEF:%[0-9]+]]:vr128 = IMPLICIT_DEF
; AVX: undef %2.sub_xmm:vr256 = COPY [[DEF]]
; AVX: [[VINSERTF128rr:%[0-9]+]]:vr256 = VINSERTF128rr %2, [[DEF]], 1
- ; AVX: %ymm0 = COPY [[VINSERTF128rr]]
- ; AVX: RET 0, implicit %ymm0
+ ; AVX: $ymm0 = COPY [[VINSERTF128rr]]
+ ; AVX: RET 0, implicit $ymm0
; AVX512VL-LABEL: name: test_merge
; AVX512VL: [[DEF:%[0-9]+]]:vr128x = IMPLICIT_DEF
; AVX512VL: undef %2.sub_xmm:vr256x = COPY [[DEF]]
; AVX512VL: [[VINSERTF32x4Z256rr:%[0-9]+]]:vr256x = VINSERTF32x4Z256rr %2, [[DEF]], 1
- ; AVX512VL: %ymm0 = COPY [[VINSERTF32x4Z256rr]]
- ; AVX512VL: RET 0, implicit %ymm0
+ ; AVX512VL: $ymm0 = COPY [[VINSERTF32x4Z256rr]]
+ ; AVX512VL: RET 0, implicit $ymm0
%0(<4 x s32>) = IMPLICIT_DEF
%1(<8 x s32>) = G_MERGE_VALUES %0(<4 x s32>), %0(<4 x s32>)
- %ymm0 = COPY %1(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %1(<8 x s32>)
+ RET 0, implicit $ymm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-merge-vec512.mir Wed Jan 31 14:04:26 2018
@@ -27,12 +27,12 @@ body: |
; ALL: [[VINSERTF32x4Zrr:%[0-9]+]]:vr512 = VINSERTF32x4Zrr %2, [[DEF]], 1
; ALL: [[VINSERTF32x4Zrr1:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[VINSERTF32x4Zrr]], [[DEF]], 2
; ALL: [[VINSERTF32x4Zrr2:%[0-9]+]]:vr512 = VINSERTF32x4Zrr [[VINSERTF32x4Zrr1]], [[DEF]], 3
- ; ALL: %zmm0 = COPY [[VINSERTF32x4Zrr2]]
- ; ALL: RET 0, implicit %zmm0
+ ; ALL: $zmm0 = COPY [[VINSERTF32x4Zrr2]]
+ ; ALL: RET 0, implicit $zmm0
%0(<4 x s32>) = IMPLICIT_DEF
%1(<16 x s32>) = G_MERGE_VALUES %0(<4 x s32>), %0(<4 x s32>), %0(<4 x s32>), %0(<4 x s32>)
- %zmm0 = COPY %1(<16 x s32>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %1(<16 x s32>)
+ RET 0, implicit $zmm0
...
---
@@ -50,12 +50,12 @@ body: |
; ALL: [[DEF:%[0-9]+]]:vr256x = IMPLICIT_DEF
; ALL: undef %2.sub_ymm:vr512 = COPY [[DEF]]
; ALL: [[VINSERTF64x4Zrr:%[0-9]+]]:vr512 = VINSERTF64x4Zrr %2, [[DEF]], 1
- ; ALL: %zmm0 = COPY [[VINSERTF64x4Zrr]]
- ; ALL: RET 0, implicit %zmm0
+ ; ALL: $zmm0 = COPY [[VINSERTF64x4Zrr]]
+ ; ALL: RET 0, implicit $zmm0
%0(<8 x s32>) = IMPLICIT_DEF
%1(<16 x s32>) = G_MERGE_VALUES %0(<8 x s32>), %0(<8 x s32>)
- %zmm0 = COPY %1(<16 x s32>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %1(<16 x s32>)
+ RET 0, implicit $zmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-mul-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-mul-scalar.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-mul-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-mul-scalar.mir Wed Jan 31 14:04:26 2018
@@ -29,19 +29,19 @@ registers:
- { id: 2, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; ALL-LABEL: name: test_mul_i16
- ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY %di
- ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY %si
- ; ALL: [[IMUL16rr:%[0-9]+]]:gr16 = IMUL16rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %ax = COPY [[IMUL16rr]]
- ; ALL: RET 0, implicit %ax
- %0(s16) = COPY %di
- %1(s16) = COPY %si
+ ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY $di
+ ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY $si
+ ; ALL: [[IMUL16rr:%[0-9]+]]:gr16 = IMUL16rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $ax = COPY [[IMUL16rr]]
+ ; ALL: RET 0, implicit $ax
+ %0(s16) = COPY $di
+ %1(s16) = COPY $si
%2(s16) = G_MUL %0, %1
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
@@ -55,19 +55,19 @@ registers:
- { id: 2, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; ALL-LABEL: name: test_mul_i32
- ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; ALL: [[IMUL32rr:%[0-9]+]]:gr32 = IMUL32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %eax = COPY [[IMUL32rr]]
- ; ALL: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; ALL: [[IMUL32rr:%[0-9]+]]:gr32 = IMUL32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $eax = COPY [[IMUL32rr]]
+ ; ALL: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s32) = G_MUL %0, %1
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -81,18 +81,18 @@ registers:
- { id: 2, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; ALL-LABEL: name: test_mul_i64
- ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; ALL: [[IMUL64rr:%[0-9]+]]:gr64 = IMUL64rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %rax = COPY [[IMUL64rr]]
- ; ALL: RET 0, implicit %rax
- %0(s64) = COPY %rdi
- %1(s64) = COPY %rsi
+ ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; ALL: [[IMUL64rr:%[0-9]+]]:gr64 = IMUL64rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $rax = COPY [[IMUL64rr]]
+ ; ALL: RET 0, implicit $rax
+ %0(s64) = COPY $rdi
+ %1(s64) = COPY $rsi
%2(s64) = G_MUL %0, %1
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-mul-vec.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-mul-vec.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-mul-vec.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-mul-vec.mir Wed Jan 31 14:04:26 2018
@@ -100,19 +100,19 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_mul_v8i16
- ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm1
; CHECK: [[PMULLWrr:%[0-9]+]]:vr128 = PMULLWrr [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[PMULLWrr]]
- ; CHECK: RET 0, implicit %xmm0
- %0(<8 x s16>) = COPY %xmm0
- %1(<8 x s16>) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[PMULLWrr]]
+ ; CHECK: RET 0, implicit $xmm0
+ %0(<8 x s16>) = COPY $xmm0
+ %1(<8 x s16>) = COPY $xmm1
%2(<8 x s16>) = G_MUL %0, %1
- %xmm0 = COPY %2(<8 x s16>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<8 x s16>)
+ RET 0, implicit $xmm0
...
---
@@ -126,19 +126,19 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_mul_v8i16_avx
- ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm1
; CHECK: [[VPMULLWrr:%[0-9]+]]:vr128 = VPMULLWrr [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[VPMULLWrr]]
- ; CHECK: RET 0, implicit %xmm0
- %0(<8 x s16>) = COPY %xmm0
- %1(<8 x s16>) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[VPMULLWrr]]
+ ; CHECK: RET 0, implicit $xmm0
+ %0(<8 x s16>) = COPY $xmm0
+ %1(<8 x s16>) = COPY $xmm1
%2(<8 x s16>) = G_MUL %0, %1
- %xmm0 = COPY %2(<8 x s16>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<8 x s16>)
+ RET 0, implicit $xmm0
...
---
@@ -152,19 +152,19 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_mul_v8i16_avx512bwvl
- ; CHECK: [[COPY:%[0-9]+]]:vr128x = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr128x = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
; CHECK: [[VPMULLWZ128rr:%[0-9]+]]:vr128x = VPMULLWZ128rr [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[VPMULLWZ128rr]]
- ; CHECK: RET 0, implicit %xmm0
- %0(<8 x s16>) = COPY %xmm0
- %1(<8 x s16>) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[VPMULLWZ128rr]]
+ ; CHECK: RET 0, implicit $xmm0
+ %0(<8 x s16>) = COPY $xmm0
+ %1(<8 x s16>) = COPY $xmm1
%2(<8 x s16>) = G_MUL %0, %1
- %xmm0 = COPY %2(<8 x s16>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<8 x s16>)
+ RET 0, implicit $xmm0
...
---
@@ -178,19 +178,19 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_mul_v4i32
- ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm1
; CHECK: [[PMULLDrr:%[0-9]+]]:vr128 = PMULLDrr [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[PMULLDrr]]
- ; CHECK: RET 0, implicit %xmm0
- %0(<4 x s32>) = COPY %xmm0
- %1(<4 x s32>) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[PMULLDrr]]
+ ; CHECK: RET 0, implicit $xmm0
+ %0(<4 x s32>) = COPY $xmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<4 x s32>) = G_MUL %0, %1
- %xmm0 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -204,19 +204,19 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_mul_v4i32_avx
- ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr128 = COPY $xmm1
; CHECK: [[VPMULLDrr:%[0-9]+]]:vr128 = VPMULLDrr [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[VPMULLDrr]]
- ; CHECK: RET 0, implicit %xmm0
- %0(<4 x s32>) = COPY %xmm0
- %1(<4 x s32>) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[VPMULLDrr]]
+ ; CHECK: RET 0, implicit $xmm0
+ %0(<4 x s32>) = COPY $xmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<4 x s32>) = G_MUL %0, %1
- %xmm0 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -230,19 +230,19 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_mul_v4i32_avx512vl
- ; CHECK: [[COPY:%[0-9]+]]:vr128x = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr128x = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
; CHECK: [[VPMULLDZ128rr:%[0-9]+]]:vr128x = VPMULLDZ128rr [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[VPMULLDZ128rr]]
- ; CHECK: RET 0, implicit %xmm0
- %0(<4 x s32>) = COPY %xmm0
- %1(<4 x s32>) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[VPMULLDZ128rr]]
+ ; CHECK: RET 0, implicit $xmm0
+ %0(<4 x s32>) = COPY $xmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<4 x s32>) = G_MUL %0, %1
- %xmm0 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -256,19 +256,19 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
; CHECK-LABEL: name: test_mul_v2i64
- ; CHECK: [[COPY:%[0-9]+]]:vr128x = COPY %xmm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr128x = COPY %xmm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr128x = COPY $xmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr128x = COPY $xmm1
; CHECK: [[VPMULLQZ128rr:%[0-9]+]]:vr128x = VPMULLQZ128rr [[COPY]], [[COPY1]]
- ; CHECK: %xmm0 = COPY [[VPMULLQZ128rr]]
- ; CHECK: RET 0, implicit %xmm0
- %0(<2 x s64>) = COPY %xmm0
- %1(<2 x s64>) = COPY %xmm1
+ ; CHECK: $xmm0 = COPY [[VPMULLQZ128rr]]
+ ; CHECK: RET 0, implicit $xmm0
+ %0(<2 x s64>) = COPY $xmm0
+ %1(<2 x s64>) = COPY $xmm1
%2(<2 x s64>) = G_MUL %0, %1
- %xmm0 = COPY %2(<2 x s64>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<2 x s64>)
+ RET 0, implicit $xmm0
...
---
@@ -282,19 +282,19 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; CHECK-LABEL: name: test_mul_v16i16
- ; CHECK: [[COPY:%[0-9]+]]:vr256 = COPY %ymm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr256 = COPY %ymm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr256 = COPY $ymm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr256 = COPY $ymm1
; CHECK: [[VPMULLWYrr:%[0-9]+]]:vr256 = VPMULLWYrr [[COPY]], [[COPY1]]
- ; CHECK: %ymm0 = COPY [[VPMULLWYrr]]
- ; CHECK: RET 0, implicit %ymm0
- %0(<16 x s16>) = COPY %ymm0
- %1(<16 x s16>) = COPY %ymm1
+ ; CHECK: $ymm0 = COPY [[VPMULLWYrr]]
+ ; CHECK: RET 0, implicit $ymm0
+ %0(<16 x s16>) = COPY $ymm0
+ %1(<16 x s16>) = COPY $ymm1
%2(<16 x s16>) = G_MUL %0, %1
- %ymm0 = COPY %2(<16 x s16>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<16 x s16>)
+ RET 0, implicit $ymm0
...
---
@@ -308,19 +308,19 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; CHECK-LABEL: name: test_mul_v16i16_avx512bwvl
- ; CHECK: [[COPY:%[0-9]+]]:vr256x = COPY %ymm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr256x = COPY %ymm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr256x = COPY $ymm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
; CHECK: [[VPMULLWZ256rr:%[0-9]+]]:vr256x = VPMULLWZ256rr [[COPY]], [[COPY1]]
- ; CHECK: %ymm0 = COPY [[VPMULLWZ256rr]]
- ; CHECK: RET 0, implicit %ymm0
- %0(<16 x s16>) = COPY %ymm0
- %1(<16 x s16>) = COPY %ymm1
+ ; CHECK: $ymm0 = COPY [[VPMULLWZ256rr]]
+ ; CHECK: RET 0, implicit $ymm0
+ %0(<16 x s16>) = COPY $ymm0
+ %1(<16 x s16>) = COPY $ymm1
%2(<16 x s16>) = G_MUL %0, %1
- %ymm0 = COPY %2(<16 x s16>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<16 x s16>)
+ RET 0, implicit $ymm0
...
---
@@ -334,19 +334,19 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; CHECK-LABEL: name: test_mul_v8i32
- ; CHECK: [[COPY:%[0-9]+]]:vr256 = COPY %ymm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr256 = COPY %ymm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr256 = COPY $ymm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr256 = COPY $ymm1
; CHECK: [[VPMULLDYrr:%[0-9]+]]:vr256 = VPMULLDYrr [[COPY]], [[COPY1]]
- ; CHECK: %ymm0 = COPY [[VPMULLDYrr]]
- ; CHECK: RET 0, implicit %ymm0
- %0(<8 x s32>) = COPY %ymm0
- %1(<8 x s32>) = COPY %ymm1
+ ; CHECK: $ymm0 = COPY [[VPMULLDYrr]]
+ ; CHECK: RET 0, implicit $ymm0
+ %0(<8 x s32>) = COPY $ymm0
+ %1(<8 x s32>) = COPY $ymm1
%2(<8 x s32>) = G_MUL %0, %1
- %ymm0 = COPY %2(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<8 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -360,19 +360,19 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; CHECK-LABEL: name: test_mul_v8i32_avx512vl
- ; CHECK: [[COPY:%[0-9]+]]:vr256x = COPY %ymm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr256x = COPY %ymm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr256x = COPY $ymm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
; CHECK: [[VPMULLDZ256rr:%[0-9]+]]:vr256x = VPMULLDZ256rr [[COPY]], [[COPY1]]
- ; CHECK: %ymm0 = COPY [[VPMULLDZ256rr]]
- ; CHECK: RET 0, implicit %ymm0
- %0(<8 x s32>) = COPY %ymm0
- %1(<8 x s32>) = COPY %ymm1
+ ; CHECK: $ymm0 = COPY [[VPMULLDZ256rr]]
+ ; CHECK: RET 0, implicit $ymm0
+ %0(<8 x s32>) = COPY $ymm0
+ %1(<8 x s32>) = COPY $ymm1
%2(<8 x s32>) = G_MUL %0, %1
- %ymm0 = COPY %2(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<8 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -386,19 +386,19 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
; CHECK-LABEL: name: test_mul_v4i64
- ; CHECK: [[COPY:%[0-9]+]]:vr256x = COPY %ymm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr256x = COPY %ymm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr256x = COPY $ymm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr256x = COPY $ymm1
; CHECK: [[VPMULLQZ256rr:%[0-9]+]]:vr256x = VPMULLQZ256rr [[COPY]], [[COPY1]]
- ; CHECK: %ymm0 = COPY [[VPMULLQZ256rr]]
- ; CHECK: RET 0, implicit %ymm0
- %0(<4 x s64>) = COPY %ymm0
- %1(<4 x s64>) = COPY %ymm1
+ ; CHECK: $ymm0 = COPY [[VPMULLQZ256rr]]
+ ; CHECK: RET 0, implicit $ymm0
+ %0(<4 x s64>) = COPY $ymm0
+ %1(<4 x s64>) = COPY $ymm1
%2(<4 x s64>) = G_MUL %0, %1
- %ymm0 = COPY %2(<4 x s64>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<4 x s64>)
+ RET 0, implicit $ymm0
...
---
@@ -412,19 +412,19 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; CHECK-LABEL: name: test_mul_v32i16
- ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
; CHECK: [[VPMULLWZrr:%[0-9]+]]:vr512 = VPMULLWZrr [[COPY]], [[COPY1]]
- ; CHECK: %zmm0 = COPY [[VPMULLWZrr]]
- ; CHECK: RET 0, implicit %zmm0
- %0(<32 x s16>) = COPY %zmm0
- %1(<32 x s16>) = COPY %zmm1
+ ; CHECK: $zmm0 = COPY [[VPMULLWZrr]]
+ ; CHECK: RET 0, implicit $zmm0
+ %0(<32 x s16>) = COPY $zmm0
+ %1(<32 x s16>) = COPY $zmm1
%2(<32 x s16>) = G_MUL %0, %1
- %zmm0 = COPY %2(<32 x s16>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<32 x s16>)
+ RET 0, implicit $zmm0
...
---
@@ -438,19 +438,19 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; CHECK-LABEL: name: test_mul_v16i32
- ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
; CHECK: [[VPMULLDZrr:%[0-9]+]]:vr512 = VPMULLDZrr [[COPY]], [[COPY1]]
- ; CHECK: %zmm0 = COPY [[VPMULLDZrr]]
- ; CHECK: RET 0, implicit %zmm0
- %0(<16 x s32>) = COPY %zmm0
- %1(<16 x s32>) = COPY %zmm1
+ ; CHECK: $zmm0 = COPY [[VPMULLDZrr]]
+ ; CHECK: RET 0, implicit $zmm0
+ %0(<16 x s32>) = COPY $zmm0
+ %1(<16 x s32>) = COPY $zmm1
%2(<16 x s32>) = G_MUL %0, %1
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $zmm0
...
---
@@ -464,18 +464,18 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; CHECK-LABEL: name: test_mul_v8i64
- ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+ ; CHECK: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; CHECK: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
; CHECK: [[VPMULLQZrr:%[0-9]+]]:vr512 = VPMULLQZrr [[COPY]], [[COPY1]]
- ; CHECK: %zmm0 = COPY [[VPMULLQZrr]]
- ; CHECK: RET 0, implicit %zmm0
- %0(<8 x s64>) = COPY %zmm0
- %1(<8 x s64>) = COPY %zmm1
+ ; CHECK: $zmm0 = COPY [[VPMULLQZrr]]
+ ; CHECK: RET 0, implicit $zmm0
+ %0(<8 x s64>) = COPY $zmm0
+ %1(<8 x s64>) = COPY $zmm1
%2(<8 x s64>) = G_MUL %0, %1
- %zmm0 = COPY %2(<8 x s64>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<8 x s64>)
+ RET 0, implicit $zmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-or-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-or-scalar.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-or-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-or-scalar.mir Wed Jan 31 14:04:26 2018
@@ -38,19 +38,19 @@ stack:
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; ALL-LABEL: name: test_or_i8
- ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY %dil
- ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY %sil
- ; ALL: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %al = COPY [[OR8rr]]
- ; ALL: RET 0, implicit %al
- %0(s8) = COPY %dil
- %1(s8) = COPY %sil
+ ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY $dil
+ ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY $sil
+ ; ALL: [[OR8rr:%[0-9]+]]:gr8 = OR8rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $al = COPY [[OR8rr]]
+ ; ALL: RET 0, implicit $al
+ %0(s8) = COPY $dil
+ %1(s8) = COPY $sil
%2(s8) = G_OR %0, %1
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
---
@@ -68,19 +68,19 @@ stack:
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; ALL-LABEL: name: test_or_i16
- ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY %di
- ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY %si
- ; ALL: [[OR16rr:%[0-9]+]]:gr16 = OR16rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %ax = COPY [[OR16rr]]
- ; ALL: RET 0, implicit %ax
- %0(s16) = COPY %di
- %1(s16) = COPY %si
+ ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY $di
+ ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY $si
+ ; ALL: [[OR16rr:%[0-9]+]]:gr16 = OR16rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $ax = COPY [[OR16rr]]
+ ; ALL: RET 0, implicit $ax
+ %0(s16) = COPY $di
+ %1(s16) = COPY $si
%2(s16) = G_OR %0, %1
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
@@ -98,19 +98,19 @@ stack:
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; ALL-LABEL: name: test_or_i32
- ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; ALL: [[OR32rr:%[0-9]+]]:gr32 = OR32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %eax = COPY [[OR32rr]]
- ; ALL: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; ALL: [[OR32rr:%[0-9]+]]:gr32 = OR32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $eax = COPY [[OR32rr]]
+ ; ALL: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s32) = G_OR %0, %1
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -128,18 +128,18 @@ stack:
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; ALL-LABEL: name: test_or_i64
- ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; ALL: [[OR64rr:%[0-9]+]]:gr64 = OR64rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %rax = COPY [[OR64rr]]
- ; ALL: RET 0, implicit %rax
- %0(s64) = COPY %rdi
- %1(s64) = COPY %rsi
+ ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; ALL: [[OR64rr:%[0-9]+]]:gr64 = OR64rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $rax = COPY [[OR64rr]]
+ ; ALL: RET 0, implicit $rax
+ %0(s64) = COPY $rdi
+ %1(s64) = COPY $rsi
%2(s64) = G_OR %0, %1
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-phi.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-phi.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-phi.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-phi.mir Wed Jan 31 14:04:26 2018
@@ -119,31 +119,31 @@ body: |
; ALL-LABEL: name: test_i8
; ALL: bb.0.entry:
; ALL: successors: %bb.2(0x40000000), %bb.1(0x40000000)
- ; ALL: liveins: %edi, %edx, %esi
- ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
+ ; ALL: liveins: $edi, $edx, $esi
+ ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; ALL: [[COPY2:%[0-9]+]]:gr8 = COPY [[COPY1]].sub_8bit
- ; ALL: [[COPY3:%[0-9]+]]:gr32 = COPY %edx
+ ; ALL: [[COPY3:%[0-9]+]]:gr32 = COPY $edx
; ALL: [[COPY4:%[0-9]+]]:gr8 = COPY [[COPY3]].sub_8bit
- ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def %eflags
- ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def %eflags
- ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit %eflags
- ; ALL: TEST8ri [[SETGr]], 1, implicit-def %eflags
- ; ALL: JNE_1 %bb.2, implicit %eflags
+ ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
+ ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags
+ ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags
+ ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags
+ ; ALL: JNE_1 %bb.2, implicit $eflags
; ALL: bb.1.cond.false:
; ALL: successors: %bb.2(0x80000000)
; ALL: bb.2.cond.end:
; ALL: [[PHI:%[0-9]+]]:gr8 = PHI [[COPY4]], %bb.1, [[COPY2]], %bb.0
- ; ALL: %al = COPY [[PHI]]
- ; ALL: RET 0, implicit %al
+ ; ALL: $al = COPY [[PHI]]
+ ; ALL: RET 0, implicit $al
bb.1.entry:
successors: %bb.3(0x40000000), %bb.2(0x40000000)
- liveins: %edi, %edx, %esi
+ liveins: $edi, $edx, $esi
- %0:gpr(s32) = COPY %edi
- %3:gpr(s32) = COPY %esi
+ %0:gpr(s32) = COPY $edi
+ %3:gpr(s32) = COPY $esi
%1:gpr(s8) = G_TRUNC %3(s32)
- %4:gpr(s32) = COPY %edx
+ %4:gpr(s32) = COPY $edx
%2:gpr(s8) = G_TRUNC %4(s32)
%5:gpr(s32) = G_CONSTANT i32 0
%6:gpr(s1) = G_ICMP intpred(sgt), %0(s32), %5
@@ -155,8 +155,8 @@ body: |
bb.3.cond.end:
%7:gpr(s8) = G_PHI %2(s8), %bb.2, %1(s8), %bb.1
- %al = COPY %7(s8)
- RET 0, implicit %al
+ $al = COPY %7(s8)
+ RET 0, implicit $al
...
---
@@ -178,31 +178,31 @@ body: |
; ALL-LABEL: name: test_i16
; ALL: bb.0.entry:
; ALL: successors: %bb.2(0x40000000), %bb.1(0x40000000)
- ; ALL: liveins: %edi, %edx, %esi
- ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
+ ; ALL: liveins: $edi, $edx, $esi
+ ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
; ALL: [[COPY2:%[0-9]+]]:gr16 = COPY [[COPY1]].sub_16bit
- ; ALL: [[COPY3:%[0-9]+]]:gr32 = COPY %edx
+ ; ALL: [[COPY3:%[0-9]+]]:gr32 = COPY $edx
; ALL: [[COPY4:%[0-9]+]]:gr16 = COPY [[COPY3]].sub_16bit
- ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def %eflags
- ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def %eflags
- ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit %eflags
- ; ALL: TEST8ri [[SETGr]], 1, implicit-def %eflags
- ; ALL: JNE_1 %bb.2, implicit %eflags
+ ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
+ ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags
+ ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags
+ ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags
+ ; ALL: JNE_1 %bb.2, implicit $eflags
; ALL: bb.1.cond.false:
; ALL: successors: %bb.2(0x80000000)
; ALL: bb.2.cond.end:
; ALL: [[PHI:%[0-9]+]]:gr16 = PHI [[COPY4]], %bb.1, [[COPY2]], %bb.0
- ; ALL: %ax = COPY [[PHI]]
- ; ALL: RET 0, implicit %ax
+ ; ALL: $ax = COPY [[PHI]]
+ ; ALL: RET 0, implicit $ax
bb.1.entry:
successors: %bb.3(0x40000000), %bb.2(0x40000000)
- liveins: %edi, %edx, %esi
+ liveins: $edi, $edx, $esi
- %0:gpr(s32) = COPY %edi
- %3:gpr(s32) = COPY %esi
+ %0:gpr(s32) = COPY $edi
+ %3:gpr(s32) = COPY $esi
%1:gpr(s16) = G_TRUNC %3(s32)
- %4:gpr(s32) = COPY %edx
+ %4:gpr(s32) = COPY $edx
%2:gpr(s16) = G_TRUNC %4(s32)
%5:gpr(s32) = G_CONSTANT i32 0
%6:gpr(s1) = G_ICMP intpred(sgt), %0(s32), %5
@@ -214,8 +214,8 @@ body: |
bb.3.cond.end:
%7:gpr(s16) = G_PHI %2(s16), %bb.2, %1(s16), %bb.1
- %ax = COPY %7(s16)
- RET 0, implicit %ax
+ $ax = COPY %7(s16)
+ RET 0, implicit $ax
...
---
@@ -235,15 +235,15 @@ body: |
; ALL-LABEL: name: test_i32
; ALL: bb.0.entry:
; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; ALL: liveins: %edi, %edx, %esi
- ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; ALL: [[COPY2:%[0-9]+]]:gr32 = COPY %edx
- ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def %eflags
- ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def %eflags
- ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit %eflags
- ; ALL: TEST8ri [[SETGr]], 1, implicit-def %eflags
- ; ALL: JNE_1 %bb.1, implicit %eflags
+ ; ALL: liveins: $edi, $edx, $esi
+ ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; ALL: [[COPY2:%[0-9]+]]:gr32 = COPY $edx
+ ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
+ ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags
+ ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags
+ ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags
+ ; ALL: JNE_1 %bb.1, implicit $eflags
; ALL: JMP_1 %bb.2
; ALL: bb.1.cond.true:
; ALL: successors: %bb.3(0x80000000)
@@ -252,15 +252,15 @@ body: |
; ALL: successors: %bb.3(0x80000000)
; ALL: bb.3.cond.end:
; ALL: [[PHI:%[0-9]+]]:gr32 = PHI [[COPY1]], %bb.1, [[COPY2]], %bb.2
- ; ALL: %eax = COPY [[PHI]]
- ; ALL: RET 0, implicit %eax
+ ; ALL: $eax = COPY [[PHI]]
+ ; ALL: RET 0, implicit $eax
bb.1.entry:
successors: %bb.2(0x40000000), %bb.3(0x40000000)
- liveins: %edi, %edx, %esi
+ liveins: $edi, $edx, $esi
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
- %2(s32) = COPY %edx
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
+ %2(s32) = COPY $edx
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
G_BRCOND %4(s1), %bb.2
@@ -277,8 +277,8 @@ body: |
bb.4.cond.end:
%5(s32) = G_PHI %1(s32), %bb.2, %2(s32), %bb.3
- %eax = COPY %5(s32)
- RET 0, implicit %eax
+ $eax = COPY %5(s32)
+ RET 0, implicit $eax
...
---
@@ -298,15 +298,15 @@ body: |
; ALL-LABEL: name: test_i64
; ALL: bb.0.entry:
; ALL: successors: %bb.1(0x40000000), %bb.2(0x40000000)
- ; ALL: liveins: %edi, %rdx, %rsi
- ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; ALL: [[COPY2:%[0-9]+]]:gr64 = COPY %rdx
- ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def %eflags
- ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def %eflags
- ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit %eflags
- ; ALL: TEST8ri [[SETGr]], 1, implicit-def %eflags
- ; ALL: JNE_1 %bb.1, implicit %eflags
+ ; ALL: liveins: $edi, $rdx, $rsi
+ ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; ALL: [[COPY2:%[0-9]+]]:gr64 = COPY $rdx
+ ; ALL: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
+ ; ALL: CMP32rr [[COPY]], [[MOV32r0_]], implicit-def $eflags
+ ; ALL: [[SETGr:%[0-9]+]]:gr8 = SETGr implicit $eflags
+ ; ALL: TEST8ri [[SETGr]], 1, implicit-def $eflags
+ ; ALL: JNE_1 %bb.1, implicit $eflags
; ALL: JMP_1 %bb.2
; ALL: bb.1.cond.true:
; ALL: successors: %bb.3(0x80000000)
@@ -315,15 +315,15 @@ body: |
; ALL: successors: %bb.3(0x80000000)
; ALL: bb.3.cond.end:
; ALL: [[PHI:%[0-9]+]]:gr64 = PHI [[COPY1]], %bb.1, [[COPY2]], %bb.2
- ; ALL: %rax = COPY [[PHI]]
- ; ALL: RET 0, implicit %rax
+ ; ALL: $rax = COPY [[PHI]]
+ ; ALL: RET 0, implicit $rax
bb.1.entry:
successors: %bb.2(0x40000000), %bb.3(0x40000000)
- liveins: %edi, %rdx, %rsi
+ liveins: $edi, $rdx, $rsi
- %0(s32) = COPY %edi
- %1(s64) = COPY %rsi
- %2(s64) = COPY %rdx
+ %0(s32) = COPY $edi
+ %1(s64) = COPY $rsi
+ %2(s64) = COPY $rdx
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
G_BRCOND %4(s1), %bb.2
@@ -340,8 +340,8 @@ body: |
bb.4.cond.end:
%5(s64) = G_PHI %1(s64), %bb.2, %2(s64), %bb.3
- %rax = COPY %5(s64)
- RET 0, implicit %rax
+ $rax = COPY %5(s64)
+ RET 0, implicit $rax
...
---
@@ -371,16 +371,16 @@ stack:
constants:
# ALL-LABEL: bb.3.cond.end:
# ALL: %5:fr32 = PHI %1, %bb.1, %2, %bb.2
-# ALL-NEXT: %xmm0 = COPY %5
-# ALL-NEXT: RET 0, implicit %xmm0
+# ALL-NEXT: $xmm0 = COPY %5
+# ALL-NEXT: RET 0, implicit $xmm0
body: |
bb.1.entry:
successors: %bb.2(0x40000000), %bb.3(0x40000000)
- liveins: %edi, %xmm0, %xmm1
+ liveins: $edi, $xmm0, $xmm1
- %0(s32) = COPY %edi
- %1(s32) = COPY %xmm0
- %2(s32) = COPY %xmm1
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $xmm0
+ %2(s32) = COPY $xmm1
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
G_BRCOND %4(s1), %bb.2
@@ -397,8 +397,8 @@ body: |
bb.4.cond.end:
%5(s32) = G_PHI %1(s32), %bb.2, %2(s32), %bb.3
- %xmm0 = COPY %5(s32)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %5(s32)
+ RET 0, implicit $xmm0
...
---
@@ -424,16 +424,16 @@ registers:
- { id: 5, class: vecr, preferred-register: '' }
# ALL-LABEL: bb.3.cond.end:
# ALL: %5:fr64 = PHI %1, %bb.1, %2, %bb.2
-# ALL-NEXT: %xmm0 = COPY %5
-# ALL-NEXT: RET 0, implicit %xmm0
+# ALL-NEXT: $xmm0 = COPY %5
+# ALL-NEXT: RET 0, implicit $xmm0
body: |
bb.1.entry:
successors: %bb.2(0x40000000), %bb.3(0x40000000)
- liveins: %edi, %xmm0, %xmm1
+ liveins: $edi, $xmm0, $xmm1
- %0(s32) = COPY %edi
- %1(s64) = COPY %xmm0
- %2(s64) = COPY %xmm1
+ %0(s32) = COPY $edi
+ %1(s64) = COPY $xmm0
+ %2(s64) = COPY $xmm1
%3(s32) = G_CONSTANT i32 0
%4(s1) = G_ICMP intpred(sgt), %0(s32), %3
G_BRCOND %4(s1), %bb.2
@@ -450,7 +450,7 @@ body: |
bb.4.cond.end:
%5(s64) = G_PHI %1(s64), %bb.2, %2(s64), %bb.3
- %xmm0 = COPY %5(s64)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %5(s64)
+ RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-sub-v128.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-sub-v128.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-sub-v128.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-sub-v128.mir Wed Jan 31 14:04:26 2018
@@ -44,13 +44,13 @@ registers:
# AVX512BWVL: %2:vr128x = VPSUBBZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<16 x s8>) = COPY %xmm0
- %1(<16 x s8>) = COPY %xmm1
+ %0(<16 x s8>) = COPY $xmm0
+ %1(<16 x s8>) = COPY $xmm1
%2(<16 x s8>) = G_SUB %0, %1
- %xmm0 = COPY %2(<16 x s8>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<16 x s8>)
+ RET 0, implicit $xmm0
...
---
@@ -72,13 +72,13 @@ registers:
# AVX512BWVL: %2:vr128x = VPSUBWZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<8 x s16>) = COPY %xmm0
- %1(<8 x s16>) = COPY %xmm1
+ %0(<8 x s16>) = COPY $xmm0
+ %1(<8 x s16>) = COPY $xmm1
%2(<8 x s16>) = G_SUB %0, %1
- %xmm0 = COPY %2(<8 x s16>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<8 x s16>)
+ RET 0, implicit $xmm0
...
---
@@ -100,13 +100,13 @@ registers:
# AVX512BWVL: %2:vr128x = VPSUBDZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<4 x s32>) = COPY %xmm0
- %1(<4 x s32>) = COPY %xmm1
+ %0(<4 x s32>) = COPY $xmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<4 x s32>) = G_SUB %0, %1
- %xmm0 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -128,12 +128,12 @@ registers:
# AVX512BWVL: %2:vr128x = VPSUBQZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<2 x s64>) = COPY %xmm0
- %1(<2 x s64>) = COPY %xmm1
+ %0(<2 x s64>) = COPY $xmm0
+ %1(<2 x s64>) = COPY $xmm1
%2(<2 x s64>) = G_SUB %0, %1
- %xmm0 = COPY %2(<2 x s64>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<2 x s64>)
+ RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-sub-v256.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-sub-v256.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-sub-v256.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-sub-v256.mir Wed Jan 31 14:04:26 2018
@@ -40,13 +40,13 @@ registers:
# AVX512BWVL: %2:vr256x = VPSUBBZ256rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<32 x s8>) = COPY %ymm0
- %1(<32 x s8>) = COPY %ymm1
+ %0(<32 x s8>) = COPY $ymm0
+ %1(<32 x s8>) = COPY $ymm1
%2(<32 x s8>) = G_SUB %0, %1
- %ymm0 = COPY %2(<32 x s8>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<32 x s8>)
+ RET 0, implicit $ymm0
...
---
@@ -66,13 +66,13 @@ registers:
# AVX512BWVL: %2:vr256x = VPSUBWZ256rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<16 x s16>) = COPY %ymm0
- %1(<16 x s16>) = COPY %ymm1
+ %0(<16 x s16>) = COPY $ymm0
+ %1(<16 x s16>) = COPY $ymm1
%2(<16 x s16>) = G_SUB %0, %1
- %ymm0 = COPY %2(<16 x s16>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<16 x s16>)
+ RET 0, implicit $ymm0
...
---
@@ -92,13 +92,13 @@ registers:
# AVX512BWVL: %2:vr256x = VPSUBDZ256rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<8 x s32>) = COPY %ymm0
- %1(<8 x s32>) = COPY %ymm1
+ %0(<8 x s32>) = COPY $ymm0
+ %1(<8 x s32>) = COPY $ymm1
%2(<8 x s32>) = G_SUB %0, %1
- %ymm0 = COPY %2(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<8 x s32>)
+ RET 0, implicit $ymm0
...
---
@@ -118,12 +118,12 @@ registers:
# AVX512BWVL: %2:vr256x = VPSUBQZ256rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %ymm0, %ymm1
+ liveins: $ymm0, $ymm1
- %0(<4 x s64>) = COPY %ymm0
- %1(<4 x s64>) = COPY %ymm1
+ %0(<4 x s64>) = COPY $ymm0
+ %1(<4 x s64>) = COPY $ymm1
%2(<4 x s64>) = G_SUB %0, %1
- %ymm0 = COPY %2(<4 x s64>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %2(<4 x s64>)
+ RET 0, implicit $ymm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-sub-v512.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-sub-v512.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-sub-v512.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-sub-v512.mir Wed Jan 31 14:04:26 2018
@@ -36,19 +36,19 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_sub_v64i8
- ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+ ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
; ALL: [[VPSUBBZrr:%[0-9]+]]:vr512 = VPSUBBZrr [[COPY]], [[COPY1]]
- ; ALL: %zmm0 = COPY [[VPSUBBZrr]]
- ; ALL: RET 0, implicit %zmm0
- %0(<64 x s8>) = COPY %zmm0
- %1(<64 x s8>) = COPY %zmm1
+ ; ALL: $zmm0 = COPY [[VPSUBBZrr]]
+ ; ALL: RET 0, implicit $zmm0
+ %0(<64 x s8>) = COPY $zmm0
+ %1(<64 x s8>) = COPY $zmm1
%2(<64 x s8>) = G_SUB %0, %1
- %zmm0 = COPY %2(<64 x s8>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<64 x s8>)
+ RET 0, implicit $zmm0
...
---
@@ -62,19 +62,19 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_sub_v32i16
- ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+ ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
; ALL: [[VPSUBWZrr:%[0-9]+]]:vr512 = VPSUBWZrr [[COPY]], [[COPY1]]
- ; ALL: %zmm0 = COPY [[VPSUBWZrr]]
- ; ALL: RET 0, implicit %zmm0
- %0(<32 x s16>) = COPY %zmm0
- %1(<32 x s16>) = COPY %zmm1
+ ; ALL: $zmm0 = COPY [[VPSUBWZrr]]
+ ; ALL: RET 0, implicit $zmm0
+ %0(<32 x s16>) = COPY $zmm0
+ %1(<32 x s16>) = COPY $zmm1
%2(<32 x s16>) = G_SUB %0, %1
- %zmm0 = COPY %2(<32 x s16>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<32 x s16>)
+ RET 0, implicit $zmm0
...
---
@@ -88,19 +88,19 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_sub_v16i32
- ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+ ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
; ALL: [[VPSUBDZrr:%[0-9]+]]:vr512 = VPSUBDZrr [[COPY]], [[COPY1]]
- ; ALL: %zmm0 = COPY [[VPSUBDZrr]]
- ; ALL: RET 0, implicit %zmm0
- %0(<16 x s32>) = COPY %zmm0
- %1(<16 x s32>) = COPY %zmm1
+ ; ALL: $zmm0 = COPY [[VPSUBDZrr]]
+ ; ALL: RET 0, implicit $zmm0
+ %0(<16 x s32>) = COPY $zmm0
+ %1(<16 x s32>) = COPY $zmm1
%2(<16 x s32>) = G_SUB %0, %1
- %zmm0 = COPY %2(<16 x s32>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<16 x s32>)
+ RET 0, implicit $zmm0
...
---
@@ -114,18 +114,18 @@ registers:
- { id: 2, class: vecr }
body: |
bb.1 (%ir-block.0):
- liveins: %zmm0, %zmm1
+ liveins: $zmm0, $zmm1
; ALL-LABEL: name: test_sub_v8i64
- ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY %zmm0
- ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY %zmm1
+ ; ALL: [[COPY:%[0-9]+]]:vr512 = COPY $zmm0
+ ; ALL: [[COPY1:%[0-9]+]]:vr512 = COPY $zmm1
; ALL: [[VPSUBQZrr:%[0-9]+]]:vr512 = VPSUBQZrr [[COPY]], [[COPY1]]
- ; ALL: %zmm0 = COPY [[VPSUBQZrr]]
- ; ALL: RET 0, implicit %zmm0
- %0(<8 x s64>) = COPY %zmm0
- %1(<8 x s64>) = COPY %zmm1
+ ; ALL: $zmm0 = COPY [[VPSUBQZrr]]
+ ; ALL: RET 0, implicit $zmm0
+ %0(<8 x s64>) = COPY $zmm0
+ %1(<8 x s64>) = COPY $zmm1
%2(<8 x s64>) = G_SUB %0, %1
- %zmm0 = COPY %2(<8 x s64>)
- RET 0, implicit %zmm0
+ $zmm0 = COPY %2(<8 x s64>)
+ RET 0, implicit $zmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-sub.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-sub.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-sub.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-sub.mir Wed Jan 31 14:04:26 2018
@@ -33,17 +33,17 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# ALL: %0:gr64 = COPY %rdi
-# ALL-NEXT: %1:gr64 = COPY %rsi
+# ALL: %0:gr64 = COPY $rdi
+# ALL-NEXT: %1:gr64 = COPY $rsi
# ALL-NEXT: %2:gr64 = SUB64rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
- %0(s64) = COPY %rdi
- %1(s64) = COPY %rsi
+ %0(s64) = COPY $rdi
+ %1(s64) = COPY $rsi
%2(s64) = G_SUB %0, %1
- %rax = COPY %2(s64)
+ $rax = COPY %2(s64)
...
@@ -55,17 +55,17 @@ registers:
- { id: 0, class: gpr }
- { id: 1, class: gpr }
- { id: 2, class: gpr }
-# ALL: %0:gr32 = COPY %edi
-# ALL-NEXT: %1:gr32 = COPY %esi
+# ALL: %0:gr32 = COPY $edi
+# ALL-NEXT: %1:gr32 = COPY $esi
# ALL-NEXT: %2:gr32 = SUB32rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s32) = G_SUB %0, %1
- %eax = COPY %2(s32)
+ $eax = COPY %2(s32)
...
---
@@ -79,23 +79,23 @@ registers:
- { id: 0, class: vecr }
- { id: 1, class: vecr }
- { id: 2, class: vecr }
-# NO_AVX512VL: %0:vr128 = COPY %xmm0
-# AVX512VL: %0:vr128x = COPY %xmm0
-# NO_AVX512VL: %1:vr128 = COPY %xmm1
-# AVX512VL: %1:vr128x = COPY %xmm1
+# NO_AVX512VL: %0:vr128 = COPY $xmm0
+# AVX512VL: %0:vr128x = COPY $xmm0
+# NO_AVX512VL: %1:vr128 = COPY $xmm1
+# AVX512VL: %1:vr128x = COPY $xmm1
# SSE-NEXT: %2:vr128 = PSUBDrr %0, %1
# AVX-NEXT: %2:vr128 = VPSUBDrr %0, %1
# AVX512F-NEXT: %2:vr128 = VPSUBDrr %0, %1
# AVX512VL-NEXT: %2:vr128x = VPSUBDZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<4 x s32>) = COPY %xmm0
- %1(<4 x s32>) = COPY %xmm1
+ %0(<4 x s32>) = COPY $xmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<4 x s32>) = G_SUB %0, %1
- %xmm0 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -109,23 +109,23 @@ registers:
- { id: 0, class: vecr }
- { id: 1, class: vecr }
- { id: 2, class: vecr }
-# NO_AVX512VL: %0:vr128 = COPY %xmm0
-# NO_AVX512VL: %1:vr128 = COPY %xmm1
+# NO_AVX512VL: %0:vr128 = COPY $xmm0
+# NO_AVX512VL: %1:vr128 = COPY $xmm1
# SSE-NEXT: %2:vr128 = SUBPSrr %0, %1
# AVX-NEXT: %2:vr128 = VSUBPSrr %0, %1
# AVX512F-NEXT: %2:vr128 = VSUBPSrr %0, %1
#
-# AVX512VL: %0:vr128x = COPY %xmm0
-# AVX512VL: %1:vr128x = COPY %xmm1
+# AVX512VL: %0:vr128x = COPY $xmm0
+# AVX512VL: %1:vr128x = COPY $xmm1
# AVX512VL-NEXT: %2:vr128x = VSUBPSZ128rr %0, %1
body: |
bb.1 (%ir-block.0):
- liveins: %xmm0, %xmm1
+ liveins: $xmm0, $xmm1
- %0(<4 x s32>) = COPY %xmm0
- %1(<4 x s32>) = COPY %xmm1
+ %0(<4 x s32>) = COPY $xmm0
+ %1(<4 x s32>) = COPY $xmm1
%2(<4 x s32>) = G_FSUB %0, %1
- %xmm0 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-trunc.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-trunc.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-trunc.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-trunc.mir Wed Jan 31 14:04:26 2018
@@ -43,18 +43,18 @@ registers:
- { id: 2, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: trunc_i32toi1
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
; CHECK: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit
- ; CHECK: %al = COPY [[COPY1]]
- ; CHECK: RET 0, implicit %al
- %0(s32) = COPY %edi
+ ; CHECK: $al = COPY [[COPY1]]
+ ; CHECK: RET 0, implicit $al
+ %0(s32) = COPY $edi
%1(s1) = G_TRUNC %0(s32)
%2(s8) = G_ANYEXT %1(s1)
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
---
@@ -67,17 +67,17 @@ registers:
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: trunc_i32toi8
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
; CHECK: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit
- ; CHECK: %al = COPY [[COPY1]]
- ; CHECK: RET 0, implicit %al
- %0(s32) = COPY %edi
+ ; CHECK: $al = COPY [[COPY1]]
+ ; CHECK: RET 0, implicit $al
+ %0(s32) = COPY $edi
%1(s8) = G_TRUNC %0(s32)
- %al = COPY %1(s8)
- RET 0, implicit %al
+ $al = COPY %1(s8)
+ RET 0, implicit $al
...
---
@@ -90,17 +90,17 @@ registers:
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; CHECK-LABEL: name: trunc_i32toi16
- ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY %edi
+ ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi
; CHECK: [[COPY1:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit
- ; CHECK: %ax = COPY [[COPY1]]
- ; CHECK: RET 0, implicit %ax
- %0(s32) = COPY %edi
+ ; CHECK: $ax = COPY [[COPY1]]
+ ; CHECK: RET 0, implicit $ax
+ %0(s32) = COPY $edi
%1(s16) = G_TRUNC %0(s32)
- %ax = COPY %1(s16)
- RET 0, implicit %ax
+ $ax = COPY %1(s16)
+ RET 0, implicit $ax
...
---
@@ -113,17 +113,17 @@ registers:
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; CHECK-LABEL: name: trunc_i64toi8
- ; CHECK: [[COPY:%[0-9]+]]:gr64_with_sub_8bit = COPY %rdi
+ ; CHECK: [[COPY:%[0-9]+]]:gr64_with_sub_8bit = COPY $rdi
; CHECK: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit
- ; CHECK: %al = COPY [[COPY1]]
- ; CHECK: RET 0, implicit %al
- %0(s64) = COPY %rdi
+ ; CHECK: $al = COPY [[COPY1]]
+ ; CHECK: RET 0, implicit $al
+ %0(s64) = COPY $rdi
%1(s8) = G_TRUNC %0(s64)
- %al = COPY %1(s8)
- RET 0, implicit %al
+ $al = COPY %1(s8)
+ RET 0, implicit $al
...
---
@@ -136,17 +136,17 @@ registers:
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; CHECK-LABEL: name: trunc_i64toi16
- ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; CHECK: [[COPY1:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit
- ; CHECK: %ax = COPY [[COPY1]]
- ; CHECK: RET 0, implicit %ax
- %0(s64) = COPY %rdi
+ ; CHECK: $ax = COPY [[COPY1]]
+ ; CHECK: RET 0, implicit $ax
+ %0(s64) = COPY $rdi
%1(s16) = G_TRUNC %0(s64)
- %ax = COPY %1(s16)
- RET 0, implicit %ax
+ $ax = COPY %1(s16)
+ RET 0, implicit $ax
...
---
@@ -159,16 +159,16 @@ registers:
- { id: 1, class: gpr }
body: |
bb.1 (%ir-block.0):
- liveins: %rdi
+ liveins: $rdi
; CHECK-LABEL: name: trunc_i64toi32
- ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
+ ; CHECK: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
; CHECK: [[COPY1:%[0-9]+]]:gr32 = COPY [[COPY]].sub_32bit
- ; CHECK: %eax = COPY [[COPY1]]
- ; CHECK: RET 0, implicit %eax
- %0(s64) = COPY %rdi
+ ; CHECK: $eax = COPY [[COPY1]]
+ ; CHECK: RET 0, implicit $eax
+ %0(s64) = COPY $rdi
%1(s32) = G_TRUNC %0(s64)
- %eax = COPY %1(s32)
- RET 0, implicit %eax
+ $eax = COPY %1(s32)
+ RET 0, implicit $eax
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-undef.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-undef.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-undef.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-undef.mir Wed Jan 31 14:04:26 2018
@@ -27,11 +27,11 @@ body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test
; ALL: [[DEF:%[0-9]+]]:gr8 = IMPLICIT_DEF
- ; ALL: %al = COPY [[DEF]]
- ; ALL: RET 0, implicit %al
+ ; ALL: $al = COPY [[DEF]]
+ ; ALL: RET 0, implicit $al
%0(s8) = G_IMPLICIT_DEF
- %al = COPY %0(s8)
- RET 0, implicit %al
+ $al = COPY %0(s8)
+ RET 0, implicit $al
...
---
@@ -49,18 +49,18 @@ stack:
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %edi
+ liveins: $edi
; ALL-LABEL: name: test2
- ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY %dil
+ ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY $dil
; ALL: [[DEF:%[0-9]+]]:gr8 = IMPLICIT_DEF
- ; ALL: [[ADD8rr:%[0-9]+]]:gr8 = ADD8rr [[COPY]], [[DEF]], implicit-def %eflags
- ; ALL: %al = COPY [[ADD8rr]]
- ; ALL: RET 0, implicit %al
- %0(s8) = COPY %dil
+ ; ALL: [[ADD8rr:%[0-9]+]]:gr8 = ADD8rr [[COPY]], [[DEF]], implicit-def $eflags
+ ; ALL: $al = COPY [[ADD8rr]]
+ ; ALL: RET 0, implicit $al
+ %0(s8) = COPY $dil
%1(s8) = G_IMPLICIT_DEF
%2(s8) = G_ADD %0, %1
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-unmerge-vec256.mir Wed Jan 31 14:04:26 2018
@@ -26,21 +26,21 @@ body: |
; AVX: [[DEF:%[0-9]+]]:vr256 = IMPLICIT_DEF
; AVX: [[COPY:%[0-9]+]]:vr128 = COPY [[DEF]].sub_xmm
; AVX: [[VEXTRACTF128rr:%[0-9]+]]:vr128 = VEXTRACTF128rr [[DEF]], 1
- ; AVX: %xmm0 = COPY [[COPY]]
- ; AVX: %xmm1 = COPY [[VEXTRACTF128rr]]
- ; AVX: RET 0, implicit %xmm0, implicit %xmm1
+ ; AVX: $xmm0 = COPY [[COPY]]
+ ; AVX: $xmm1 = COPY [[VEXTRACTF128rr]]
+ ; AVX: RET 0, implicit $xmm0, implicit $xmm1
; AVX512VL-LABEL: name: test_unmerge
; AVX512VL: [[DEF:%[0-9]+]]:vr256x = IMPLICIT_DEF
; AVX512VL: [[COPY:%[0-9]+]]:vr128x = COPY [[DEF]].sub_xmm
; AVX512VL: [[VEXTRACTF32x4Z256rr:%[0-9]+]]:vr128x = VEXTRACTF32x4Z256rr [[DEF]], 1
- ; AVX512VL: %xmm0 = COPY [[COPY]]
- ; AVX512VL: %xmm1 = COPY [[VEXTRACTF32x4Z256rr]]
- ; AVX512VL: RET 0, implicit %xmm0, implicit %xmm1
+ ; AVX512VL: $xmm0 = COPY [[COPY]]
+ ; AVX512VL: $xmm1 = COPY [[VEXTRACTF32x4Z256rr]]
+ ; AVX512VL: RET 0, implicit $xmm0, implicit $xmm1
%0(<8 x s32>) = IMPLICIT_DEF
%1(<4 x s32>), %2(<4 x s32>) = G_UNMERGE_VALUES %0(<8 x s32>)
- %xmm0 = COPY %1(<4 x s32>)
- %xmm1 = COPY %2(<4 x s32>)
- RET 0, implicit %xmm0, implicit %xmm1
+ $xmm0 = COPY %1(<4 x s32>)
+ $xmm1 = COPY %2(<4 x s32>)
+ RET 0, implicit $xmm0, implicit $xmm1
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-unmerge-vec512.mir Wed Jan 31 14:04:26 2018
@@ -30,12 +30,12 @@ body: |
; ALL: [[VEXTRACTF32x4Zrr:%[0-9]+]]:vr128x = VEXTRACTF32x4Zrr [[DEF]], 1
; ALL: [[VEXTRACTF32x4Zrr1:%[0-9]+]]:vr128x = VEXTRACTF32x4Zrr [[DEF]], 2
; ALL: [[VEXTRACTF32x4Zrr2:%[0-9]+]]:vr128x = VEXTRACTF32x4Zrr [[DEF]], 3
- ; ALL: %xmm0 = COPY [[COPY]]
- ; ALL: RET 0, implicit %xmm0
+ ; ALL: $xmm0 = COPY [[COPY]]
+ ; ALL: RET 0, implicit $xmm0
%0(<16 x s32>) = IMPLICIT_DEF
%1(<4 x s32>), %2(<4 x s32>), %3(<4 x s32>), %4(<4 x s32>) = G_UNMERGE_VALUES %0(<16 x s32>)
- %xmm0 = COPY %1(<4 x s32>)
- RET 0, implicit %xmm0
+ $xmm0 = COPY %1(<4 x s32>)
+ RET 0, implicit $xmm0
...
---
@@ -54,11 +54,11 @@ body: |
; ALL: [[DEF:%[0-9]+]]:vr512 = IMPLICIT_DEF
; ALL: [[COPY:%[0-9]+]]:vr256x = COPY [[DEF]].sub_ymm
; ALL: [[VEXTRACTF64x4Zrr:%[0-9]+]]:vr256x = VEXTRACTF64x4Zrr [[DEF]], 1
- ; ALL: %ymm0 = COPY [[COPY]]
- ; ALL: RET 0, implicit %ymm0
+ ; ALL: $ymm0 = COPY [[COPY]]
+ ; ALL: RET 0, implicit $ymm0
%0(<16 x s32>) = IMPLICIT_DEF
%1(<8 x s32>), %2(<8 x s32>) = G_UNMERGE_VALUES %0(<16 x s32>)
- %ymm0 = COPY %1(<8 x s32>)
- RET 0, implicit %ymm0
+ $ymm0 = COPY %1(<8 x s32>)
+ RET 0, implicit $ymm0
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-xor-scalar.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-xor-scalar.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-xor-scalar.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-xor-scalar.mir Wed Jan 31 14:04:26 2018
@@ -38,19 +38,19 @@ stack:
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; ALL-LABEL: name: test_xor_i8
- ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY %dil
- ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY %sil
- ; ALL: [[XOR8rr:%[0-9]+]]:gr8 = XOR8rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %al = COPY [[XOR8rr]]
- ; ALL: RET 0, implicit %al
- %0(s8) = COPY %dil
- %1(s8) = COPY %sil
+ ; ALL: [[COPY:%[0-9]+]]:gr8 = COPY $dil
+ ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY $sil
+ ; ALL: [[XOR8rr:%[0-9]+]]:gr8 = XOR8rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $al = COPY [[XOR8rr]]
+ ; ALL: RET 0, implicit $al
+ %0(s8) = COPY $dil
+ %1(s8) = COPY $sil
%2(s8) = G_XOR %0, %1
- %al = COPY %2(s8)
- RET 0, implicit %al
+ $al = COPY %2(s8)
+ RET 0, implicit $al
...
---
@@ -68,19 +68,19 @@ stack:
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; ALL-LABEL: name: test_xor_i16
- ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY %di
- ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY %si
- ; ALL: [[XOR16rr:%[0-9]+]]:gr16 = XOR16rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %ax = COPY [[XOR16rr]]
- ; ALL: RET 0, implicit %ax
- %0(s16) = COPY %di
- %1(s16) = COPY %si
+ ; ALL: [[COPY:%[0-9]+]]:gr16 = COPY $di
+ ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY $si
+ ; ALL: [[XOR16rr:%[0-9]+]]:gr16 = XOR16rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $ax = COPY [[XOR16rr]]
+ ; ALL: RET 0, implicit $ax
+ %0(s16) = COPY $di
+ %1(s16) = COPY $si
%2(s16) = G_XOR %0, %1
- %ax = COPY %2(s16)
- RET 0, implicit %ax
+ $ax = COPY %2(s16)
+ RET 0, implicit $ax
...
---
@@ -98,19 +98,19 @@ stack:
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %edi, %esi
+ liveins: $edi, $esi
; ALL-LABEL: name: test_xor_i32
- ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY %edi
- ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY %esi
- ; ALL: [[XOR32rr:%[0-9]+]]:gr32 = XOR32rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %eax = COPY [[XOR32rr]]
- ; ALL: RET 0, implicit %eax
- %0(s32) = COPY %edi
- %1(s32) = COPY %esi
+ ; ALL: [[COPY:%[0-9]+]]:gr32 = COPY $edi
+ ; ALL: [[COPY1:%[0-9]+]]:gr32 = COPY $esi
+ ; ALL: [[XOR32rr:%[0-9]+]]:gr32 = XOR32rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $eax = COPY [[XOR32rr]]
+ ; ALL: RET 0, implicit $eax
+ %0(s32) = COPY $edi
+ %1(s32) = COPY $esi
%2(s32) = G_XOR %0, %1
- %eax = COPY %2(s32)
- RET 0, implicit %eax
+ $eax = COPY %2(s32)
+ RET 0, implicit $eax
...
---
@@ -128,18 +128,18 @@ stack:
constants:
body: |
bb.1 (%ir-block.0):
- liveins: %rdi, %rsi
+ liveins: $rdi, $rsi
; ALL-LABEL: name: test_xor_i64
- ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY %rdi
- ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY %rsi
- ; ALL: [[XOR64rr:%[0-9]+]]:gr64 = XOR64rr [[COPY]], [[COPY1]], implicit-def %eflags
- ; ALL: %rax = COPY [[XOR64rr]]
- ; ALL: RET 0, implicit %rax
- %0(s64) = COPY %rdi
- %1(s64) = COPY %rsi
+ ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi
+ ; ALL: [[COPY1:%[0-9]+]]:gr64 = COPY $rsi
+ ; ALL: [[XOR64rr:%[0-9]+]]:gr64 = XOR64rr [[COPY]], [[COPY1]], implicit-def $eflags
+ ; ALL: $rax = COPY [[XOR64rr]]
+ ; ALL: RET 0, implicit $rax
+ %0(s64) = COPY $rdi
+ %1(s64) = COPY $rsi
%2(s64) = G_XOR %0, %1
- %rax = COPY %2(s64)
- RET 0, implicit %rax
+ $rax = COPY %2(s64)
+ RET 0, implicit $rax
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/x32-select-frameIndex.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/x32-select-frameIndex.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/x32-select-frameIndex.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/x32-select-frameIndex.mir Wed Jan 31 14:04:26 2018
@@ -24,11 +24,11 @@ stack:
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: allocai32
- ; CHECK: [[LEA64_32r:%[0-9]+]]:gr32 = LEA64_32r %stack.0.ptr1, 1, %noreg, 0, %noreg
- ; CHECK: %eax = COPY [[LEA64_32r]]
- ; CHECK: RET 0, implicit %eax
+ ; CHECK: [[LEA64_32r:%[0-9]+]]:gr32 = LEA64_32r %stack.0.ptr1, 1, $noreg, 0, $noreg
+ ; CHECK: $eax = COPY [[LEA64_32r]]
+ ; CHECK: RET 0, implicit $eax
%0:gpr(p0) = G_FRAME_INDEX %stack.0.ptr1
- %eax = COPY %0(p0)
- RET 0, implicit %eax
+ $eax = COPY %0(p0)
+ RET 0, implicit $eax
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/x86-legalize-GV.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/x86-legalize-GV.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/x86-legalize-GV.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/x86-legalize-GV.mir Wed Jan 31 14:04:26 2018
@@ -19,12 +19,12 @@ regBankSelected: false
registers:
- { id: 0, class: _, preferred-register: '' }
# ALL: %0:_(p0) = G_GLOBAL_VALUE @g_int
-# ALL-NEXT: %eax = COPY %0(p0)
-# ALL-NEXT: RET 0, implicit %rax
+# ALL-NEXT: $eax = COPY %0(p0)
+# ALL-NEXT: RET 0, implicit $rax
body: |
bb.1.entry:
%0(p0) = G_GLOBAL_VALUE @g_int
- %eax = COPY %0(p0)
- RET 0, implicit %rax
+ $eax = COPY %0(p0)
+ RET 0, implicit $rax
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-frameIndex.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-frameIndex.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-frameIndex.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-frameIndex.mir Wed Jan 31 14:04:26 2018
@@ -24,11 +24,11 @@ stack:
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: allocai32
- ; CHECK: [[LEA32r:%[0-9]+]]:gr32 = LEA32r %stack.0.ptr1, 1, %noreg, 0, %noreg
- ; CHECK: %eax = COPY [[LEA32r]]
- ; CHECK: RET 0, implicit %eax
+ ; CHECK: [[LEA32r:%[0-9]+]]:gr32 = LEA32r %stack.0.ptr1, 1, $noreg, 0, $noreg
+ ; CHECK: $eax = COPY [[LEA32r]]
+ ; CHECK: RET 0, implicit $eax
%0:gpr(p0) = G_FRAME_INDEX %stack.0.ptr1
- %eax = COPY %0(p0)
- RET 0, implicit %eax
+ $eax = COPY %0(p0)
+ RET 0, implicit $eax
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/x86_64-legalize-GV.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/x86_64-legalize-GV.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/x86_64-legalize-GV.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/x86_64-legalize-GV.mir Wed Jan 31 14:04:26 2018
@@ -19,12 +19,12 @@ regBankSelected: false
registers:
- { id: 0, class: _, preferred-register: '' }
# ALL: %0:_(p0) = G_GLOBAL_VALUE @g_int
-# ALL-NEXT: %rax = COPY %0(p0)
-# ALL-NEXT: RET 0, implicit %rax
+# ALL-NEXT: $rax = COPY %0(p0)
+# ALL-NEXT: RET 0, implicit $rax
body: |
bb.1.entry:
%0(p0) = G_GLOBAL_VALUE @g_int
- %rax = COPY %0(p0)
- RET 0, implicit %rax
+ $rax = COPY %0(p0)
+ RET 0, implicit $rax
...
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/x86_64-select-frameIndex.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/x86_64-select-frameIndex.mir?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/x86_64-select-frameIndex.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/x86_64-select-frameIndex.mir Wed Jan 31 14:04:26 2018
@@ -24,11 +24,11 @@ stack:
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: allocai32
- ; CHECK: [[LEA64r:%[0-9]+]]:gr64 = LEA64r %stack.0.ptr1, 1, %noreg, 0, %noreg
- ; CHECK: %rax = COPY [[LEA64r]]
- ; CHECK: RET 0, implicit %rax
+ ; CHECK: [[LEA64r:%[0-9]+]]:gr64 = LEA64r %stack.0.ptr1, 1, $noreg, 0, $noreg
+ ; CHECK: $rax = COPY [[LEA64r]]
+ ; CHECK: RET 0, implicit $rax
%0:gpr(p0) = G_FRAME_INDEX %stack.0.ptr1
- %rax = COPY %0(p0)
- RET 0, implicit %rax
+ $rax = COPY %0(p0)
+ RET 0, implicit $rax
...
Modified: llvm/trunk/test/CodeGen/X86/add-i64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/add-i64.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/add-i64.ll (original)
+++ llvm/trunk/test/CodeGen/X86/add-i64.ll Wed Jan 31 14:04:26 2018
@@ -17,7 +17,7 @@ define i32 @pr32690(i32) {
; X64-NEXT: movl %edi, %eax
; X64-NEXT: addq $63, %rax
; X64-NEXT: shrq $6, %rax
-; X64-NEXT: # kill: def %eax killed %eax killed %rax
+; X64-NEXT: # kill: def $eax killed $eax killed $rax
; X64-NEXT: retq
%2 = zext i32 %0 to i64
%3 = add nuw nsw i64 %2, 63
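In the .ll-based asm tests from here on, the same sigil rule explains why some '%' tokens change while neighboring ones do not: lines such as "# kill: def $eax killed $eax killed $rax" are MIR-printer output echoed into the expected assembly as comments, so their register names take the new '$' sigil, whereas genuine AT&T operands such as %eax in "movl %edi, %eax" keep '%', since that sigil belongs to assembly syntax rather than to MIR.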
Modified: llvm/trunk/test/CodeGen/X86/add-sub-nsw-nuw.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/add-sub-nsw-nuw.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/add-sub-nsw-nuw.ll (original)
+++ llvm/trunk/test/CodeGen/X86/add-sub-nsw-nuw.ll Wed Jan 31 14:04:26 2018
@@ -10,7 +10,7 @@ define i8 @PR30841(i64 %argc) {
; CHECK: ## %bb.0: ## %entry
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
; CHECK-NEXT: negl %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retl
entry:
%or = or i64 %argc, -4294967296
Modified: llvm/trunk/test/CodeGen/X86/add.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/add.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/add.ll (original)
+++ llvm/trunk/test/CodeGen/X86/add.ll Wed Jan 31 14:04:26 2018
@@ -176,14 +176,14 @@ define i64 @test6(i64 %A, i32 %B) nounwi
;
; X64-LINUX-LABEL: test6:
; X64-LINUX: # %bb.0: # %entry
-; X64-LINUX-NEXT: # kill: def %esi killed %esi def %rsi
+; X64-LINUX-NEXT: # kill: def $esi killed $esi def $rsi
; X64-LINUX-NEXT: shlq $32, %rsi
; X64-LINUX-NEXT: leaq (%rsi,%rdi), %rax
; X64-LINUX-NEXT: retq
;
; X64-WIN32-LABEL: test6:
; X64-WIN32: # %bb.0: # %entry
-; X64-WIN32-NEXT: # kill: def %edx killed %edx def %rdx
+; X64-WIN32-NEXT: # kill: def $edx killed $edx def $rdx
; X64-WIN32-NEXT: shlq $32, %rdx
; X64-WIN32-NEXT: leaq (%rdx,%rcx), %rax
; X64-WIN32-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/addcarry.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/addcarry.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/addcarry.ll (original)
+++ llvm/trunk/test/CodeGen/X86/addcarry.ll Wed Jan 31 14:04:26 2018
@@ -84,7 +84,7 @@ entry:
define i8 @e(i32* nocapture %a, i32 %b) nounwind {
; CHECK-LABEL: e:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %esi killed %esi def %rsi
+; CHECK-NEXT: # kill: def $esi killed $esi def $rsi
; CHECK-NEXT: movl (%rdi), %ecx
; CHECK-NEXT: leal (%rsi,%rcx), %edx
; CHECK-NEXT: addl %esi, %edx
Modified: llvm/trunk/test/CodeGen/X86/and-encoding.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/and-encoding.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/and-encoding.ll (original)
+++ llvm/trunk/test/CodeGen/X86/and-encoding.ll Wed Jan 31 14:04:26 2018
@@ -110,7 +110,7 @@ define i32 @shrinkAndKnownBits(i32 %x) {
; CHECK-NEXT: imulq %rcx, %rax # encoding: [0x48,0x0f,0xaf,0xc1]
; CHECK-NEXT: shrq $36, %rax # encoding: [0x48,0xc1,0xe8,0x24]
; CHECK-NEXT: andl $-128, %eax # encoding: [0x83,0xe0,0x80]
-; CHECK-NEXT: # kill: def %eax killed %eax killed %rax
+; CHECK-NEXT: # kill: def $eax killed $eax killed $rax
; CHECK-NEXT: retq # encoding: [0xc3]
%div = udiv i32 %x, 17
%and = and i32 %div, 268435328
Modified: llvm/trunk/test/CodeGen/X86/anyext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/anyext.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/anyext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/anyext.ll Wed Jan 31 14:04:26 2018
@@ -8,7 +8,7 @@ define i32 @foo(i32 %p, i8 zeroext %x) n
; X32-LABEL: foo:
; X32: # %bb.0:
; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: # kill: def %eax killed %eax def %ax
+; X32-NEXT: # kill: def $eax killed $eax def $ax
; X32-NEXT: divb {{[0-9]+}}(%esp)
; X32-NEXT: movzbl %al, %eax
; X32-NEXT: andl $1, %eax
@@ -17,7 +17,7 @@ define i32 @foo(i32 %p, i8 zeroext %x) n
; X64-LABEL: foo:
; X64: # %bb.0:
; X64-NEXT: movzbl %dil, %eax
-; X64-NEXT: # kill: def %eax killed %eax def %ax
+; X64-NEXT: # kill: def $eax killed $eax def $ax
; X64-NEXT: divb %sil
; X64-NEXT: movzbl %al, %eax
; X64-NEXT: andl $1, %eax
@@ -35,7 +35,7 @@ define i32 @bar(i32 %p, i16 zeroext %x)
; X32-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X32-NEXT: xorl %edx, %edx
; X32-NEXT: divw {{[0-9]+}}(%esp)
-; X32-NEXT: # kill: def %ax killed %ax def %eax
+; X32-NEXT: # kill: def $ax killed $ax def $eax
; X32-NEXT: andl $1, %eax
; X32-NEXT: retl
;
@@ -44,7 +44,7 @@ define i32 @bar(i32 %p, i16 zeroext %x)
; X64-NEXT: xorl %edx, %edx
; X64-NEXT: movl %edi, %eax
; X64-NEXT: divw %si
-; X64-NEXT: # kill: def %ax killed %ax def %eax
+; X64-NEXT: # kill: def $ax killed $ax def $eax
; X64-NEXT: andl $1, %eax
; X64-NEXT: retq
%q = trunc i32 %p to i16
Modified: llvm/trunk/test/CodeGen/X86/atomic-eflags-reuse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/atomic-eflags-reuse.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/atomic-eflags-reuse.ll (original)
+++ llvm/trunk/test/CodeGen/X86/atomic-eflags-reuse.ll Wed Jan 31 14:04:26 2018
@@ -93,7 +93,7 @@ define i8 @test_add_1_setcc_slt(i64* %p)
; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: lock xaddq %rax, (%rdi)
; CHECK-NEXT: shrq $63, %rax
-; CHECK-NEXT: # kill: def %al killed %al killed %rax
+; CHECK-NEXT: # kill: def $al killed $al killed $rax
; CHECK-NEXT: retq
entry:
%tmp0 = atomicrmw add i64* %p, i64 1 seq_cst
Modified: llvm/trunk/test/CodeGen/X86/avx-cast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-cast.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-cast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-cast.ll Wed Jan 31 14:04:26 2018
@@ -9,7 +9,7 @@
define <8 x float> @castA(<4 x float> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castA:
; AVX: ## %bb.0:
-; AVX-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT: retq
@@ -20,7 +20,7 @@ define <8 x float> @castA(<4 x float> %m
define <4 x double> @castB(<2 x double> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castB:
; AVX: ## %bb.0:
-; AVX-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT: retq
@@ -33,7 +33,7 @@ define <4 x double> @castB(<2 x double>
define <4 x i64> @castC(<2 x i64> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castC:
; AVX: ## %bb.0:
-; AVX-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX-NEXT: retq
@@ -47,7 +47,7 @@ define <4 x i64> @castC(<2 x i64> %m) no
define <4 x float> @castD(<8 x float> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castD:
; AVX: ## %bb.0:
-; AVX-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%shuffle.i = shufflevector <8 x float> %m, <8 x float> %m, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -57,7 +57,7 @@ define <4 x float> @castD(<8 x float> %m
define <2 x i64> @castE(<4 x i64> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castE:
; AVX: ## %bb.0:
-; AVX-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%shuffle.i = shufflevector <4 x i64> %m, <4 x i64> %m, <2 x i32> <i32 0, i32 1>
@@ -67,7 +67,7 @@ define <2 x i64> @castE(<4 x i64> %m) no
define <2 x double> @castF(<4 x double> %m) nounwind uwtable readnone ssp {
; AVX-LABEL: castF:
; AVX: ## %bb.0:
-; AVX-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%shuffle.i = shufflevector <4 x double> %m, <4 x double> %m, <2 x i32> <i32 0, i32 1>
Modified: llvm/trunk/test/CodeGen/X86/avx-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-cmp.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-cmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-cmp.ll Wed Jan 31 14:04:26 2018
@@ -197,7 +197,7 @@ define i32 @scalarcmpA() uwtable ssp {
; CHECK-NEXT: vcmpeqsd %xmm0, %xmm0, %xmm0
; CHECK-NEXT: vmovq %xmm0, %rax
; CHECK-NEXT: andl $1, %eax
-; CHECK-NEXT: # kill: def %eax killed %eax killed %rax
+; CHECK-NEXT: # kill: def $eax killed $eax killed $rax
; CHECK-NEXT: retq
%cmp29 = fcmp oeq double undef, 0.000000e+00
%res = zext i1 %cmp29 to i32
Modified: llvm/trunk/test/CodeGen/X86/avx-insertelt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-insertelt.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-insertelt.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-insertelt.ll Wed Jan 31 14:04:26 2018
@@ -5,7 +5,7 @@
define <8 x float> @insert_f32(<8 x float> %y, float %f, <8 x float> %x) {
; ALL-LABEL: insert_f32:
; ALL: # %bb.0:
-; ALL-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; ALL-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
; ALL-NEXT: retq
%i0 = insertelement <8 x float> %y, float %f, i32 0
@@ -15,7 +15,7 @@ define <8 x float> @insert_f32(<8 x floa
define <4 x double> @insert_f64(<4 x double> %y, double %f, <4 x double> %x) {
; ALL-LABEL: insert_f64:
; ALL: # %bb.0:
-; ALL-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; ALL-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7]
; ALL-NEXT: retq
%i0 = insertelement <4 x double> %y, double %f, i32 0
Modified: llvm/trunk/test/CodeGen/X86/avx-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-intrinsics-fast-isel.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-intrinsics-fast-isel.ll Wed Jan 31 14:04:26 2018
@@ -316,12 +316,12 @@ define <4 x i64> @test_mm256_castpd_si25
define <4 x double> @test_mm256_castpd128_pd256(<2 x double> %a0) nounwind {
; X32-LABEL: test_mm256_castpd128_pd256:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castpd128_pd256:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: retq
%res = shufflevector <2 x double> %a0, <2 x double> %a0, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
ret <4 x double> %res
@@ -330,13 +330,13 @@ define <4 x double> @test_mm256_castpd12
define <2 x double> @test_mm256_castpd256_pd128(<4 x double> %a0) nounwind {
; X32-LABEL: test_mm256_castpd256_pd128:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castpd256_pd128:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = shufflevector <4 x double> %a0, <4 x double> %a0, <2 x i32> <i32 0, i32 1>
@@ -370,12 +370,12 @@ define <4 x i64> @test_mm256_castps_si25
define <8 x float> @test_mm256_castps128_ps256(<4 x float> %a0) nounwind {
; X32-LABEL: test_mm256_castps128_ps256:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castps128_ps256:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> %a0, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef>
ret <8 x float> %res
@@ -384,13 +384,13 @@ define <8 x float> @test_mm256_castps128
define <4 x float> @test_mm256_castps256_ps128(<8 x float> %a0) nounwind {
; X32-LABEL: test_mm256_castps256_ps128:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castps256_ps128:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = shufflevector <8 x float> %a0, <8 x float> %a0, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -400,12 +400,12 @@ define <4 x float> @test_mm256_castps256
define <4 x i64> @test_mm256_castsi128_si256(<2 x i64> %a0) nounwind {
; X32-LABEL: test_mm256_castsi128_si256:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castsi128_si256:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: retq
%res = shufflevector <2 x i64> %a0, <2 x i64> %a0, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
ret <4 x i64> %res
@@ -438,13 +438,13 @@ define <8 x float> @test_mm256_castsi256
define <2 x i64> @test_mm256_castsi256_si128(<4 x i64> %a0) nounwind {
; X32-LABEL: test_mm256_castsi256_si128:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_castsi256_si128:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = shufflevector <4 x i64> %a0, <4 x i64> %a0, <2 x i32> <i32 0, i32 1>
@@ -1043,13 +1043,13 @@ define <4 x i64> @test_mm256_insert_epi6
define <4 x double> @test_mm256_insertf128_pd(<4 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm256_insertf128_pd:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; X32-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; X32-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_insertf128_pd:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; X64-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; X64-NEXT: retq
%ext = shufflevector <2 x double> %a1, <2 x double> %a1, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
@@ -1075,13 +1075,13 @@ define <8 x float> @test_mm256_insertf12
define <4 x i64> @test_mm256_insertf128_si256(<4 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_insertf128_si256:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; X32-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; X32-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_insertf128_si256:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; X64-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; X64-NEXT: retq
%ext = shufflevector <2 x i64> %a1, <2 x i64> %a1, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
@@ -2188,13 +2188,13 @@ define <4 x i64> @test_mm256_set_epi64x(
define <8 x float> @test_mm256_set_m128(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm256_set_m128:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; X32-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_m128:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; X64-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X64-NEXT: retq
%res = shufflevector <4 x float> %a1, <4 x float> %a0, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -2204,13 +2204,13 @@ define <8 x float> @test_mm256_set_m128(
define <4 x double> @test_mm256_set_m128d(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm256_set_m128d:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; X32-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_m128d:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; X64-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <2 x double> %a0 to <4 x float>
@@ -2223,13 +2223,13 @@ define <4 x double> @test_mm256_set_m128
define <4 x i64> @test_mm256_set_m128i(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_set_m128i:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; X32-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_set_m128i:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; X64-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x float>
@@ -2825,13 +2825,13 @@ define <4 x i64> @test_mm256_setr_epi64x
define <8 x float> @test_mm256_setr_m128(<4 x float> %a0, <4 x float> %a1) nounwind {
; X32-LABEL: test_mm256_setr_m128:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_setr_m128:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X64-NEXT: retq
%res = shufflevector <4 x float> %a0, <4 x float> %a1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -2841,13 +2841,13 @@ define <8 x float> @test_mm256_setr_m128
define <4 x double> @test_mm256_setr_m128d(<2 x double> %a0, <2 x double> %a1) nounwind {
; X32-LABEL: test_mm256_setr_m128d:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_setr_m128d:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <2 x double> %a0 to <4 x float>
@@ -2860,13 +2860,13 @@ define <4 x double> @test_mm256_setr_m12
define <4 x i64> @test_mm256_setr_m128i(<2 x i64> %a0, <2 x i64> %a1) nounwind {
; X32-LABEL: test_mm256_setr_m128i:
; X32: # %bb.0:
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: test_mm256_setr_m128i:
; X64: # %bb.0:
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X64-NEXT: retq
%arg0 = bitcast <2 x i64> %a0 to <4 x float>
Modified: llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86-upgrade.ll Wed Jan 31 14:04:26 2018
@@ -39,7 +39,7 @@ define <8 x i32> @test_x86_avx_vinsertf1
define <8 x i32> @test_x86_avx_vinsertf128_si_256_2(<8 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: test_x86_avx_vinsertf128_si_256_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; CHECK-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; CHECK-NEXT: ret{{[l|q]}}
%res = call <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32> %a0, <4 x i32> %a1, i8 2)
@@ -88,7 +88,7 @@ declare <4 x i32> @llvm.x86.avx.vextract
define <2 x double> @test_x86_avx_extractf128_pd_256_2(<4 x double> %a0) {
; CHECK-LABEL: test_x86_avx_extractf128_pd_256_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: ret{{[l|q]}}
%res = call <2 x double> @llvm.x86.avx.vextractf128.pd.256(<4 x double> %a0, i8 2)
Modified: llvm/trunk/test/CodeGen/X86/avx-load-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-load-store.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-load-store.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-load-store.ll Wed Jan 31 14:04:26 2018
@@ -85,7 +85,7 @@ define <8 x float> @mov00(<8 x float> %v
; CHECK_O0-LABEL: mov00:
; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK_O0-NEXT: # implicit-def: %ymm1
+; CHECK_O0-NEXT: # implicit-def: $ymm1
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm1
; CHECK_O0-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK_O0-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm2[1,2,3,4,5,6,7]
@@ -104,7 +104,7 @@ define <4 x double> @mov01(<4 x double>
; CHECK_O0-LABEL: mov01:
; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
-; CHECK_O0-NEXT: # implicit-def: %ymm1
+; CHECK_O0-NEXT: # implicit-def: $ymm1
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm1
; CHECK_O0-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK_O0-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm2[1,2,3]
@@ -121,7 +121,7 @@ define void @storev16i16(<16 x i16> %a)
;
; CHECK_O0-LABEL: storev16i16:
; CHECK_O0: # %bb.0:
-; CHECK_O0-NEXT: # implicit-def: %rax
+; CHECK_O0-NEXT: # implicit-def: $rax
; CHECK_O0-NEXT: vmovdqa %ymm0, (%rax)
store <16 x i16> %a, <16 x i16>* undef, align 32
unreachable
@@ -135,7 +135,7 @@ define void @storev16i16_01(<16 x i16> %
;
; CHECK_O0-LABEL: storev16i16_01:
; CHECK_O0: # %bb.0:
-; CHECK_O0-NEXT: # implicit-def: %rax
+; CHECK_O0-NEXT: # implicit-def: $rax
; CHECK_O0-NEXT: vmovdqu %ymm0, (%rax)
store <16 x i16> %a, <16 x i16>* undef, align 4
unreachable
@@ -148,7 +148,7 @@ define void @storev32i8(<32 x i8> %a) no
;
; CHECK_O0-LABEL: storev32i8:
; CHECK_O0: # %bb.0:
-; CHECK_O0-NEXT: # implicit-def: %rax
+; CHECK_O0-NEXT: # implicit-def: $rax
; CHECK_O0-NEXT: vmovdqa %ymm0, (%rax)
store <32 x i8> %a, <32 x i8>* undef, align 32
unreachable
@@ -162,7 +162,7 @@ define void @storev32i8_01(<32 x i8> %a)
;
; CHECK_O0-LABEL: storev32i8_01:
; CHECK_O0: # %bb.0:
-; CHECK_O0-NEXT: # implicit-def: %rax
+; CHECK_O0-NEXT: # implicit-def: $rax
; CHECK_O0-NEXT: vmovdqu %ymm0, (%rax)
store <32 x i8> %a, <32 x i8>* undef, align 4
unreachable
@@ -179,7 +179,7 @@ define void @double_save(<4 x i32> %A, <
;
; CHECK_O0-LABEL: double_save:
; CHECK_O0: # %bb.0:
-; CHECK_O0-NEXT: # implicit-def: %ymm2
+; CHECK_O0-NEXT: # implicit-def: $ymm2
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi)
@@ -211,13 +211,13 @@ define void @f_f() nounwind {
;
; CHECK_O0-LABEL: f_f:
; CHECK_O0: # %bb.0: # %allocas
-; CHECK_O0-NEXT: # implicit-def: %al
+; CHECK_O0-NEXT: # implicit-def: $al
; CHECK_O0-NEXT: testb $1, %al
; CHECK_O0-NEXT: jne .LBB8_1
; CHECK_O0-NEXT: jmp .LBB8_2
; CHECK_O0-NEXT: .LBB8_1: # %cif_mask_all
; CHECK_O0-NEXT: .LBB8_2: # %cif_mask_mixed
-; CHECK_O0-NEXT: # implicit-def: %al
+; CHECK_O0-NEXT: # implicit-def: $al
; CHECK_O0-NEXT: testb $1, %al
; CHECK_O0-NEXT: jne .LBB8_3
; CHECK_O0-NEXT: jmp .LBB8_4
@@ -225,8 +225,8 @@ define void @f_f() nounwind {
; CHECK_O0-NEXT: movl $-1, %eax
; CHECK_O0-NEXT: vmovd %eax, %xmm0
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm1
-; CHECK_O0-NEXT: # implicit-def: %rcx
-; CHECK_O0-NEXT: # implicit-def: %ymm2
+; CHECK_O0-NEXT: # implicit-def: $rcx
+; CHECK_O0-NEXT: # implicit-def: $ymm2
; CHECK_O0-NEXT: vmaskmovps %ymm2, %ymm1, (%rcx)
; CHECK_O0-NEXT: .LBB8_4: # %cif_mixed_test_any_check
allocas:
@@ -259,7 +259,7 @@ define void @add8i32(<8 x i32>* %ret, <8
; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: vmovdqu (%rsi), %xmm0
; CHECK_O0-NEXT: vmovdqu 16(%rsi), %xmm1
-; CHECK_O0-NEXT: # implicit-def: %ymm2
+; CHECK_O0-NEXT: # implicit-def: $ymm2
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi)
@@ -304,7 +304,7 @@ define void @add4i64a16(<4 x i64>* %ret,
; CHECK_O0: # %bb.0:
; CHECK_O0-NEXT: vmovdqa (%rsi), %xmm0
; CHECK_O0-NEXT: vmovdqa 16(%rsi), %xmm1
-; CHECK_O0-NEXT: # implicit-def: %ymm2
+; CHECK_O0-NEXT: # implicit-def: $ymm2
; CHECK_O0-NEXT: vmovaps %xmm0, %xmm2
; CHECK_O0-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm2
; CHECK_O0-NEXT: vmovdqu %ymm2, (%rdi)
Modified: llvm/trunk/test/CodeGen/X86/avx-splat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-splat.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-splat.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-splat.ll Wed Jan 31 14:04:26 2018
@@ -61,7 +61,7 @@ define <8 x float> @funcE() nounwind {
; CHECK: # %bb.0: # %for_exit499
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: # implicit-def: %ymm0
+; CHECK-NEXT: # implicit-def: $ymm0
; CHECK-NEXT: jne .LBB4_2
; CHECK-NEXT: # %bb.1: # %load.i1247
; CHECK-NEXT: pushq %rbp
Modified: llvm/trunk/test/CodeGen/X86/avx-vinsertf128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-vinsertf128.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-vinsertf128.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-vinsertf128.ll Wed Jan 31 14:04:26 2018
@@ -75,7 +75,7 @@ define <8 x i32> @DAGCombineB(<8 x i32>
define <4 x double> @insert_undef_pd(<4 x double> %a0, <2 x double> %a1) {
; CHECK-LABEL: insert_undef_pd:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; CHECK-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
%res = call <4 x double> @llvm.x86.avx.vinsertf128.pd.256(<4 x double> undef, <2 x double> %a1, i8 0)
@@ -86,7 +86,7 @@ declare <4 x double> @llvm.x86.avx.vinse
define <8 x float> @insert_undef_ps(<8 x float> %a0, <4 x float> %a1) {
; CHECK-LABEL: insert_undef_ps:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; CHECK-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
%res = call <8 x float> @llvm.x86.avx.vinsertf128.ps.256(<8 x float> undef, <4 x float> %a1, i8 0)
@@ -97,7 +97,7 @@ declare <8 x float> @llvm.x86.avx.vinser
define <8 x i32> @insert_undef_si(<8 x i32> %a0, <4 x i32> %a1) {
; CHECK-LABEL: insert_undef_si:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; CHECK-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; CHECK-NEXT: vmovaps %ymm1, %ymm0
; CHECK-NEXT: retq
%res = call <8 x i32> @llvm.x86.avx.vinsertf128.si.256(<8 x i32> undef, <4 x i32> %a1, i8 0)
Modified: llvm/trunk/test/CodeGen/X86/avx-vzeroupper.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-vzeroupper.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-vzeroupper.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-vzeroupper.ll Wed Jan 31 14:04:26 2018
@@ -82,14 +82,14 @@ define <4 x float> @test02(<8 x float> %
; VZ-LABEL: test02:
; VZ: # %bb.0:
; VZ-NEXT: vaddps %ymm1, %ymm0, %ymm0
-; VZ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; VZ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; VZ-NEXT: vzeroupper
; VZ-NEXT: jmp do_sse # TAILCALL
;
; NO-VZ-LABEL: test02:
; NO-VZ: # %bb.0:
; NO-VZ-NEXT: vaddps %ymm1, %ymm0, %ymm0
-; NO-VZ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; NO-VZ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; NO-VZ-NEXT: jmp do_sse # TAILCALL
%add.i = fadd <8 x float> %a, %b
%add.low = call <4 x float> @llvm.x86.avx.vextractf128.ps.256(<8 x float> %add.i, i8 0)
@@ -222,10 +222,10 @@ define <4 x float> @test04(<4 x float> %
; VZ-LABEL: test04:
; VZ: # %bb.0:
; VZ-NEXT: pushq %rax
-; VZ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; VZ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; VZ-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; VZ-NEXT: callq do_avx
-; VZ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; VZ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; VZ-NEXT: popq %rax
; VZ-NEXT: vzeroupper
; VZ-NEXT: retq
@@ -233,10 +233,10 @@ define <4 x float> @test04(<4 x float> %
; NO-VZ-LABEL: test04:
; NO-VZ: # %bb.0:
; NO-VZ-NEXT: pushq %rax
-; NO-VZ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; NO-VZ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; NO-VZ-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; NO-VZ-NEXT: callq do_avx
-; NO-VZ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; NO-VZ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; NO-VZ-NEXT: popq %rax
; NO-VZ-NEXT: retq
%shuf = shufflevector <4 x float> %a, <4 x float> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
Modified: llvm/trunk/test/CodeGen/X86/avx2-conversions.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-conversions.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-conversions.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-conversions.ll Wed Jan 31 14:04:26 2018
@@ -9,7 +9,7 @@ define <4 x i32> @trunc4(<4 x i64> %A) n
; X32-SLOW: # %bb.0:
; X32-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; X32-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X32-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X32-SLOW-NEXT: vzeroupper
; X32-SLOW-NEXT: retl
;
@@ -17,7 +17,7 @@ define <4 x i32> @trunc4(<4 x i64> %A) n
; X32-FAST: # %bb.0:
; X32-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
; X32-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
-; X32-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X32-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X32-FAST-NEXT: vzeroupper
; X32-FAST-NEXT: retl
;
@@ -25,7 +25,7 @@ define <4 x i32> @trunc4(<4 x i64> %A) n
; X64-SLOW: # %bb.0:
; X64-SLOW-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; X64-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-SLOW-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X64-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X64-SLOW-NEXT: vzeroupper
; X64-SLOW-NEXT: retq
;
@@ -33,7 +33,7 @@ define <4 x i32> @trunc4(<4 x i64> %A) n
; X64-FAST: # %bb.0:
; X64-FAST-NEXT: vmovaps {{.*#+}} ymm1 = [0,2,4,6,4,6,6,7]
; X64-FAST-NEXT: vpermps %ymm0, %ymm1, %ymm0
-; X64-FAST-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X64-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X64-FAST-NEXT: vzeroupper
; X64-FAST-NEXT: retq
%B = trunc <4 x i64> %A to <4 x i32>
@@ -45,7 +45,7 @@ define <8 x i16> @trunc8(<8 x i32> %A) n
; X32: # %bb.0:
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
@@ -53,7 +53,7 @@ define <8 x i16> @trunc8(<8 x i32> %A) n
; X64: # %bb.0:
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%B = trunc <8 x i32> %A to <8 x i16>
Modified: llvm/trunk/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-intrinsics-fast-isel.ll Wed Jan 31 14:04:26 2018
@@ -355,7 +355,7 @@ define <4 x double> @test_mm256_broadcas
define <4 x i64> @test_mm256_broadcastsi128_si256(<2 x i64> %a0) {
; CHECK-LABEL: test_mm256_broadcastsi128_si256:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: ret{{[l|q]}}
%res = shufflevector <2 x i64> %a0, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 0, i32 1>
@@ -1447,7 +1447,7 @@ define <4 x float> @test_mm256_mask_i64g
define <4 x i64> @test0_mm256_inserti128_si256(<4 x i64> %a0, <2 x i64> %a1) nounwind {
; CHECK-LABEL: test0_mm256_inserti128_si256:
; CHECK: # %bb.0:
-; CHECK-NEXT: # kill: def %xmm1 killed %xmm1 def %ymm1
+; CHECK-NEXT: # kill: def $xmm1 killed $xmm1 def $ymm1
; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; CHECK-NEXT: ret{{[l|q]}}
%ext = shufflevector <2 x i64> %a1, <2 x i64> %a1, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
Modified: llvm/trunk/test/CodeGen/X86/avx2-masked-gather.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-masked-gather.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-masked-gather.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-masked-gather.ll Wed Jan 31 14:04:26 2018
@@ -32,7 +32,7 @@ define <2 x i32> @masked_gather_v2i32(<2
; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %xmm2
+; NOGATHER-NEXT: # implicit-def: $xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB0_2
; NOGATHER-NEXT: # %bb.1: # %cond.load
@@ -82,7 +82,7 @@ define <4 x i32> @masked_gather_v2i32_co
; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %xmm2
+; NOGATHER-NEXT: # implicit-def: $xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB1_2
; NOGATHER-NEXT: # %bb.1: # %cond.load
@@ -134,7 +134,7 @@ define <2 x float> @masked_gather_v2floa
; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %xmm2
+; NOGATHER-NEXT: # implicit-def: $xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB2_2
; NOGATHER-NEXT: # %bb.1: # %cond.load
@@ -182,7 +182,7 @@ define <4 x float> @masked_gather_v2floa
; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %xmm2
+; NOGATHER-NEXT: # implicit-def: $xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB3_2
; NOGATHER-NEXT: # %bb.1: # %cond.load
@@ -229,7 +229,7 @@ define <4 x i32> @masked_gather_v4i32(<4
; NOGATHER-LABEL: masked_gather_v4i32:
; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vpextrb $0, %xmm1, %eax
-; NOGATHER-NEXT: # implicit-def: %xmm3
+; NOGATHER-NEXT: # implicit-def: $xmm3
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB4_2
; NOGATHER-NEXT: # %bb.1: # %cond.load
@@ -289,7 +289,7 @@ define <4 x float> @masked_gather_v4floa
; NOGATHER-LABEL: masked_gather_v4float:
; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vpextrb $0, %xmm1, %eax
-; NOGATHER-NEXT: # implicit-def: %xmm3
+; NOGATHER-NEXT: # implicit-def: $xmm3
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB5_2
; NOGATHER-NEXT: # %bb.1: # %cond.load
@@ -360,7 +360,7 @@ define <8 x i32> @masked_gather_v8i32(<8
; NOGATHER-NEXT: vmovdqa (%rdi), %ymm4
; NOGATHER-NEXT: vmovdqa 32(%rdi), %ymm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %ymm2
+; NOGATHER-NEXT: # implicit-def: $ymm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB6_2
; NOGATHER-NEXT: # %bb.1: # %cond.load
@@ -476,7 +476,7 @@ define <8 x float> @masked_gather_v8floa
; NOGATHER-NEXT: vmovdqa (%rdi), %ymm4
; NOGATHER-NEXT: vmovdqa 32(%rdi), %ymm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %ymm2
+; NOGATHER-NEXT: # implicit-def: $ymm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB7_2
; NOGATHER-NEXT: # %bb.1: # %cond.load
@@ -587,7 +587,7 @@ define <4 x i64> @masked_gather_v4i64(<4
; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %ymm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %ymm2
+; NOGATHER-NEXT: # implicit-def: $ymm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB8_2
; NOGATHER-NEXT: # %bb.1: # %cond.load
@@ -662,7 +662,7 @@ define <4 x double> @masked_gather_v4dou
; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %ymm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %ymm2
+; NOGATHER-NEXT: # implicit-def: $ymm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB9_2
; NOGATHER-NEXT: # %bb.1: # %cond.load
@@ -735,7 +735,7 @@ define <2 x i64> @masked_gather_v2i64(<2
; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %xmm2
+; NOGATHER-NEXT: # implicit-def: $xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB10_2
; NOGATHER-NEXT: # %bb.1: # %cond.load
@@ -782,7 +782,7 @@ define <2 x double> @masked_gather_v2dou
; NOGATHER: # %bb.0: # %entry
; NOGATHER-NEXT: vmovdqa (%rdi), %xmm3
; NOGATHER-NEXT: vpextrb $0, %xmm0, %eax
-; NOGATHER-NEXT: # implicit-def: %xmm2
+; NOGATHER-NEXT: # implicit-def: $xmm2
; NOGATHER-NEXT: testb $1, %al
; NOGATHER-NEXT: je .LBB11_2
; NOGATHER-NEXT: # %bb.1: # %cond.load
Modified: llvm/trunk/test/CodeGen/X86/avx2-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-shift.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-shift.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-shift.ll Wed Jan 31 14:04:26 2018
@@ -532,7 +532,7 @@ define <8 x i16> @variable_shl16(<8 x i1
; X32-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
@@ -543,7 +543,7 @@ define <8 x i16> @variable_shl16(<8 x i1
; X64-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = shl <8 x i16> %lhs, %rhs
@@ -582,7 +582,7 @@ define <8 x i16> @variable_lshr16(<8 x i
; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
@@ -593,7 +593,7 @@ define <8 x i16> @variable_lshr16(<8 x i
; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%res = lshr <8 x i16> %lhs, %rhs
Modified: llvm/trunk/test/CodeGen/X86/avx2-vector-shifts.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-vector-shifts.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx2-vector-shifts.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx2-vector-shifts.ll Wed Jan 31 14:04:26 2018
@@ -431,7 +431,7 @@ define <8 x i16> @shl_8i16(<8 x i16> %r,
; X32-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
@@ -442,7 +442,7 @@ define <8 x i16> @shl_8i16(<8 x i16> %r,
; X64-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%shl = shl <8 x i16> %r, %a
@@ -639,7 +639,7 @@ define <8 x i16> @lshr_8i16(<8 x i16> %r
; X32-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X32-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
@@ -650,7 +650,7 @@ define <8 x i16> @lshr_8i16(<8 x i16> %r
; X64-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
-; X64-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; X64-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
%lshr = lshr <8 x i16> %r, %a
Modified: llvm/trunk/test/CodeGen/X86/avx512-arith.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-arith.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-arith.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-arith.ll Wed Jan 31 14:04:26 2018
@@ -176,10 +176,10 @@ define <4 x i64> @imulq256(<4 x i64> %y,
;
; AVX512DQ-LABEL: imulq256:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
;
; SKX-LABEL: imulq256:
@@ -229,10 +229,10 @@ define <2 x i64> @imulq128(<2 x i64> %y,
;
; AVX512DQ-LABEL: imulq128:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vpmullq %zmm0, %zmm1, %zmm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
@@ -708,7 +708,7 @@ define <16 x float> @test_mask_vminps(<1
define <8 x double> @test_mask_vminpd(<8 x double> %dst, <8 x double> %i,
; AVX512F-LABEL: test_mask_vminpd:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3
+; AVX512F-NEXT: # kill: def $ymm3 killed $ymm3 def $zmm3
; AVX512F-NEXT: vptestmd %zmm3, %zmm3, %k1
; AVX512F-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512F-NEXT: retq
@@ -721,14 +721,14 @@ define <8 x double> @test_mask_vminpd(<8
;
; AVX512BW-LABEL: test_mask_vminpd:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3
+; AVX512BW-NEXT: # kill: def $ymm3 killed $ymm3 def $zmm3
; AVX512BW-NEXT: vptestmd %zmm3, %zmm3, %k1
; AVX512BW-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_mask_vminpd:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3
+; AVX512DQ-NEXT: # kill: def $ymm3 killed $ymm3 def $zmm3
; AVX512DQ-NEXT: vptestmd %zmm3, %zmm3, %k1
; AVX512DQ-NEXT: vminpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512DQ-NEXT: retq
@@ -765,7 +765,7 @@ define <16 x float> @test_mask_vmaxps(<1
define <8 x double> @test_mask_vmaxpd(<8 x double> %dst, <8 x double> %i,
; AVX512F-LABEL: test_mask_vmaxpd:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3
+; AVX512F-NEXT: # kill: def $ymm3 killed $ymm3 def $zmm3
; AVX512F-NEXT: vptestmd %zmm3, %zmm3, %k1
; AVX512F-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512F-NEXT: retq
@@ -778,14 +778,14 @@ define <8 x double> @test_mask_vmaxpd(<8
;
; AVX512BW-LABEL: test_mask_vmaxpd:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3
+; AVX512BW-NEXT: # kill: def $ymm3 killed $ymm3 def $zmm3
; AVX512BW-NEXT: vptestmd %zmm3, %zmm3, %k1
; AVX512BW-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_mask_vmaxpd:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm3 killed %ymm3 def %zmm3
+; AVX512DQ-NEXT: # kill: def $ymm3 killed $ymm3 def $zmm3
; AVX512DQ-NEXT: vptestmd %zmm3, %zmm3, %k1
; AVX512DQ-NEXT: vmaxpd %zmm2, %zmm1, %zmm0 {%k1}
; AVX512DQ-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/avx512-build-vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-build-vector.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-build-vector.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-build-vector.ll Wed Jan 31 14:04:26 2018
@@ -14,7 +14,7 @@ define <16 x i32> @test2(<16 x i32> %x)
define <16 x float> @test3(<4 x float> %a) {
; CHECK-LABEL: test3:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; CHECK-NEXT: vmovaps {{.*#+}} zmm2 = [0,1,2,3,4,18,16,7,8,9,10,11,12,13,14,15]
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpermt2ps %zmm0, %zmm2, %zmm1
Modified: llvm/trunk/test/CodeGen/X86/avx512-calling-conv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-calling-conv.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-calling-conv.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-calling-conv.ll Wed Jan 31 14:04:26 2018
@@ -67,7 +67,7 @@ define <8 x i32> @test5(<8 x i32>%a, <8
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL-NEXT: callq _func8xi1
; KNL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; KNL-NEXT: vpslld $31, %ymm0, %ymm0
@@ -95,7 +95,7 @@ define <8 x i32> @test5(<8 x i32>%a, <8
; KNL_X32-NEXT: .cfi_def_cfa_offset 16
; KNL_X32-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; KNL_X32-NEXT: vpmovdw %zmm0, %ymm0
-; KNL_X32-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL_X32-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL_X32-NEXT: calll _func8xi1
; KNL_X32-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; KNL_X32-NEXT: vpslld $31, %ymm0, %ymm0
@@ -195,7 +195,7 @@ define <8 x i1> @test7a(<8 x i32>%a, <8
; KNL-NEXT: .cfi_def_cfa_offset 16
; KNL-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL-NEXT: callq _func8xi1
; KNL-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; KNL-NEXT: popq %rax
@@ -219,7 +219,7 @@ define <8 x i1> @test7a(<8 x i32>%a, <8
; KNL_X32-NEXT: .cfi_def_cfa_offset 16
; KNL_X32-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; KNL_X32-NEXT: vpmovdw %zmm0, %ymm0
-; KNL_X32-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL_X32-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL_X32-NEXT: calll _func8xi1
; KNL_X32-NEXT: vandps LCPI7_0, %xmm0, %xmm0
; KNL_X32-NEXT: addl $12, %esp
@@ -378,21 +378,21 @@ define <1 x i1> @test13(<1 x i1>* %foo)
; KNL-LABEL: test13:
; KNL: ## %bb.0:
; KNL-NEXT: movzbl (%rdi), %eax
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test13:
; SKX: ## %bb.0:
; SKX-NEXT: kmovb (%rdi), %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: retq
;
; KNL_X32-LABEL: test13:
; KNL_X32: ## %bb.0:
; KNL_X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; KNL_X32-NEXT: movzbl (%eax), %eax
-; KNL_X32-NEXT: ## kill: def %al killed %al killed %eax
+; KNL_X32-NEXT: ## kill: def $al killed $al killed $eax
; KNL_X32-NEXT: retl
%bar = load <1 x i1>, <1 x i1>* %foo
ret <1 x i1> %bar
Modified: llvm/trunk/test/CodeGen/X86/avx512-cmp-kor-sequence.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-cmp-kor-sequence.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-cmp-kor-sequence.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-cmp-kor-sequence.ll Wed Jan 31 14:04:26 2018
@@ -19,7 +19,7 @@ define zeroext i16 @cmp_kor_seq_16(<16 x
; CHECK-NEXT: korw %k2, %k1, %k1
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
entry:
%0 = tail call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> %a, <16 x float> %x, i32 13, i16 -1, i32 4)
Modified: llvm/trunk/test/CodeGen/X86/avx512-cvt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-cvt.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-cvt.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-cvt.ll Wed Jan 31 14:04:26 2018
@@ -80,9 +80,9 @@ define <4 x double> @slto4f64(<4 x i64>
;
; AVX512DQ-LABEL: slto4f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
%b = sitofp <4 x i64> %a to <4 x double>
ret <4 x double> %b
@@ -105,9 +105,9 @@ define <2 x double> @slto2f64(<2 x i64>
;
; AVX512DQ-LABEL: slto2f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vcvtqq2pd %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = sitofp <2 x i64> %a to <2 x double>
@@ -133,9 +133,9 @@ define <2 x float> @sltof2f32(<2 x i64>
;
; AVX512DQ-LABEL: sltof2f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = sitofp <2 x i64> %a to <2 x float>
@@ -170,7 +170,7 @@ define <4 x float> @slto4f32_mem(<4 x i6
; AVX512DQ: # %bb.0:
; AVX512DQ-NEXT: vmovups (%rdi), %ymm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%a1 = load <4 x i64>, <4 x i64>* %a, align 8
@@ -204,9 +204,9 @@ define <4 x i64> @f64to4sl(<4 x double>
;
; AVX512DQ-LABEL: f64to4sl:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vcvttpd2qq %zmm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
%b = fptosi <4 x double> %a to <4 x i64>
ret <4 x i64> %b
@@ -238,9 +238,9 @@ define <4 x i64> @f32to4sl(<4 x float> %
;
; AVX512DQ-LABEL: f32to4sl:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512DQ-NEXT: vcvttps2qq %ymm0, %zmm0
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512DQ-NEXT: retq
%b = fptosi <4 x float> %a to <4 x i64>
ret <4 x i64> %b
@@ -272,9 +272,9 @@ define <4 x float> @slto4f32(<4 x i64> %
;
; AVX512DQ-LABEL: slto4f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vcvtqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = sitofp <4 x i64> %a to <4 x float>
@@ -307,9 +307,9 @@ define <4 x float> @ulto4f32(<4 x i64> %
;
; AVX512DQ-LABEL: ulto4f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vcvtuqq2ps %zmm0, %ymm0
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = uitofp <4 x i64> %a to <4 x float>
@@ -484,9 +484,9 @@ define <16 x i16> @f32to16us(<16 x float
define <8 x i32> @f32to8ui(<8 x float> %a) nounwind {
; NOVL-LABEL: f32to8ui:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NOVL-NEXT: vcvttps2udq %zmm0, %zmm0
-; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NOVL-NEXT: retq
;
; VL-LABEL: f32to8ui:
@@ -500,9 +500,9 @@ define <8 x i32> @f32to8ui(<8 x float> %
define <4 x i32> @f32to4ui(<4 x float> %a) nounwind {
; NOVL-LABEL: f32to4ui:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NOVL-NEXT: vcvttps2udq %zmm0, %zmm0
-; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
@@ -528,7 +528,7 @@ define <8 x i16> @f64to8us(<8 x double>
; NOVL: # %bb.0:
; NOVL-NEXT: vcvttpd2dq %zmm0, %ymm0
; NOVL-NEXT: vpmovdw %zmm0, %ymm0
-; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
@@ -547,7 +547,7 @@ define <8 x i8> @f64to8uc(<8 x double> %
; NOVL: # %bb.0:
; NOVL-NEXT: vcvttpd2dq %zmm0, %ymm0
; NOVL-NEXT: vpmovdw %zmm0, %ymm0
-; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
@@ -564,9 +564,9 @@ define <8 x i8> @f64to8uc(<8 x double> %
define <4 x i32> @f64to4ui(<4 x double> %a) nounwind {
; NOVL-LABEL: f64to4ui:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NOVL-NEXT: vcvttpd2udq %zmm0, %ymm0
-; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
@@ -705,7 +705,7 @@ define <4 x float> @f64to4f32_mask(<4 x
; NOVL-NEXT: vptestmd %zmm1, %zmm1, %k1
; NOVL-NEXT: vcvtpd2ps %ymm0, %xmm0
; NOVL-NEXT: vmovaps %zmm0, %zmm0 {%k1} {z}
-; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
@@ -744,12 +744,12 @@ define <8 x double> @f32to8f64(<8 x floa
define <4 x double> @f32to4f64_mask(<4 x float> %b, <4 x double> %b1, <4 x double> %a1) {
; NOVL-LABEL: f32to4f64_mask:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
-; NOVL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NOVL-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2
+; NOVL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; NOVL-NEXT: vcvtps2pd %xmm0, %ymm0
; NOVL-NEXT: vcmpltpd %zmm2, %zmm1, %k1
; NOVL-NEXT: vmovapd %zmm0, %zmm0 {%k1} {z}
-; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NOVL-NEXT: retq
;
; VL-LABEL: f32to4f64_mask:
@@ -1291,9 +1291,9 @@ define <8 x double> @uito8f64_maskz(<8 x
define <4 x double> @uito4f64(<4 x i32> %a) nounwind {
; NOVL-LABEL: uito4f64:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 def %ymm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; NOVL-NEXT: vcvtudq2pd %ymm0, %zmm0
-; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NOVL-NEXT: retq
;
; VL-LABEL: uito4f64:
@@ -1325,9 +1325,9 @@ define <8 x double> @uito8f64(<8 x i32>
define <8 x float> @uito8f32(<8 x i32> %a) nounwind {
; NOVL-LABEL: uito8f32:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NOVL-NEXT: vcvtudq2ps %zmm0, %zmm0
-; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NOVL-NEXT: retq
;
; VL-LABEL: uito8f32:
@@ -1341,9 +1341,9 @@ define <8 x float> @uito8f32(<8 x i32> %
define <4 x float> @uito4f32(<4 x i32> %a) nounwind {
; NOVL-LABEL: uito4f32:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NOVL-NEXT: vcvtudq2ps %zmm0, %zmm0
-; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
@@ -1556,7 +1556,7 @@ define <8 x double> @sbto8f64(<8 x doubl
define <8 x float> @sbto8f32(<8 x float> %a) {
; NOVLDQ-LABEL: sbto8f32:
; NOVLDQ: # %bb.0:
-; NOVLDQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NOVLDQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NOVLDQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; NOVLDQ-NEXT: vcmpltps %zmm0, %zmm1, %k1
; NOVLDQ-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1582,7 +1582,7 @@ define <8 x float> @sbto8f32(<8 x float>
;
; AVX512DQ-LABEL: sbto8f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512DQ-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
@@ -1596,7 +1596,7 @@ define <8 x float> @sbto8f32(<8 x float>
define <4 x float> @sbto4f32(<4 x float> %a) {
; NOVLDQ-LABEL: sbto4f32:
; NOVLDQ: # %bb.0:
-; NOVLDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NOVLDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NOVLDQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; NOVLDQ-NEXT: vcmpltps %zmm0, %zmm1, %k1
; NOVLDQ-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1623,7 +1623,7 @@ define <4 x float> @sbto4f32(<4 x float>
;
; AVX512DQ-LABEL: sbto4f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512DQ-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
@@ -1638,7 +1638,7 @@ define <4 x float> @sbto4f32(<4 x float>
define <4 x double> @sbto4f64(<4 x double> %a) {
; NOVLDQ-LABEL: sbto4f64:
; NOVLDQ: # %bb.0:
-; NOVLDQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NOVLDQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NOVLDQ-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; NOVLDQ-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; NOVLDQ-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1664,7 +1664,7 @@ define <4 x double> @sbto4f64(<4 x doubl
;
; AVX512DQ-LABEL: sbto4f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
@@ -1678,7 +1678,7 @@ define <4 x double> @sbto4f64(<4 x doubl
define <2 x float> @sbto2f32(<2 x float> %a) {
; NOVLDQ-LABEL: sbto2f32:
; NOVLDQ: # %bb.0:
-; NOVLDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NOVLDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NOVLDQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; NOVLDQ-NEXT: vcmpltps %zmm0, %zmm1, %k1
; NOVLDQ-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1705,7 +1705,7 @@ define <2 x float> @sbto2f32(<2 x float>
;
; AVX512DQ-LABEL: sbto2f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512DQ-NEXT: vcmpltps %zmm0, %zmm1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
@@ -1720,7 +1720,7 @@ define <2 x float> @sbto2f32(<2 x float>
define <2 x double> @sbto2f64(<2 x double> %a) {
; NOVLDQ-LABEL: sbto2f64:
; NOVLDQ: # %bb.0:
-; NOVLDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NOVLDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NOVLDQ-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; NOVLDQ-NEXT: vcmpltpd %zmm0, %zmm1, %k1
; NOVLDQ-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
@@ -1747,7 +1747,7 @@ define <2 x double> @sbto2f64(<2 x doubl
;
; AVX512DQ-LABEL: sbto2f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; AVX512DQ-NEXT: vcmpltpd %zmm0, %zmm1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
@@ -1961,7 +1961,7 @@ define <16 x double> @ubto16f64(<16 x i3
define <8 x float> @ubto8f32(<8 x i32> %a) {
; NOVLDQ-LABEL: ubto8f32:
; NOVLDQ: # %bb.0:
-; NOVLDQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NOVLDQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NOVLDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVLDQ-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; NOVLDQ-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
@@ -1985,7 +1985,7 @@ define <8 x float> @ubto8f32(<8 x i32> %
;
; AVX512DQ-LABEL: ubto8f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vpmovd2m %zmm0, %k1
; AVX512DQ-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; AVX512DQ-NEXT: vcvtdq2ps %ymm0, %ymm0
@@ -1998,7 +1998,7 @@ define <8 x float> @ubto8f32(<8 x i32> %
define <8 x double> @ubto8f64(<8 x i32> %a) {
; NOVLDQ-LABEL: ubto8f64:
; NOVLDQ: # %bb.0:
-; NOVLDQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; NOVLDQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; NOVLDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVLDQ-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; NOVLDQ-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
@@ -2022,7 +2022,7 @@ define <8 x double> @ubto8f64(<8 x i32>
;
; AVX512DQ-LABEL: ubto8f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vpmovd2m %zmm0, %k1
; AVX512DQ-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; AVX512DQ-NEXT: vcvtdq2pd %ymm0, %zmm0
@@ -2035,7 +2035,7 @@ define <8 x double> @ubto8f64(<8 x i32>
define <4 x float> @ubto4f32(<4 x i32> %a) {
; NOVLDQ-LABEL: ubto4f32:
; NOVLDQ: # %bb.0:
-; NOVLDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NOVLDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NOVLDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVLDQ-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; NOVLDQ-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
@@ -2060,7 +2060,7 @@ define <4 x float> @ubto4f32(<4 x i32> %
;
; AVX512DQ-LABEL: ubto4f32:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vpmovd2m %zmm0, %k1
; AVX512DQ-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; AVX512DQ-NEXT: vcvtdq2ps %xmm0, %xmm0
@@ -2074,7 +2074,7 @@ define <4 x float> @ubto4f32(<4 x i32> %
define <4 x double> @ubto4f64(<4 x i32> %a) {
; NOVLDQ-LABEL: ubto4f64:
; NOVLDQ: # %bb.0:
-; NOVLDQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; NOVLDQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; NOVLDQ-NEXT: vpxor %xmm1, %xmm1, %xmm1
; NOVLDQ-NEXT: vpcmpgtd %zmm0, %zmm1, %k1
; NOVLDQ-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
@@ -2098,7 +2098,7 @@ define <4 x double> @ubto4f64(<4 x i32>
;
; AVX512DQ-LABEL: ubto4f64:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vpmovd2m %zmm0, %k1
; AVX512DQ-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; AVX512DQ-NEXT: vcvtdq2pd %xmm0, %ymm0
@@ -2140,7 +2140,7 @@ define <2 x double> @ubto2f64(<2 x i32>
; NOVL-NEXT: vpcmpltuq %zmm1, %zmm0, %k1
; NOVL-NEXT: vpbroadcastd {{.*}}(%rip), %zmm0 {%k1} {z}
; NOVL-NEXT: vcvtudq2pd %ymm0, %zmm0
-; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
@@ -2160,7 +2160,7 @@ define <2 x double> @ubto2f64(<2 x i32>
define <2 x i64> @test_2f64toub(<2 x double> %a, <2 x i64> %passthru) {
; KNL-LABEL: test_2f64toub:
; KNL: # %bb.0:
-; KNL-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; KNL-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; KNL-NEXT: vcvttsd2si %xmm0, %eax
; KNL-NEXT: andl $1, %eax
; KNL-NEXT: kmovw %eax, %k0
@@ -2170,7 +2170,7 @@ define <2 x i64> @test_2f64toub(<2 x dou
; KNL-NEXT: kshiftlw $1, %k1, %k1
; KNL-NEXT: korw %k1, %k0, %k1
; KNL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -2184,7 +2184,7 @@ define <2 x i64> @test_2f64toub(<2 x dou
;
; AVX512DQ-LABEL: test_2f64toub:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512DQ-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512DQ-NEXT: vcvttsd2si %xmm2, %eax
; AVX512DQ-NEXT: kmovw %eax, %k0
@@ -2195,13 +2195,13 @@ define <2 x i64> @test_2f64toub(<2 x dou
; AVX512DQ-NEXT: kshiftrb $7, %k1, %k1
; AVX512DQ-NEXT: korb %k0, %k1, %k1
; AVX512DQ-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_2f64toub:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-NEXT: vcvttsd2si %xmm0, %eax
; AVX512BW-NEXT: andl $1, %eax
; AVX512BW-NEXT: kmovw %eax, %k0
@@ -2211,7 +2211,7 @@ define <2 x i64> @test_2f64toub(<2 x dou
; AVX512BW-NEXT: kshiftlw $1, %k1, %k1
; AVX512BW-NEXT: korw %k1, %k0, %k1
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%mask = fptoui <2 x double> %a to <2 x i1>
@@ -2222,12 +2222,12 @@ define <2 x i64> @test_2f64toub(<2 x dou
define <4 x i64> @test_4f64toub(<4 x double> %a, <4 x i64> %passthru) {
; NOVL-LABEL: test_4f64toub:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NOVL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; NOVL-NEXT: vcvttpd2dq %ymm0, %xmm0
; NOVL-NEXT: vpslld $31, %xmm0, %xmm0
; NOVL-NEXT: vptestmd %zmm0, %zmm0, %k1
; NOVL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
-; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NOVL-NEXT: retq
;
; VL-LABEL: test_4f64toub:
@@ -2266,12 +2266,12 @@ define <8 x i64> @test_8f64toub(<8 x dou
define <2 x i64> @test_2f32toub(<2 x float> %a, <2 x i64> %passthru) {
; NOVL-LABEL: test_2f32toub:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; NOVL-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; NOVL-NEXT: vcvttps2dq %xmm0, %xmm0
; NOVL-NEXT: vpslld $31, %xmm0, %xmm0
; NOVL-NEXT: vptestmd %zmm0, %zmm0, %k1
; NOVL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
-; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
@@ -2290,12 +2290,12 @@ define <2 x i64> @test_2f32toub(<2 x flo
define <4 x i64> @test_4f32toub(<4 x float> %a, <4 x i64> %passthru) {
; NOVL-LABEL: test_4f32toub:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NOVL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; NOVL-NEXT: vcvttps2dq %xmm0, %xmm0
; NOVL-NEXT: vpslld $31, %xmm0, %xmm0
; NOVL-NEXT: vptestmd %zmm0, %zmm0, %k1
; NOVL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
-; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NOVL-NEXT: retq
;
; VL-LABEL: test_4f32toub:
@@ -2347,7 +2347,7 @@ define <16 x i32> @test_16f32toub(<16 x
define <2 x i64> @test_2f64tosb(<2 x double> %a, <2 x i64> %passthru) {
; KNL-LABEL: test_2f64tosb:
; KNL: # %bb.0:
-; KNL-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; KNL-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; KNL-NEXT: vcvttsd2si %xmm0, %eax
; KNL-NEXT: andl $1, %eax
; KNL-NEXT: kmovw %eax, %k0
@@ -2357,7 +2357,7 @@ define <2 x i64> @test_2f64tosb(<2 x dou
; KNL-NEXT: kshiftlw $1, %k1, %k1
; KNL-NEXT: korw %k1, %k0, %k1
; KNL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -2371,7 +2371,7 @@ define <2 x i64> @test_2f64tosb(<2 x dou
;
; AVX512DQ-LABEL: test_2f64tosb:
; AVX512DQ: # %bb.0:
-; AVX512DQ-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512DQ-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512DQ-NEXT: vpermilpd {{.*#+}} xmm2 = xmm0[1,0]
; AVX512DQ-NEXT: vcvttsd2si %xmm2, %eax
; AVX512DQ-NEXT: kmovw %eax, %k0
@@ -2382,13 +2382,13 @@ define <2 x i64> @test_2f64tosb(<2 x dou
; AVX512DQ-NEXT: kshiftrb $7, %k1, %k1
; AVX512DQ-NEXT: korb %k0, %k1, %k1
; AVX512DQ-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
-; AVX512DQ-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: test_2f64tosb:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512BW-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-NEXT: vcvttsd2si %xmm0, %eax
; AVX512BW-NEXT: andl $1, %eax
; AVX512BW-NEXT: kmovw %eax, %k0
@@ -2398,7 +2398,7 @@ define <2 x i64> @test_2f64tosb(<2 x dou
; AVX512BW-NEXT: kshiftlw $1, %k1, %k1
; AVX512BW-NEXT: korw %k1, %k0, %k1
; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%mask = fptosi <2 x double> %a to <2 x i1>
@@ -2409,11 +2409,11 @@ define <2 x i64> @test_2f64tosb(<2 x dou
define <4 x i64> @test_4f64tosb(<4 x double> %a, <4 x i64> %passthru) {
; NOVL-LABEL: test_4f64tosb:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NOVL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; NOVL-NEXT: vcvttpd2dq %ymm0, %xmm0
; NOVL-NEXT: vptestmd %zmm0, %zmm0, %k1
; NOVL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
-; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NOVL-NEXT: retq
;
; VL-LABEL: test_4f64tosb:
@@ -2449,11 +2449,11 @@ define <8 x i64> @test_8f64tosb(<8 x dou
define <2 x i64> @test_2f32tosb(<2 x float> %a, <2 x i64> %passthru) {
; NOVL-LABEL: test_2f32tosb:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %xmm1 killed %xmm1 def %zmm1
+; NOVL-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; NOVL-NEXT: vcvttps2dq %xmm0, %xmm0
; NOVL-NEXT: vptestmd %zmm0, %zmm0, %k1
; NOVL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
-; NOVL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; NOVL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; NOVL-NEXT: vzeroupper
; NOVL-NEXT: retq
;
@@ -2471,11 +2471,11 @@ define <2 x i64> @test_2f32tosb(<2 x flo
define <4 x i64> @test_4f32tosb(<4 x float> %a, <4 x i64> %passthru) {
; NOVL-LABEL: test_4f32tosb:
; NOVL: # %bb.0:
-; NOVL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; NOVL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; NOVL-NEXT: vcvttps2dq %xmm0, %xmm0
; NOVL-NEXT: vptestmd %zmm0, %zmm0, %k1
; NOVL-NEXT: vmovdqa64 %zmm1, %zmm0 {%k1} {z}
-; NOVL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; NOVL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; NOVL-NEXT: retq
;
; VL-LABEL: test_4f32tosb:
Modified: llvm/trunk/test/CodeGen/X86/avx512-ext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-ext.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-ext.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-ext.ll Wed Jan 31 14:04:26 2018
@@ -302,7 +302,7 @@ define <4 x i32> @zext_4x8mem_to_4x32(<4
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4x8mem_to_4x32:
@@ -324,7 +324,7 @@ define <4 x i32> @sext_4x8mem_to_4x32(<4
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovsxbd (%rdi), %xmm0
; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_4x8mem_to_4x32:
@@ -347,7 +347,7 @@ define <8 x i32> @zext_8x8mem_to_8x32(<8
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x8mem_to_8x32:
@@ -370,7 +370,7 @@ define <8 x i32> @sext_8x8mem_to_8x32(<8
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovsxbd (%rdi), %ymm0
; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8x8mem_to_8x32:
@@ -492,7 +492,7 @@ define <2 x i64> @zext_2x8mem_to_2x64(<2
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_2x8mem_to_2x64:
@@ -513,7 +513,7 @@ define <2 x i64> @sext_2x8mem_to_2x64mas
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovsxbq (%rdi), %xmm0
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_2x8mem_to_2x64mask:
@@ -544,7 +544,7 @@ define <4 x i64> @zext_4x8mem_to_4x64(<4
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovzxbq {{.*#+}} ymm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero,mem[2],zero,zero,zero,zero,zero,zero,zero,mem[3],zero,zero,zero,zero,zero,zero,zero
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4x8mem_to_4x64:
@@ -566,7 +566,7 @@ define <4 x i64> @sext_4x8mem_to_4x64mas
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovsxbq (%rdi), %ymm0
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_4x8mem_to_4x64mask:
@@ -650,7 +650,7 @@ define <4 x i32> @zext_4x16mem_to_4x32(<
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4x16mem_to_4x32:
@@ -672,7 +672,7 @@ define <4 x i32> @sext_4x16mem_to_4x32ma
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovsxwd (%rdi), %xmm0
; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_4x16mem_to_4x32mask:
@@ -706,7 +706,7 @@ define <8 x i32> @zext_8x16mem_to_8x32(<
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovzxwd {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x16mem_to_8x32:
@@ -729,7 +729,7 @@ define <8 x i32> @sext_8x16mem_to_8x32ma
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovsxwd (%rdi), %ymm0
; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8x16mem_to_8x32mask:
@@ -762,7 +762,7 @@ define <8 x i32> @zext_8x16_to_8x32mask(
; KNL-NEXT: vptestmq %zmm1, %zmm1, %k1
; KNL-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; KNL-NEXT: vmovdqa32 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_8x16_to_8x32mask:
@@ -872,7 +872,7 @@ define <2 x i64> @zext_2x16mem_to_2x64(<
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovzxwq {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_2x16mem_to_2x64:
@@ -894,7 +894,7 @@ define <2 x i64> @sext_2x16mem_to_2x64ma
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovsxwq (%rdi), %xmm0
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_2x16mem_to_2x64mask:
@@ -926,7 +926,7 @@ define <4 x i64> @zext_4x16mem_to_4x64(<
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovzxwq {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4x16mem_to_4x64:
@@ -948,7 +948,7 @@ define <4 x i64> @sext_4x16mem_to_4x64ma
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovsxwq (%rdi), %ymm0
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_4x16mem_to_4x64mask:
@@ -1061,7 +1061,7 @@ define <2 x i64> @zext_2x32mem_to_2x64(<
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_2x32mem_to_2x64:
@@ -1083,7 +1083,7 @@ define <2 x i64> @sext_2x32mem_to_2x64ma
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovsxdq (%rdi), %xmm0
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_2x32mem_to_2x64mask:
@@ -1115,7 +1115,7 @@ define <4 x i64> @zext_4x32mem_to_4x64(<
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovzxdq {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4x32mem_to_4x64:
@@ -1137,7 +1137,7 @@ define <4 x i64> @sext_4x32mem_to_4x64ma
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpmovsxdq (%rdi), %ymm0
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_4x32mem_to_4x64mask:
@@ -1178,7 +1178,7 @@ define <4 x i64> @zext_4x32_to_4x64mask(
; KNL-NEXT: vptestmd %zmm1, %zmm1, %k1
; KNL-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; KNL-NEXT: vmovdqa64 %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: zext_4x32_to_4x64mask:
@@ -1331,7 +1331,7 @@ define i16 @trunc_16i8_to_16i1(<16 x i8>
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: # kill: def %ax killed %ax killed %eax
+; KNL-NEXT: # kill: def $ax killed $ax killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_16i8_to_16i1:
@@ -1339,7 +1339,7 @@ define i16 @trunc_16i8_to_16i1(<16 x i8>
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0
; SKX-NEXT: vpmovb2m %xmm0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq
%mask_b = trunc <16 x i8>%a to <16 x i1>
%mask = bitcast <16 x i1> %mask_b to i16
@@ -1352,7 +1352,7 @@ define i16 @trunc_16i32_to_16i1(<16 x i3
; KNL-NEXT: vpslld $31, %zmm0, %zmm0
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: # kill: def %ax killed %ax killed %eax
+; KNL-NEXT: # kill: def $ax killed $ax killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_16i32_to_16i1:
@@ -1360,7 +1360,7 @@ define i16 @trunc_16i32_to_16i1(<16 x i3
; SKX-NEXT: vpslld $31, %zmm0, %zmm0
; SKX-NEXT: vptestmd %zmm0, %zmm0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%mask_b = trunc <16 x i32>%a to <16 x i1>
@@ -1390,7 +1390,7 @@ define i8 @trunc_8i16_to_8i1(<8 x i16> %
; KNL-NEXT: vpsllq $63, %zmm0, %zmm0
; KNL-NEXT: vptestmq %zmm0, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: # kill: def %al killed %al killed %eax
+; KNL-NEXT: # kill: def $al killed $al killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_8i16_to_8i1:
@@ -1398,7 +1398,7 @@ define i8 @trunc_8i16_to_8i1(<8 x i16> %
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0
; SKX-NEXT: vpmovw2m %xmm0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: # kill: def %al killed %al killed %eax
+; SKX-NEXT: # kill: def $al killed $al killed $eax
; SKX-NEXT: retq
%mask_b = trunc <8 x i16>%a to <8 x i1>
%mask = bitcast <8 x i1> %mask_b to i8
@@ -1410,7 +1410,7 @@ define <8 x i32> @sext_8i1_8i32(<8 x i32
; KNL: # %bb.0:
; KNL-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
; KNL-NEXT: vpternlogq $15, %zmm0, %zmm0, %zmm0
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8i1_8i32:
@@ -1436,7 +1436,7 @@ define i16 @trunc_i32_to_i1(i32 %a) {
; KNL-NEXT: kmovw %edi, %k1
; KNL-NEXT: korw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: # kill: def %ax killed %ax killed %eax
+; KNL-NEXT: # kill: def $ax killed $ax killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: trunc_i32_to_i1:
@@ -1449,7 +1449,7 @@ define i16 @trunc_i32_to_i1(i32 %a) {
; SKX-NEXT: kmovw %edi, %k1
; SKX-NEXT: korw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq
%a_i = trunc i32 %a to i1
%maskv = insertelement <16 x i1> <i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, i1 %a_i, i32 0
@@ -1462,7 +1462,7 @@ define <8 x i16> @sext_8i1_8i16(<8 x i32
; KNL: # %bb.0:
; KNL-NEXT: vpcmpgtd %ymm0, %ymm1, %ymm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: sext_8i1_8i16:
Modified: llvm/trunk/test/CodeGen/X86/avx512-extract-subvector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-extract-subvector.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-extract-subvector.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-extract-subvector.ll Wed Jan 31 14:04:26 2018
@@ -15,7 +15,7 @@ define <8 x i16> @extract_subvector128_v
define <8 x i16> @extract_subvector128_v32i16_first_element(<32 x i16> %x) nounwind {
; SKX-LABEL: extract_subvector128_v32i16_first_element:
; SKX: ## %bb.0:
-; SKX-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; SKX-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%r1 = shufflevector <32 x i16> %x, <32 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -35,7 +35,7 @@ define <16 x i8> @extract_subvector128_v
define <16 x i8> @extract_subvector128_v64i8_first_element(<64 x i8> %x) nounwind {
; SKX-LABEL: extract_subvector128_v64i8_first_element:
; SKX: ## %bb.0:
-; SKX-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; SKX-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%r1 = shufflevector <64 x i8> %x, <64 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
Modified: llvm/trunk/test/CodeGen/X86/avx512-hadd-hsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-hadd-hsub.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-hadd-hsub.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-hadd-hsub.ll Wed Jan 31 14:04:26 2018
@@ -63,7 +63,7 @@ define float @fhadd_16(<16 x float> %x22
; KNL-NEXT: vaddps %zmm1, %zmm0, %zmm0
; KNL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; KNL-NEXT: vaddps %zmm1, %zmm0, %zmm0
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: fhadd_16:
@@ -72,7 +72,7 @@ define float @fhadd_16(<16 x float> %x22
; SKX-NEXT: vaddps %zmm1, %zmm0, %zmm0
; SKX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SKX-NEXT: vaddps %zmm1, %zmm0, %zmm0
-; SKX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; SKX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x226 = shufflevector <16 x float> %x225, <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -90,7 +90,7 @@ define float @fhsub_16(<16 x float> %x22
; KNL-NEXT: vaddps %zmm1, %zmm0, %zmm0
; KNL-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; KNL-NEXT: vsubps %zmm1, %zmm0, %zmm0
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: fhsub_16:
@@ -99,7 +99,7 @@ define float @fhsub_16(<16 x float> %x22
; SKX-NEXT: vaddps %zmm1, %zmm0, %zmm0
; SKX-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
; SKX-NEXT: vsubps %zmm1, %zmm0, %zmm0
-; SKX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; SKX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x226 = shufflevector <16 x float> %x225, <16 x float> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
@@ -181,7 +181,7 @@ define <4 x double> @fadd_noundef_low(<8
; KNL-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; KNL-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; KNL-NEXT: vaddpd %zmm0, %zmm2, %zmm0
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: fadd_noundef_low:
@@ -189,7 +189,7 @@ define <4 x double> @fadd_noundef_low(<8
; SKX-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; SKX-NEXT: vaddpd %zmm0, %zmm2, %zmm0
-; SKX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; SKX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; SKX-NEXT: retq
%x226 = shufflevector <8 x double> %x225, <8 x double> %x227, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
%x228 = shufflevector <8 x double> %x225, <8 x double> %x227, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5 ,i32 13, i32 7, i32 15>
@@ -228,7 +228,7 @@ define <8 x i32> @hadd_16_3_sv(<16 x i32
; KNL-NEXT: vshufps {{.*#+}} zmm2 = zmm0[0,2],zmm1[0,2],zmm0[4,6],zmm1[4,6],zmm0[8,10],zmm1[8,10],zmm0[12,14],zmm1[12,14]
; KNL-NEXT: vshufps {{.*#+}} zmm0 = zmm0[1,3],zmm1[1,3],zmm0[5,7],zmm1[5,7],zmm0[9,11],zmm1[9,11],zmm0[13,15],zmm1[13,15]
; KNL-NEXT: vpaddd %zmm0, %zmm2, %zmm0
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: hadd_16_3_sv:
@@ -236,7 +236,7 @@ define <8 x i32> @hadd_16_3_sv(<16 x i32
; SKX-NEXT: vshufps {{.*#+}} zmm2 = zmm0[0,2],zmm1[0,2],zmm0[4,6],zmm1[4,6],zmm0[8,10],zmm1[8,10],zmm0[12,14],zmm1[12,14]
; SKX-NEXT: vshufps {{.*#+}} zmm0 = zmm0[1,3],zmm1[1,3],zmm0[5,7],zmm1[5,7],zmm0[9,11],zmm1[9,11],zmm0[13,15],zmm1[13,15]
; SKX-NEXT: vpaddd %zmm0, %zmm2, %zmm0
-; SKX-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; SKX-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; SKX-NEXT: retq
%x226 = shufflevector <16 x i32> %x225, <16 x i32> %x227, <16 x i32> <i32 0, i32 2, i32 16, i32 18
, i32 4, i32 6, i32 20, i32 22, i32 8, i32 10, i32 24, i32 26, i32 12, i32 14, i32 28, i32 30>
@@ -255,7 +255,7 @@ define double @fadd_noundef_eel(<8 x dou
; KNL-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; KNL-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; KNL-NEXT: vaddpd %zmm0, %zmm2, %zmm0
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: fadd_noundef_eel:
@@ -263,7 +263,7 @@ define double @fadd_noundef_eel(<8 x dou
; SKX-NEXT: vunpcklpd {{.*#+}} zmm2 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6]
; SKX-NEXT: vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
; SKX-NEXT: vaddpd %zmm0, %zmm2, %zmm0
-; SKX-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; SKX-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%x226 = shufflevector <8 x double> %x225, <8 x double> %x227, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
Modified: llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll Wed Jan 31 14:04:26 2018
@@ -85,7 +85,7 @@ define float @test7(<16 x float> %x, i32
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $15, %edi
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -104,7 +104,7 @@ define double @test8(<8 x double> %x, i3
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -123,7 +123,7 @@ define float @test9(<8 x float> %x, i32
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -142,7 +142,7 @@ define i32 @test10(<16 x i32> %x, i32 %i
; CHECK-NEXT: movq %rsp, %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $15, %edi
; CHECK-NEXT: movl (%rsp,%rdi,4), %eax
@@ -231,7 +231,7 @@ define i16 @test13(i32 %a, i32 %b) {
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: korw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
+; KNL-NEXT: ## kill: def $ax killed $ax killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test13:
@@ -246,7 +246,7 @@ define i16 @test13(i32 %a, i32 %b) {
; SKX-NEXT: kmovw %eax, %k1
; SKX-NEXT: korw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
+; SKX-NEXT: ## kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq
%cmp_res = icmp ult i32 %a, %b
%maskv = insertelement <16 x i1> <i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, i1 %cmp_res, i32 0
@@ -309,7 +309,7 @@ define i16 @test16(i1 *%addr, i16 %a) {
; KNL-NEXT: kshiftrw $5, %k1, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
+; KNL-NEXT: ## kill: def $ax killed $ax killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test16:
@@ -322,7 +322,7 @@ define i16 @test16(i1 *%addr, i16 %a) {
; SKX-NEXT: kshiftrw $5, %k0, %k0
; SKX-NEXT: kxorw %k0, %k1, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
+; SKX-NEXT: ## kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq
%x = load i1 , i1 * %addr, align 128
%a1 = bitcast i16 %a to <16 x i1>
@@ -343,7 +343,7 @@ define i8 @test17(i1 *%addr, i8 %a) {
; KNL-NEXT: kshiftrw $11, %k1, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test17:
@@ -356,7 +356,7 @@ define i8 @test17(i1 *%addr, i8 %a) {
; SKX-NEXT: kshiftrb $3, %k0, %k0
; SKX-NEXT: kxorb %k0, %k1, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: retq
%x = load i1 , i1 * %addr, align 128
%a1 = bitcast i8 %a to <8 x i1>
@@ -451,7 +451,7 @@ define i16 @extract_v32i16(<32 x i16> %x
; CHECK-NEXT: vpextrw $1, %xmm0, %eax
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpextrw $1, %xmm0, (%rdi)
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%r1 = extractelement <32 x i16> %x, i32 1
@@ -466,7 +466,7 @@ define i16 @extract_v16i16(<16 x i16> %x
; CHECK-NEXT: vpextrw $1, %xmm0, %eax
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpextrw $1, %xmm0, (%rdi)
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%r1 = extractelement <16 x i16> %x, i32 1
@@ -480,7 +480,7 @@ define i16 @extract_v8i16(<8 x i16> %x,
; CHECK: ## %bb.0:
; CHECK-NEXT: vpextrw $1, %xmm0, %eax
; CHECK-NEXT: vpextrw $3, %xmm0, (%rdi)
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%r1 = extractelement <8 x i16> %x, i32 1
%r2 = extractelement <8 x i16> %x, i32 3
@@ -494,7 +494,7 @@ define i8 @extract_v64i8(<64 x i8> %x, i
; CHECK-NEXT: vpextrb $1, %xmm0, %eax
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpextrb $1, %xmm0, (%rdi)
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%r1 = extractelement <64 x i8> %x, i32 1
@@ -509,7 +509,7 @@ define i8 @extract_v32i8(<32 x i8> %x, i
; CHECK-NEXT: vpextrb $1, %xmm0, %eax
; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0
; CHECK-NEXT: vpextrb $1, %xmm0, (%rdi)
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%r1 = extractelement <32 x i8> %x, i32 1
@@ -523,7 +523,7 @@ define i8 @extract_v16i8(<16 x i8> %x, i
; CHECK: ## %bb.0:
; CHECK-NEXT: vpextrb $1, %xmm0, %eax
; CHECK-NEXT: vpextrb $3, %xmm0, (%rdi)
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%r1 = extractelement <16 x i8> %x, i32 1
%r2 = extractelement <16 x i8> %x, i32 3
@@ -825,8 +825,8 @@ define i32 @test_insertelement_v32i1(i32
define i8 @test_iinsertelement_v4i1(i32 %a, i32 %b, <4 x i32> %x , <4 x i32> %y) {
; KNL-LABEL: test_iinsertelement_v4i1:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: setb %al
; KNL-NEXT: vpcmpltud %zmm1, %zmm0, %k0
@@ -837,7 +837,7 @@ define i8 @test_iinsertelement_v4i1(i32
; KNL-NEXT: kshiftrw $13, %k1, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -853,7 +853,7 @@ define i8 @test_iinsertelement_v4i1(i32
; SKX-NEXT: kshiftrb $5, %k1, %k1
; SKX-NEXT: kxorb %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: retq
%cmp_res_i1 = icmp ult i32 %a, %b
%cmp_cmp_vec = icmp ult <4 x i32> %x, %y
@@ -866,8 +866,8 @@ define i8 @test_iinsertelement_v4i1(i32
define i8 @test_iinsertelement_v2i1(i32 %a, i32 %b, <2 x i64> %x , <2 x i64> %y) {
; KNL-LABEL: test_iinsertelement_v2i1:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: cmpl %esi, %edi
; KNL-NEXT: setb %al
; KNL-NEXT: vpcmpltuq %zmm1, %zmm0, %k0
@@ -877,7 +877,7 @@ define i8 @test_iinsertelement_v2i1(i32
; KNL-NEXT: kshiftlw $1, %k1, %k1
; KNL-NEXT: korw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -892,7 +892,7 @@ define i8 @test_iinsertelement_v2i1(i32
; SKX-NEXT: kshiftlb $1, %k1, %k1
; SKX-NEXT: korb %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: retq
%cmp_res_i1 = icmp ult i32 %a, %b
%cmp_cmp_vec = icmp ult <2 x i64> %x, %y
@@ -905,8 +905,8 @@ define i8 @test_iinsertelement_v2i1(i32
define zeroext i8 @test_extractelement_v2i1(<2 x i64> %a, <2 x i64> %b) {
; KNL-LABEL: test_extractelement_v2i1:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpcmpnleuq %zmm1, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: andb $1, %al
@@ -934,8 +934,8 @@ define zeroext i8 @test_extractelement_v
define zeroext i8 @extractelement_v2i1_alt(<2 x i64> %a, <2 x i64> %b) {
; KNL-LABEL: extractelement_v2i1_alt:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpcmpnleuq %zmm1, %zmm0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: andb $1, %al
@@ -964,8 +964,8 @@ define zeroext i8 @extractelement_v2i1_a
define zeroext i8 @test_extractelement_v4i1(<4 x i32> %a, <4 x i32> %b) {
; KNL-LABEL: test_extractelement_v4i1:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
; KNL-NEXT: kshiftrw $3, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
@@ -1091,7 +1091,7 @@ define zeroext i8 @extractelement_v64i1_
define i64 @test_extractelement_variable_v2i64(<2 x i64> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v2i64:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: movq -24(%rsp,%rdi,8), %rax
@@ -1110,7 +1110,7 @@ define i64 @test_extractelement_variable
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $3, %edi
; CHECK-NEXT: movq (%rsp,%rdi,8), %rax
@@ -1132,7 +1132,7 @@ define i64 @test_extractelement_variable
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: movq (%rsp,%rdi,8), %rax
@@ -1147,7 +1147,7 @@ define i64 @test_extractelement_variable
define double @test_extractelement_variable_v2f64(<2 x double> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v2f64:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $1, %edi
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -1166,7 +1166,7 @@ define double @test_extractelement_varia
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $3, %edi
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -1188,7 +1188,7 @@ define double @test_extractelement_varia
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
@@ -1203,7 +1203,7 @@ define double @test_extractelement_varia
define i32 @test_extractelement_variable_v4i32(<4 x i32> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v4i32:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $3, %edi
; CHECK-NEXT: movl -24(%rsp,%rdi,4), %eax
@@ -1222,7 +1222,7 @@ define i32 @test_extractelement_variable
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: movl (%rsp,%rdi,4), %eax
@@ -1244,7 +1244,7 @@ define i32 @test_extractelement_variable
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $15, %edi
; CHECK-NEXT: movl (%rsp,%rdi,4), %eax
@@ -1259,7 +1259,7 @@ define i32 @test_extractelement_variable
define float @test_extractelement_variable_v4f32(<4 x float> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v4f32:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $3, %edi
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -1278,7 +1278,7 @@ define float @test_extractelement_variab
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -1300,7 +1300,7 @@ define float @test_extractelement_variab
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-64, %rsp
; CHECK-NEXT: subq $128, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %zmm0, (%rsp)
; CHECK-NEXT: andl $15, %edi
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
@@ -1315,7 +1315,7 @@ define float @test_extractelement_variab
define i16 @test_extractelement_variable_v8i16(<8 x i16> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v8i16:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $7, %edi
; CHECK-NEXT: movzwl -24(%rsp,%rdi,2), %eax
@@ -1334,7 +1334,7 @@ define i16 @test_extractelement_variable
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $15, %edi
; CHECK-NEXT: movzwl (%rsp,%rdi,2), %eax
@@ -1356,7 +1356,7 @@ define i16 @test_extractelement_variable
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-64, %rsp
; KNL-NEXT: subq $128, %rsp
-; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
+; KNL-NEXT: ## kill: def $edi killed $edi def $rdi
; KNL-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
; KNL-NEXT: vmovaps %ymm0, (%rsp)
; KNL-NEXT: andl $31, %edi
@@ -1375,7 +1375,7 @@ define i16 @test_extractelement_variable
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-64, %rsp
; SKX-NEXT: subq $128, %rsp
-; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
+; SKX-NEXT: ## kill: def $edi killed $edi def $rdi
; SKX-NEXT: vmovaps %zmm0, (%rsp)
; SKX-NEXT: andl $31, %edi
; SKX-NEXT: movzwl (%rsp,%rdi,2), %eax
@@ -1390,7 +1390,7 @@ define i16 @test_extractelement_variable
define i8 @test_extractelement_variable_v16i8(<16 x i8> %t1, i32 %index) {
; CHECK-LABEL: test_extractelement_variable_v16i8:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT: andl $15, %edi
; CHECK-NEXT: movb -24(%rsp,%rdi), %al
@@ -1409,7 +1409,7 @@ define i8 @test_extractelement_variable_
; CHECK-NEXT: .cfi_def_cfa_register %rbp
; CHECK-NEXT: andq $-32, %rsp
; CHECK-NEXT: subq $64, %rsp
-; CHECK-NEXT: ## kill: def %edi killed %edi def %rdi
+; CHECK-NEXT: ## kill: def $edi killed $edi def $rdi
; CHECK-NEXT: vmovaps %ymm0, (%rsp)
; CHECK-NEXT: andl $31, %edi
; CHECK-NEXT: movb (%rsp,%rdi), %al
@@ -1432,7 +1432,7 @@ define i8 @test_extractelement_variable_
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-64, %rsp
; KNL-NEXT: subq $128, %rsp
-; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
+; KNL-NEXT: ## kill: def $edi killed $edi def $rdi
; KNL-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp)
; KNL-NEXT: vmovaps %ymm0, (%rsp)
; KNL-NEXT: andl $63, %edi
@@ -1451,7 +1451,7 @@ define i8 @test_extractelement_variable_
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-64, %rsp
; SKX-NEXT: subq $128, %rsp
-; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
+; SKX-NEXT: ## kill: def $edi killed $edi def $rdi
; SKX-NEXT: vmovaps %zmm0, (%rsp)
; SKX-NEXT: andl $63, %edi
; SKX-NEXT: movb (%rsp,%rdi), %al
@@ -1512,9 +1512,9 @@ define i8 @test_extractelement_variable_
define zeroext i8 @test_extractelement_varible_v2i1(<2 x i64> %a, <2 x i64> %b, i32 %index) {
; KNL-LABEL: test_extractelement_varible_v2i1:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
-; KNL-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: ## kill: def $edi killed $edi def $rdi
+; KNL-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpcmpnleuq %zmm1, %zmm0, %k1
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vextracti32x4 $0, %zmm0, -{{[0-9]+}}(%rsp)
@@ -1526,7 +1526,7 @@ define zeroext i8 @test_extractelement_v
;
; SKX-LABEL: test_extractelement_varible_v2i1:
; SKX: ## %bb.0:
-; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
+; SKX-NEXT: ## kill: def $edi killed $edi def $rdi
; SKX-NEXT: vpcmpnleuq %xmm1, %xmm0, %k0
; SKX-NEXT: vpmovm2q %k0, %xmm0
; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
@@ -1543,9 +1543,9 @@ define zeroext i8 @test_extractelement_v
define zeroext i8 @test_extractelement_varible_v4i1(<4 x i32> %a, <4 x i32> %b, i32 %index) {
; KNL-LABEL: test_extractelement_varible_v4i1:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
-; KNL-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: ## kill: def $edi killed $edi def $rdi
+; KNL-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpcmpnleud %zmm1, %zmm0, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vextracti32x4 $0, %zmm0, -{{[0-9]+}}(%rsp)
@@ -1557,7 +1557,7 @@ define zeroext i8 @test_extractelement_v
;
; SKX-LABEL: test_extractelement_varible_v4i1:
; SKX: ## %bb.0:
-; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
+; SKX-NEXT: ## kill: def $edi killed $edi def $rdi
; SKX-NEXT: vpcmpnleud %xmm1, %xmm0, %k0
; SKX-NEXT: vpmovm2d %k0, %xmm0
; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
@@ -1574,9 +1574,9 @@ define zeroext i8 @test_extractelement_v
define zeroext i8 @test_extractelement_varible_v8i1(<8 x i32> %a, <8 x i32> %b, i32 %index) {
; KNL-LABEL: test_extractelement_varible_v8i1:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
-; KNL-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $edi killed $edi def $rdi
+; KNL-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpcmpnleud %zmm1, %zmm0, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovdw %zmm0, %ymm0
@@ -1589,7 +1589,7 @@ define zeroext i8 @test_extractelement_v
;
; SKX-LABEL: test_extractelement_varible_v8i1:
; SKX: ## %bb.0:
-; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
+; SKX-NEXT: ## kill: def $edi killed $edi def $rdi
; SKX-NEXT: vpcmpnleud %ymm1, %ymm0, %k0
; SKX-NEXT: vpmovm2w %k0, %xmm0
; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
@@ -1607,7 +1607,7 @@ define zeroext i8 @test_extractelement_v
define zeroext i8 @test_extractelement_varible_v16i1(<16 x i32> %a, <16 x i32> %b, i32 %index) {
; KNL-LABEL: test_extractelement_varible_v16i1:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
+; KNL-NEXT: ## kill: def $edi killed $edi def $rdi
; KNL-NEXT: vpcmpnleud %zmm1, %zmm0, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovdb %zmm0, -{{[0-9]+}}(%rsp)
@@ -1619,7 +1619,7 @@ define zeroext i8 @test_extractelement_v
;
; SKX-LABEL: test_extractelement_varible_v16i1:
; SKX: ## %bb.0:
-; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
+; SKX-NEXT: ## kill: def $edi killed $edi def $rdi
; SKX-NEXT: vpcmpnleud %zmm1, %zmm0, %k0
; SKX-NEXT: vpmovm2b %k0, %xmm0
; SKX-NEXT: vmovdqa %xmm0, -{{[0-9]+}}(%rsp)
@@ -1644,7 +1644,7 @@ define zeroext i8 @test_extractelement_v
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-32, %rsp
; KNL-NEXT: subq $64, %rsp
-; KNL-NEXT: ## kill: def %edi killed %edi def %rdi
+; KNL-NEXT: ## kill: def $edi killed $edi def $rdi
; KNL-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; KNL-NEXT: vpxor %ymm2, %ymm1, %ymm1
; KNL-NEXT: vpxor %ymm2, %ymm0, %ymm0
@@ -1667,7 +1667,7 @@ define zeroext i8 @test_extractelement_v
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-32, %rsp
; SKX-NEXT: subq $64, %rsp
-; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
+; SKX-NEXT: ## kill: def $edi killed $edi def $rdi
; SKX-NEXT: vpcmpnleub %ymm1, %ymm0, %k0
; SKX-NEXT: vpmovm2b %k0, %ymm0
; SKX-NEXT: vmovdqa %ymm0, (%rsp)
@@ -1706,7 +1706,7 @@ define i32 @test_insertelement_variable_
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-32, %rsp
; KNL-NEXT: subq $64, %rsp
-; KNL-NEXT: ## kill: def %esi killed %esi def %rsi
+; KNL-NEXT: ## kill: def $esi killed $esi def $rsi
; KNL-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; KNL-NEXT: vpxor %ymm1, %ymm0, %ymm0
; KNL-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
@@ -1738,7 +1738,7 @@ define i32 @test_insertelement_variable_
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-32, %rsp
; SKX-NEXT: subq $64, %rsp
-; SKX-NEXT: ## kill: def %esi killed %esi def %rsi
+; SKX-NEXT: ## kill: def $esi killed $esi def $rsi
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; SKX-NEXT: vpcmpnleub %ymm1, %ymm0, %k0
; SKX-NEXT: andl $31, %esi
@@ -1770,7 +1770,7 @@ define i64 @test_insertelement_variable_
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-64, %rsp
; KNL-NEXT: subq $128, %rsp
-; KNL-NEXT: ## kill: def %esi killed %esi def %rsi
+; KNL-NEXT: ## kill: def $esi killed $esi def $rsi
; KNL-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; KNL-NEXT: vpxor %ymm2, %ymm0, %ymm0
; KNL-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
@@ -1821,7 +1821,7 @@ define i64 @test_insertelement_variable_
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-64, %rsp
; SKX-NEXT: subq $128, %rsp
-; SKX-NEXT: ## kill: def %esi killed %esi def %rsi
+; SKX-NEXT: ## kill: def $esi killed $esi def $rsi
; SKX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; SKX-NEXT: vpcmpnleub %zmm1, %zmm0, %k0
; SKX-NEXT: andl $63, %esi
@@ -2172,7 +2172,7 @@ define i128 @test_insertelement_variable
; KNL-NEXT: .cfi_def_cfa_register %rbp
; KNL-NEXT: andq $-128, %rsp
; KNL-NEXT: subq $256, %rsp ## imm = 0x100
-; KNL-NEXT: ## kill: def %esi killed %esi def %rsi
+; KNL-NEXT: ## kill: def $esi killed $esi def $rsi
; KNL-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; KNL-NEXT: vpxor %ymm4, %ymm0, %ymm0
; KNL-NEXT: vpcmpgtb %ymm4, %ymm0, %ymm0
@@ -2255,7 +2255,7 @@ define i128 @test_insertelement_variable
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-128, %rsp
; SKX-NEXT: subq $256, %rsp ## imm = 0x100
-; SKX-NEXT: ## kill: def %esi killed %esi def %rsi
+; SKX-NEXT: ## kill: def $esi killed $esi def $rsi
; SKX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; SKX-NEXT: vpcmpnleub %zmm2, %zmm0, %k0
; SKX-NEXT: vpcmpnleub %zmm2, %zmm1, %k1
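
A note on the pattern above, since it accounts for most of this file's churn: the "## kill:" comments are AsmPrinter annotations for KILL pseudo-instructions, and only their register sigils change in this patch. The "def $edi killed $edi def $rdi" form appears whenever a 32-bit index argument is fed into a 64-bit addressing mode. A minimal hand-written reproduction, not taken from this test file and with an illustrative function name:

; %index arrives in $edi; the scaled stack access below consumes the
; full $rdi, whose upper 32 bits are not guaranteed to be defined.
define i32 @extract_var_idx(<4 x i32> %v, i32 %index) {
  %r = extractelement <4 x i32> %v, i32 %index
  ret i32 %r
}
; Expected annotation in the generated asm, with the new sigil:
;   ## kill: def $edi killed $edi def $rdi
;   andl $3, %edi
;   movl -24(%rsp,%rdi,4), %eax
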
Modified: llvm/trunk/test/CodeGen/X86/avx512-insert-extract_i1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-insert-extract_i1.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-insert-extract_i1.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-insert-extract_i1.ll Wed Jan 31 14:04:26 2018
@@ -13,7 +13,7 @@ define zeroext i8 @test_extractelement_v
; SKX-NEXT: .cfi_def_cfa_register %rbp
; SKX-NEXT: andq $-64, %rsp
; SKX-NEXT: subq $128, %rsp
-; SKX-NEXT: ## kill: def %edi killed %edi def %rdi
+; SKX-NEXT: ## kill: def $edi killed $edi def $rdi
; SKX-NEXT: vpcmpnleub %zmm1, %zmm0, %k0
; SKX-NEXT: vpmovm2b %k0, %zmm0
; SKX-NEXT: vmovdqa64 %zmm0, (%rsp)
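
The "def $xmm0 killed $xmm0 def $zmm0" variants in the KNL runs are the same annotation in the other direction: KNL has no AVX512VL, so 128/256-bit compares are widened to 512-bit instructions and the comment records that the xmm/ymm argument is read as the low lanes of a zmm register. Hand-written illustration, not from this patch:

;   ## kill: def $xmm0 killed $xmm0 def $zmm0
;   vpcmpnleuq %zmm1, %zmm0, %k0     ; xmm operands widened to zmm
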
Modified: llvm/trunk/test/CodeGen/X86/avx512-intrinsics-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-intrinsics-upgrade.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-intrinsics-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-intrinsics-upgrade.ll Wed Jan 31 14:04:26 2018
@@ -10,7 +10,7 @@ define i16 @unpckbw_test(i16 %a0, i16 %a
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: kunpckbw %k0, %k1, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.kunpck.bw(i16 %a0, i16 %a1)
ret i16 %res
@@ -559,7 +559,7 @@ define i16 @test_pcmpeq_d(<16 x i32> %a,
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.d.512(<16 x i32> %a, <16 x i32> %b, i16 -1)
ret i16 %res
@@ -571,7 +571,7 @@ define i16 @test_mask_pcmpeq_d(<16 x i32
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpeqd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.d.512(<16 x i32> %a, <16 x i32> %b, i16 %mask)
ret i16 %res
@@ -584,7 +584,7 @@ define i8 @test_pcmpeq_q(<8 x i64> %a, <
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.512(<8 x i64> %a, <8 x i64> %b, i8 -1)
ret i8 %res
@@ -596,7 +596,7 @@ define i8 @test_mask_pcmpeq_q(<8 x i64>
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpeqq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
ret i8 %res
@@ -609,7 +609,7 @@ define i16 @test_pcmpgt_d(<16 x i32> %a,
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.d.512(<16 x i32> %a, <16 x i32> %b, i16 -1)
ret i16 %res
@@ -621,7 +621,7 @@ define i16 @test_mask_pcmpgt_d(<16 x i32
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpgtd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.d.512(<16 x i32> %a, <16 x i32> %b, i16 %mask)
ret i16 %res
@@ -634,7 +634,7 @@ define i8 @test_pcmpgt_q(<8 x i64> %a, <
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.512(<8 x i64> %a, <8 x i64> %b, i8 -1)
ret i8 %res
@@ -646,7 +646,7 @@ define i8 @test_mask_pcmpgt_q(<8 x i64>
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vpcmpgtq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.512(<8 x i64> %a, <8 x i64> %b, i8 %mask)
ret i8 %res
@@ -3054,7 +3054,7 @@ declare <16 x float> @llvm.x86.avx512.ma
define <16 x float>@test_int_x86_avx512_mask_insertf32x4_512(<16 x float> %x0, <4 x float> %x1, <16 x float> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_insertf32x4_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
+; CHECK-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinsertf32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
@@ -3075,7 +3075,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
define <16 x i32>@test_int_x86_avx512_mask_inserti32x4_512(<16 x i32> %x0, <4 x i32> %x1, <16 x i32> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_inserti32x4_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
+; CHECK-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm3
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinserti32x4 $1, %xmm1, %zmm0, %zmm2 {%k1}
@@ -3519,7 +3519,7 @@ declare <16 x float> @llvm.x86.avx512.ma
define <16 x float>@test_int_x86_avx512_mask_broadcastf32x4_512(<4 x float> %x0, <16 x float> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x4_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; CHECK-NEXT: kmovw %edi, %k1
@@ -3553,7 +3553,7 @@ declare <8 x double> @llvm.x86.avx512.ma
define <8 x double>@test_int_x86_avx512_mask_broadcastf64x4_512(<4 x double> %x0, <8 x double> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x4_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; CHECK-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm1 {%k1}
@@ -3587,7 +3587,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x4_512(<4 x i32> %x0, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x4_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; CHECK-NEXT: kmovw %edi, %k1
@@ -3622,7 +3622,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x4_512(<4 x i64> %x0, <8 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x4_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; CHECK-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm1 {%k1}
@@ -3692,7 +3692,7 @@ define i8 @test_vptestmq(<8 x i64> %a0,
; CHECK-NEXT: vptestmq %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.ptestm.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 -1)
%res1 = call i8 @llvm.x86.avx512.ptestm.q.512(<8 x i64> %a0, <8 x i64> %a1, i8 %m)
@@ -3710,7 +3710,7 @@ define i16 @test_vptestmd(<16 x i32> %a0
; CHECK-NEXT: vptestmd %zmm1, %zmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.ptestm.d.512(<16 x i32> %a0, <16 x i32> %a1, i16 -1)
%res1 = call i16 @llvm.x86.avx512.ptestm.d.512(<16 x i32> %a0, <16 x i32> %a1, i16 %m)
@@ -3730,7 +3730,7 @@ define i16@test_int_x86_avx512_ptestnm_d
; CHECK-NEXT: kmovw %k1, %ecx
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.ptestnm.d.512(<16 x i32> %x0, <16 x i32> %x1, i16 %x2)
%res1 = call i16 @llvm.x86.avx512.ptestnm.d.512(<16 x i32> %x0, <16 x i32> %x1, i16-1)
@@ -3749,7 +3749,7 @@ define i8@test_int_x86_avx512_ptestnm_q_
; CHECK-NEXT: kmovw %k1, %ecx
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.ptestnm.q.512(<8 x i64> %x0, <8 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.q.512(<8 x i64> %x0, <8 x i64> %x1, i8-1)
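
Nearly every hunk in this file is the sub-register-return flavor of the same comment: the mask result is moved from a k-register into $eax, but only the low 16 or 8 bits are returned. A minimal hand-written reproduction (illustrative function name, not from this test file):

define i16 @mask_as_i16(<16 x i32> %a, <16 x i32> %b) {
  %m = icmp eq <16 x i32> %a, %b
  %r = bitcast <16 x i1> %m to i16
  ret i16 %r
}
; Typical output with the new spelling:
;   kmovw %k0, %eax
;   ## kill: def $ax killed $ax killed $eax
;   retq
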
Modified: llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll Wed Jan 31 14:04:26 2018
@@ -40,7 +40,7 @@ define i16 @test_kand(i16 %a0, i16 %a1)
; CHECK-NEXT: kandw %k0, %k1, %k0
; CHECK-NEXT: kandw %k0, %k2, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kand.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kand.w(i16 %t1, i16 %a1)
@@ -58,7 +58,7 @@ define i16 @test_kandn(i16 %a0, i16 %a1)
; CHECK-NEXT: kandnw %k2, %k1, %k1
; CHECK-NEXT: kandnw %k0, %k1, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kandn.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kandn.w(i16 %t1, i16 %a1)
@@ -72,7 +72,7 @@ define i16 @test_knot(i16 %a0) {
; CHECK-NEXT: kmovw %edi, %k0
; CHECK-NEXT: knotw %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.knot.w(i16 %a0)
ret i16 %res
@@ -89,7 +89,7 @@ define i16 @test_kor(i16 %a0, i16 %a1) {
; CHECK-NEXT: korw %k0, %k1, %k0
; CHECK-NEXT: korw %k0, %k2, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kor.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kor.w(i16 %t1, i16 %a1)
@@ -109,7 +109,7 @@ define i16 @test_kxnor(i16 %a0, i16 %a1)
; CHECK-NEXT: kxorw %k0, %k1, %k0
; CHECK-NEXT: kxorw %k0, %k2, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kxnor.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kxnor.w(i16 %t1, i16 %a1)
@@ -127,7 +127,7 @@ define i16 @test_kxor(i16 %a0, i16 %a1)
; CHECK-NEXT: kxorw %k0, %k1, %k0
; CHECK-NEXT: kxorw %k0, %k2, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%t1 = call i16 @llvm.x86.avx512.kxor.w(i16 %a0, i16 8)
%t2 = call i16 @llvm.x86.avx512.kxor.w(i16 %t1, i16 %a1)
@@ -803,7 +803,7 @@ declare <8 x double> @llvm.x86.avx512.vb
; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpleps {sae}, %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.cmp.ps.512(<16 x float> %a, <16 x float> %b, i32 2, i16 -1, i32 8)
ret i16 %res
@@ -815,7 +815,7 @@ declare <8 x double> @llvm.x86.avx512.vb
; CHECK: ## %bb.0:
; CHECK-NEXT: vcmpneqpd %zmm1, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.cmp.pd.512(<8 x double> %a, <8 x double> %b, i32 4, i8 -1, i32 4)
ret i8 %res
@@ -3304,7 +3304,7 @@ define i8@test_int_x86_avx512_mask_cmp_s
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcmpnltsd {sae}, %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res4 = call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double> %x0, <2 x double> %x1, i32 5, i8 %x3, i32 8)
@@ -3326,7 +3326,7 @@ define i8@test_int_x86_avx512_mask_cmp_s
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: orl %edx, %eax
; CHECK-NEXT: orl %ecx, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res1 = call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double> %x0, <2 x double> %x1, i32 2, i8 -1, i32 4)
@@ -3348,7 +3348,7 @@ define i8@test_int_x86_avx512_mask_cmp_s
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcmpunordss %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res2 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 3, i8 %x3, i32 4)
@@ -3371,7 +3371,7 @@ define i8@test_int_x86_avx512_mask_cmp_s
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: andl %edx, %eax
; CHECK-NEXT: andl %ecx, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res1 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 2, i8 -1, i32 4)
%res2 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 3, i8 -1, i32 8)
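
As a reading aid for the remaining files: the rewrite is purely lexical. Physical registers now print with the '$' sigil while virtual registers keep '%', so mixed lines change only their physical-register operands. A rough hand-written MIR fragment summarizing the convention (not taken from any test in this patch):

  %0:gr32 = MOV32ri 42      ; virtual register: unchanged, '%' sigil
  $eax = COPY %0            ; physical register: now '$'
  RET 0, $eax
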
Modified: llvm/trunk/test/CodeGen/X86/avx512-mask-op.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-mask-op.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-mask-op.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-mask-op.ll Wed Jan 31 14:04:26 2018
@@ -11,7 +11,7 @@ define i16 @mask16(i16 %x) {
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: knotw %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
+; KNL-NEXT: ## kill: def $ax killed $ax killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: mask16:
@@ -19,7 +19,7 @@ define i16 @mask16(i16 %x) {
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: knotw %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
+; SKX-NEXT: ## kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: mask16:
@@ -27,7 +27,7 @@ define i16 @mask16(i16 %x) {
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: knotw %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: mask16:
@@ -35,7 +35,7 @@ define i16 @mask16(i16 %x) {
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: knotw %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512DQ-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i16 %x to <16 x i1>
%m1 = xor <16 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
@@ -84,7 +84,7 @@ define i8 @mask8(i8 %x) {
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: knotw %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: mask8:
@@ -92,7 +92,7 @@ define i8 @mask8(i8 %x) {
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: knotb %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: mask8:
@@ -100,7 +100,7 @@ define i8 @mask8(i8 %x) {
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: knotw %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: ## kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: mask8:
@@ -108,7 +108,7 @@ define i8 @mask8(i8 %x) {
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: knotb %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512DQ-NEXT: ## kill: def $al killed $al killed $eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
@@ -235,7 +235,7 @@ define i16 @mand16_mem(<16 x i1>* %x, <1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: korw %k0, %k2, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
+; KNL-NEXT: ## kill: def $ax killed $ax killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: mand16_mem:
@@ -246,7 +246,7 @@ define i16 @mand16_mem(<16 x i1>* %x, <1
; SKX-NEXT: kxorw %k1, %k0, %k0
; SKX-NEXT: korw %k0, %k2, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
+; SKX-NEXT: ## kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: mand16_mem:
@@ -257,7 +257,7 @@ define i16 @mand16_mem(<16 x i1>* %x, <1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: korw %k0, %k2, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: mand16_mem:
@@ -268,7 +268,7 @@ define i16 @mand16_mem(<16 x i1>* %x, <1
; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
; AVX512DQ-NEXT: korw %k0, %k2, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512DQ-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512DQ-NEXT: retq
%ma = load <16 x i1>, <16 x i1>* %x
%mb = load <16 x i1>, <16 x i1>* %y
@@ -285,7 +285,7 @@ define i8 @shuf_test1(i16 %v) nounwind {
; KNL-NEXT: kmovw %edi, %k0
; KNL-NEXT: kshiftrw $8, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: shuf_test1:
@@ -293,7 +293,7 @@ define i8 @shuf_test1(i16 %v) nounwind {
; SKX-NEXT: kmovd %edi, %k0
; SKX-NEXT: kshiftrw $8, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: shuf_test1:
@@ -301,7 +301,7 @@ define i8 @shuf_test1(i16 %v) nounwind {
; AVX512BW-NEXT: kmovd %edi, %k0
; AVX512BW-NEXT: kshiftrw $8, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: ## kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: shuf_test1:
@@ -309,7 +309,7 @@ define i8 @shuf_test1(i16 %v) nounwind {
; AVX512DQ-NEXT: kmovw %edi, %k0
; AVX512DQ-NEXT: kshiftrw $8, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512DQ-NEXT: ## kill: def $al killed $al killed $eax
; AVX512DQ-NEXT: retq
%v1 = bitcast i16 %v to <16 x i1>
%mask = shufflevector <16 x i1> %v1, <16 x i1> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -366,7 +366,7 @@ define i16 @zext_test2(<16 x i32> %a, <1
; KNL-NEXT: kshiftrw $5, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: andl $1, %eax
-; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
+; KNL-NEXT: ## kill: def $ax killed $ax killed $eax
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -376,7 +376,7 @@ define i16 @zext_test2(<16 x i32> %a, <1
; SKX-NEXT: kshiftrw $5, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: andl $1, %eax
-; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
+; SKX-NEXT: ## kill: def $ax killed $ax killed $eax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
@@ -386,7 +386,7 @@ define i16 @zext_test2(<16 x i32> %a, <1
; AVX512BW-NEXT: kshiftrw $5, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: andl $1, %eax
-; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -396,7 +396,7 @@ define i16 @zext_test2(<16 x i32> %a, <1
; AVX512DQ-NEXT: kshiftrw $5, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
; AVX512DQ-NEXT: andl $1, %eax
-; AVX512DQ-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512DQ-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%cmp_res = icmp ugt <16 x i32> %a, %b
@@ -412,7 +412,7 @@ define i8 @zext_test3(<16 x i32> %a, <16
; KNL-NEXT: kshiftrw $5, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
; KNL-NEXT: andb $1, %al
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -422,7 +422,7 @@ define i8 @zext_test3(<16 x i32> %a, <16
; SKX-NEXT: kshiftrw $5, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
; SKX-NEXT: andb $1, %al
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
;
@@ -432,7 +432,7 @@ define i8 @zext_test3(<16 x i32> %a, <16
; AVX512BW-NEXT: kshiftrw $5, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
; AVX512BW-NEXT: andb $1, %al
-; AVX512BW-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: ## kill: def $al killed $al killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -442,7 +442,7 @@ define i8 @zext_test3(<16 x i32> %a, <16
; AVX512DQ-NEXT: kshiftrw $5, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
; AVX512DQ-NEXT: andb $1, %al
-; AVX512DQ-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512DQ-NEXT: ## kill: def $al killed $al killed $eax
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%cmp_res = icmp ugt <16 x i32> %a, %b
@@ -498,14 +498,14 @@ entry:
define <4 x i32> @test4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64> %y1) {
; KNL-LABEL: test4:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %ymm3 killed %ymm3 def %zmm3
-; KNL-NEXT: ## kill: def %ymm2 killed %ymm2 def %zmm2
-; KNL-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $ymm3 killed $ymm3 def $zmm3
+; KNL-NEXT: ## kill: def $ymm2 killed $ymm2 def $zmm2
+; KNL-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpcmpleq %zmm1, %zmm0, %k1
; KNL-NEXT: vpcmpgtq %zmm3, %zmm2, %k1 {%k1}
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -519,27 +519,27 @@ define <4 x i32> @test4(<4 x i64> %x, <4
;
; AVX512BW-LABEL: test4:
; AVX512BW: ## %bb.0:
-; AVX512BW-NEXT: ## kill: def %ymm3 killed %ymm3 def %zmm3
-; AVX512BW-NEXT: ## kill: def %ymm2 killed %ymm2 def %zmm2
-; AVX512BW-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512BW-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512BW-NEXT: ## kill: def $ymm3 killed $ymm3 def $zmm3
+; AVX512BW-NEXT: ## kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512BW-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512BW-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpcmpleq %zmm1, %zmm0, %k1
; AVX512BW-NEXT: vpcmpgtq %zmm3, %zmm2, %k1 {%k1}
; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test4:
; AVX512DQ: ## %bb.0:
-; AVX512DQ-NEXT: ## kill: def %ymm3 killed %ymm3 def %zmm3
-; AVX512DQ-NEXT: ## kill: def %ymm2 killed %ymm2 def %zmm2
-; AVX512DQ-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512DQ-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512DQ-NEXT: ## kill: def $ymm3 killed $ymm3 def $zmm3
+; AVX512DQ-NEXT: ## kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512DQ-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512DQ-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512DQ-NEXT: vpcmpleq %zmm1, %zmm0, %k1
; AVX512DQ-NEXT: vpcmpgtq %zmm3, %zmm2, %k0 {%k1}
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
-; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%x_gt_y = icmp sgt <4 x i64> %x, %y
@@ -552,14 +552,14 @@ define <4 x i32> @test4(<4 x i64> %x, <4
define <2 x i64> @test5(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1, <2 x i64> %y1) {
; KNL-LABEL: test5:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %xmm3 killed %xmm3 def %zmm3
-; KNL-NEXT: ## kill: def %xmm2 killed %xmm2 def %zmm2
-; KNL-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; KNL-NEXT: ## kill: def $xmm3 killed $xmm3 def $zmm3
+; KNL-NEXT: ## kill: def $xmm2 killed $xmm2 def $zmm2
+; KNL-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; KNL-NEXT: vpcmpleq %zmm3, %zmm2, %k1
; KNL-NEXT: vpcmpgtq %zmm0, %zmm1, %k1 {%k1}
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -572,27 +572,27 @@ define <2 x i64> @test5(<2 x i64> %x, <2
;
; AVX512BW-LABEL: test5:
; AVX512BW: ## %bb.0:
-; AVX512BW-NEXT: ## kill: def %xmm3 killed %xmm3 def %zmm3
-; AVX512BW-NEXT: ## kill: def %xmm2 killed %xmm2 def %zmm2
-; AVX512BW-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm3 killed $xmm3 def $zmm3
+; AVX512BW-NEXT: ## kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512BW-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512BW-NEXT: vpcmpleq %zmm3, %zmm2, %k1
; AVX512BW-NEXT: vpcmpgtq %zmm0, %zmm1, %k1 {%k1}
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test5:
; AVX512DQ: ## %bb.0:
-; AVX512DQ-NEXT: ## kill: def %xmm3 killed %xmm3 def %zmm3
-; AVX512DQ-NEXT: ## kill: def %xmm2 killed %xmm2 def %zmm2
-; AVX512DQ-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512DQ-NEXT: ## kill: def $xmm3 killed $xmm3 def $zmm3
+; AVX512DQ-NEXT: ## kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512DQ-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512DQ-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512DQ-NEXT: vpcmpleq %zmm3, %zmm2, %k1
; AVX512DQ-NEXT: vpcmpgtq %zmm0, %zmm1, %k0 {%k1}
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
-; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%x_gt_y = icmp slt <2 x i64> %x, %y
@@ -713,13 +713,13 @@ define <16 x i8> @test8(<16 x i32>%a, <1
; AVX512BW-NEXT: ## %bb.2:
; AVX512BW-NEXT: vpcmpltud %zmm2, %zmm1, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
; AVX512BW-NEXT: LBB17_1:
; AVX512BW-NEXT: vpcmpgtd %zmm2, %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -789,7 +789,7 @@ define <16 x i1> @test9(<16 x i1>%a, <16
; AVX512BW-NEXT: LBB18_3:
; AVX512BW-NEXT: vpmovb2m %zmm0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -831,7 +831,7 @@ define <4 x i1> @test11(<4 x i1>%a, <4 x
; KNL-NEXT: LBB20_3:
; KNL-NEXT: vptestmd %zmm0, %zmm0, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -861,7 +861,7 @@ define <4 x i1> @test11(<4 x i1>%a, <4 x
; AVX512BW-NEXT: LBB20_3:
; AVX512BW-NEXT: vptestmd %zmm0, %zmm0, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -877,7 +877,7 @@ define <4 x i1> @test11(<4 x i1>%a, <4 x
; AVX512DQ-NEXT: LBB20_3:
; AVX512DQ-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
-; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%mask = icmp sgt i32 %a1, %b1
@@ -961,7 +961,7 @@ define <16 x i1> @test15(i32 %x, i32 %y)
; AVX512BW-NEXT: cmovgw %ax, %cx
; AVX512BW-NEXT: kmovd %ecx, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1192,7 +1192,7 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; KNL-NEXT: korw %k0, %k1, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -1230,7 +1230,7 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; AVX512BW-NEXT: kshiftlw $7, %k0, %k0
; AVX512BW-NEXT: korw %k0, %k1, %k0
; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -1251,7 +1251,7 @@ define <8 x i1> @test18(i8 %a, i16 %y) {
; AVX512DQ-NEXT: korb %k0, %k1, %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: vpmovdw %zmm0, %ymm0
-; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; AVX512DQ-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = bitcast i8 %a to <8 x i1>
@@ -1997,7 +1997,7 @@ define <2 x i16> @load_2i1(<2 x i1>* %a)
; KNL-NEXT: movzbl (%rdi), %eax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -2012,7 +2012,7 @@ define <2 x i16> @load_2i1(<2 x i1>* %a)
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -2020,7 +2020,7 @@ define <2 x i16> @load_2i1(<2 x i1>* %a)
; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovb (%rdi), %k0
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
-; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = load <2 x i1>, <2 x i1>* %a
@@ -2034,7 +2034,7 @@ define <4 x i16> @load_4i1(<4 x i1>* %a)
; KNL-NEXT: movzbl (%rdi), %eax
; KNL-NEXT: kmovw %eax, %k1
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -2049,7 +2049,7 @@ define <4 x i16> @load_4i1(<4 x i1>* %a)
; AVX512BW-NEXT: movzbl (%rdi), %eax
; AVX512BW-NEXT: kmovd %eax, %k1
; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -2057,7 +2057,7 @@ define <4 x i16> @load_4i1(<4 x i1>* %a)
; AVX512DQ: ## %bb.0:
; AVX512DQ-NEXT: kmovb (%rdi), %k0
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
-; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512DQ-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512DQ-NEXT: vzeroupper
; AVX512DQ-NEXT: retq
%b = load <4 x i1>, <4 x i1>* %a
@@ -2494,7 +2494,7 @@ define i16 @test_v16i1_add(i16 %x, i16 %
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
+; KNL-NEXT: ## kill: def $ax killed $ax killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_v16i1_add:
@@ -2503,7 +2503,7 @@ define i16 @test_v16i1_add(i16 %x, i16 %
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kxorw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
+; SKX-NEXT: ## kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v16i1_add:
@@ -2512,7 +2512,7 @@ define i16 @test_v16i1_add(i16 %x, i16 %
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v16i1_add:
@@ -2521,7 +2521,7 @@ define i16 @test_v16i1_add(i16 %x, i16 %
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512DQ-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
@@ -2537,7 +2537,7 @@ define i16 @test_v16i1_sub(i16 %x, i16 %
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
+; KNL-NEXT: ## kill: def $ax killed $ax killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_v16i1_sub:
@@ -2546,7 +2546,7 @@ define i16 @test_v16i1_sub(i16 %x, i16 %
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kxorw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
+; SKX-NEXT: ## kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v16i1_sub:
@@ -2555,7 +2555,7 @@ define i16 @test_v16i1_sub(i16 %x, i16 %
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v16i1_sub:
@@ -2564,7 +2564,7 @@ define i16 @test_v16i1_sub(i16 %x, i16 %
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kxorw %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512DQ-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
@@ -2580,7 +2580,7 @@ define i16 @test_v16i1_mul(i16 %x, i16 %
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kandw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
+; KNL-NEXT: ## kill: def $ax killed $ax killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_v16i1_mul:
@@ -2589,7 +2589,7 @@ define i16 @test_v16i1_mul(i16 %x, i16 %
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kandw %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
+; SKX-NEXT: ## kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v16i1_mul:
@@ -2598,7 +2598,7 @@ define i16 @test_v16i1_mul(i16 %x, i16 %
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kandw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v16i1_mul:
@@ -2607,7 +2607,7 @@ define i16 @test_v16i1_mul(i16 %x, i16 %
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kandw %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512DQ-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
@@ -2623,7 +2623,7 @@ define i8 @test_v8i1_add(i8 %x, i8 %y) {
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_v8i1_add:
@@ -2632,7 +2632,7 @@ define i8 @test_v8i1_add(i8 %x, i8 %y) {
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kxorb %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v8i1_add:
@@ -2641,7 +2641,7 @@ define i8 @test_v8i1_add(i8 %x, i8 %y) {
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: ## kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v8i1_add:
@@ -2650,7 +2650,7 @@ define i8 @test_v8i1_add(i8 %x, i8 %y) {
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kxorb %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512DQ-NEXT: ## kill: def $al killed $al killed $eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
@@ -2666,7 +2666,7 @@ define i8 @test_v8i1_sub(i8 %x, i8 %y) {
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kxorw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_v8i1_sub:
@@ -2675,7 +2675,7 @@ define i8 @test_v8i1_sub(i8 %x, i8 %y) {
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kxorb %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v8i1_sub:
@@ -2684,7 +2684,7 @@ define i8 @test_v8i1_sub(i8 %x, i8 %y) {
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: ## kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v8i1_sub:
@@ -2693,7 +2693,7 @@ define i8 @test_v8i1_sub(i8 %x, i8 %y) {
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kxorb %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512DQ-NEXT: ## kill: def $al killed $al killed $eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
@@ -2709,7 +2709,7 @@ define i8 @test_v8i1_mul(i8 %x, i8 %y) {
; KNL-NEXT: kmovw %esi, %k1
; KNL-NEXT: kandw %k1, %k0, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %al killed %al killed %eax
+; KNL-NEXT: ## kill: def $al killed $al killed $eax
; KNL-NEXT: retq
;
; SKX-LABEL: test_v8i1_mul:
@@ -2718,7 +2718,7 @@ define i8 @test_v8i1_mul(i8 %x, i8 %y) {
; SKX-NEXT: kmovd %esi, %k1
; SKX-NEXT: kandb %k1, %k0, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %al killed %al killed %eax
+; SKX-NEXT: ## kill: def $al killed $al killed $eax
; SKX-NEXT: retq
;
; AVX512BW-LABEL: test_v8i1_mul:
@@ -2727,7 +2727,7 @@ define i8 @test_v8i1_mul(i8 %x, i8 %y) {
; AVX512BW-NEXT: kmovd %esi, %k1
; AVX512BW-NEXT: kandw %k1, %k0, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512BW-NEXT: ## kill: def $al killed $al killed $eax
; AVX512BW-NEXT: retq
;
; AVX512DQ-LABEL: test_v8i1_mul:
@@ -2736,7 +2736,7 @@ define i8 @test_v8i1_mul(i8 %x, i8 %y) {
; AVX512DQ-NEXT: kmovw %esi, %k1
; AVX512DQ-NEXT: kandb %k1, %k0, %k0
; AVX512DQ-NEXT: kmovw %k0, %eax
-; AVX512DQ-NEXT: ## kill: def %al killed %al killed %eax
+; AVX512DQ-NEXT: ## kill: def $al killed $al killed $eax
; AVX512DQ-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
Modified: llvm/trunk/test/CodeGen/X86/avx512-memfold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-memfold.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-memfold.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-memfold.ll Wed Jan 31 14:04:26 2018
@@ -7,7 +7,7 @@ define i8 @test_int_x86_avx512_mask_cmp_
; CHECK-NEXT: kmovw %esi, %k1
; CHECK-NEXT: vcmpunordss (%rdi), %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%b.val = load float, float* %b
%bv0 = insertelement <4 x float> undef, float %b.val, i32 0
Modified: llvm/trunk/test/CodeGen/X86/avx512-regcall-Mask.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-regcall-Mask.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-regcall-Mask.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-regcall-Mask.ll Wed Jan 31 14:04:26 2018
@@ -310,9 +310,9 @@ define x86_regcallcc i32 @test_argv32i1(
; X32-NEXT: vpmovm2b %k2, %zmm0
; X32-NEXT: vpmovm2b %k1, %zmm1
; X32-NEXT: vpmovm2b %k0, %zmm2
-; X32-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
-; X32-NEXT: # kill: def %ymm1 killed %ymm1 killed %zmm1
-; X32-NEXT: # kill: def %ymm2 killed %ymm2 killed %zmm2
+; X32-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
+; X32-NEXT: # kill: def $ymm1 killed $ymm1 killed $zmm1
+; X32-NEXT: # kill: def $ymm2 killed $ymm2 killed $zmm2
; X32-NEXT: calll _test_argv32i1helper
; X32-NEXT: vmovups (%esp), %xmm4 # 16-byte Reload
; X32-NEXT: vmovups {{[0-9]+}}(%esp), %xmm5 # 16-byte Reload
@@ -340,9 +340,9 @@ define x86_regcallcc i32 @test_argv32i1(
; WIN64-NEXT: vpmovm2b %k2, %zmm0
; WIN64-NEXT: vpmovm2b %k1, %zmm1
; WIN64-NEXT: vpmovm2b %k0, %zmm2
-; WIN64-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
-; WIN64-NEXT: # kill: def %ymm1 killed %ymm1 killed %zmm1
-; WIN64-NEXT: # kill: def %ymm2 killed %ymm2 killed %zmm2
+; WIN64-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
+; WIN64-NEXT: # kill: def $ymm1 killed $ymm1 killed $zmm1
+; WIN64-NEXT: # kill: def $ymm2 killed $ymm2 killed $zmm2
; WIN64-NEXT: callq test_argv32i1helper
; WIN64-NEXT: nop
; WIN64-NEXT: addq $32, %rsp
@@ -384,9 +384,9 @@ define x86_regcallcc i32 @test_argv32i1(
; LINUXOSX64-NEXT: vpmovm2b %k2, %zmm0
; LINUXOSX64-NEXT: vpmovm2b %k1, %zmm1
; LINUXOSX64-NEXT: vpmovm2b %k0, %zmm2
-; LINUXOSX64-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
-; LINUXOSX64-NEXT: # kill: def %ymm1 killed %ymm1 killed %zmm1
-; LINUXOSX64-NEXT: # kill: def %ymm2 killed %ymm2 killed %zmm2
+; LINUXOSX64-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
+; LINUXOSX64-NEXT: # kill: def $ymm1 killed $ymm1 killed $zmm1
+; LINUXOSX64-NEXT: # kill: def $ymm2 killed $ymm2 killed $zmm2
; LINUXOSX64-NEXT: callq test_argv32i1helper
; LINUXOSX64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload
; LINUXOSX64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm9 # 16-byte Reload
@@ -538,9 +538,9 @@ define x86_regcallcc i16 @test_argv16i1(
; X32-NEXT: vpmovm2b %k2, %zmm0
; X32-NEXT: vpmovm2b %k1, %zmm1
; X32-NEXT: vpmovm2b %k0, %zmm2
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
-; X32-NEXT: # kill: def %xmm1 killed %xmm1 killed %zmm1
-; X32-NEXT: # kill: def %xmm2 killed %xmm2 killed %zmm2
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; X32-NEXT: # kill: def $xmm1 killed $xmm1 killed $zmm1
+; X32-NEXT: # kill: def $xmm2 killed $xmm2 killed $zmm2
; X32-NEXT: vzeroupper
; X32-NEXT: calll _test_argv16i1helper
; X32-NEXT: vmovups (%esp), %xmm4 # 16-byte Reload
@@ -568,9 +568,9 @@ define x86_regcallcc i16 @test_argv16i1(
; WIN64-NEXT: vpmovm2b %k2, %zmm0
; WIN64-NEXT: vpmovm2b %k1, %zmm1
; WIN64-NEXT: vpmovm2b %k0, %zmm2
-; WIN64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
-; WIN64-NEXT: # kill: def %xmm1 killed %xmm1 killed %zmm1
-; WIN64-NEXT: # kill: def %xmm2 killed %xmm2 killed %zmm2
+; WIN64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; WIN64-NEXT: # kill: def $xmm1 killed $xmm1 killed $zmm1
+; WIN64-NEXT: # kill: def $xmm2 killed $xmm2 killed $zmm2
; WIN64-NEXT: vzeroupper
; WIN64-NEXT: callq test_argv16i1helper
; WIN64-NEXT: nop
@@ -612,9 +612,9 @@ define x86_regcallcc i16 @test_argv16i1(
; LINUXOSX64-NEXT: vpmovm2b %k2, %zmm0
; LINUXOSX64-NEXT: vpmovm2b %k1, %zmm1
; LINUXOSX64-NEXT: vpmovm2b %k0, %zmm2
-; LINUXOSX64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
-; LINUXOSX64-NEXT: # kill: def %xmm1 killed %xmm1 killed %zmm1
-; LINUXOSX64-NEXT: # kill: def %xmm2 killed %xmm2 killed %zmm2
+; LINUXOSX64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; LINUXOSX64-NEXT: # kill: def $xmm1 killed $xmm1 killed $zmm1
+; LINUXOSX64-NEXT: # kill: def $xmm2 killed $xmm2 killed $zmm2
; LINUXOSX64-NEXT: vzeroupper
; LINUXOSX64-NEXT: callq test_argv16i1helper
; LINUXOSX64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload
@@ -705,9 +705,9 @@ define i16 @caller_retv16i1() #0 {
; X32-LABEL: caller_retv16i1:
; X32: # %bb.0: # %entry
; X32-NEXT: calll _test_retv16i1
-; X32-NEXT: # kill: def %ax killed %ax def %eax
+; X32-NEXT: # kill: def $ax killed $ax def $eax
; X32-NEXT: incl %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
;
; WIN64-LABEL: caller_retv16i1:
@@ -724,9 +724,9 @@ define i16 @caller_retv16i1() #0 {
; WIN64-NEXT: .seh_savexmm 6, 0
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: callq test_retv16i1
-; WIN64-NEXT: # kill: def %ax killed %ax def %eax
+; WIN64-NEXT: # kill: def $ax killed $ax def $eax
; WIN64-NEXT: incl %eax
-; WIN64-NEXT: # kill: def %ax killed %ax killed %eax
+; WIN64-NEXT: # kill: def $ax killed $ax killed $eax
; WIN64-NEXT: vmovaps (%rsp), %xmm6 # 16-byte Reload
; WIN64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm7 # 16-byte Reload
; WIN64-NEXT: addq $40, %rsp
@@ -742,9 +742,9 @@ define i16 @caller_retv16i1() #0 {
; LINUXOSX64-NEXT: pushq %rax
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: callq test_retv16i1
-; LINUXOSX64-NEXT: # kill: def %ax killed %ax def %eax
+; LINUXOSX64-NEXT: # kill: def $ax killed $ax def $eax
; LINUXOSX64-NEXT: incl %eax
-; LINUXOSX64-NEXT: # kill: def %ax killed %ax killed %eax
+; LINUXOSX64-NEXT: # kill: def $ax killed $ax killed $eax
; LINUXOSX64-NEXT: popq %rcx
; LINUXOSX64-NEXT: retq
entry:
@@ -771,9 +771,9 @@ define x86_regcallcc i8 @test_argv8i1(<8
; X32-NEXT: vpmovm2w %k2, %zmm0
; X32-NEXT: vpmovm2w %k1, %zmm1
; X32-NEXT: vpmovm2w %k0, %zmm2
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
-; X32-NEXT: # kill: def %xmm1 killed %xmm1 killed %zmm1
-; X32-NEXT: # kill: def %xmm2 killed %xmm2 killed %zmm2
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; X32-NEXT: # kill: def $xmm1 killed $xmm1 killed $zmm1
+; X32-NEXT: # kill: def $xmm2 killed $xmm2 killed $zmm2
; X32-NEXT: vzeroupper
; X32-NEXT: calll _test_argv8i1helper
; X32-NEXT: vmovups (%esp), %xmm4 # 16-byte Reload
@@ -801,9 +801,9 @@ define x86_regcallcc i8 @test_argv8i1(<8
; WIN64-NEXT: vpmovm2w %k2, %zmm0
; WIN64-NEXT: vpmovm2w %k1, %zmm1
; WIN64-NEXT: vpmovm2w %k0, %zmm2
-; WIN64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
-; WIN64-NEXT: # kill: def %xmm1 killed %xmm1 killed %zmm1
-; WIN64-NEXT: # kill: def %xmm2 killed %xmm2 killed %zmm2
+; WIN64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; WIN64-NEXT: # kill: def $xmm1 killed $xmm1 killed $zmm1
+; WIN64-NEXT: # kill: def $xmm2 killed $xmm2 killed $zmm2
; WIN64-NEXT: vzeroupper
; WIN64-NEXT: callq test_argv8i1helper
; WIN64-NEXT: nop
@@ -845,9 +845,9 @@ define x86_regcallcc i8 @test_argv8i1(<8
; LINUXOSX64-NEXT: vpmovm2w %k2, %zmm0
; LINUXOSX64-NEXT: vpmovm2w %k1, %zmm1
; LINUXOSX64-NEXT: vpmovm2w %k0, %zmm2
-; LINUXOSX64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
-; LINUXOSX64-NEXT: # kill: def %xmm1 killed %xmm1 killed %zmm1
-; LINUXOSX64-NEXT: # kill: def %xmm2 killed %xmm2 killed %zmm2
+; LINUXOSX64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; LINUXOSX64-NEXT: # kill: def $xmm1 killed $xmm1 killed $zmm1
+; LINUXOSX64-NEXT: # kill: def $xmm2 killed $xmm2 killed $zmm2
; LINUXOSX64-NEXT: vzeroupper
; LINUXOSX64-NEXT: callq test_argv8i1helper
; LINUXOSX64-NEXT: vmovaps (%rsp), %xmm8 # 16-byte Reload
@@ -938,10 +938,10 @@ define <8 x i1> @caller_retv8i1() #0 {
; X32-LABEL: caller_retv8i1:
; X32: # %bb.0: # %entry
; X32-NEXT: calll _test_retv8i1
-; X32-NEXT: # kill: def %al killed %al def %eax
+; X32-NEXT: # kill: def $al killed $al def $eax
; X32-NEXT: kmovd %eax, %k0
; X32-NEXT: vpmovm2w %k0, %zmm0
-; X32-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; X32-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; X32-NEXT: vzeroupper
; X32-NEXT: retl
;
@@ -959,10 +959,10 @@ define <8 x i1> @caller_retv8i1() #0 {
; WIN64-NEXT: .seh_savexmm 6, 0
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: callq test_retv8i1
-; WIN64-NEXT: # kill: def %al killed %al def %eax
+; WIN64-NEXT: # kill: def $al killed $al def $eax
; WIN64-NEXT: kmovd %eax, %k0
; WIN64-NEXT: vpmovm2w %k0, %zmm0
-; WIN64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; WIN64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; WIN64-NEXT: vmovaps (%rsp), %xmm6 # 16-byte Reload
; WIN64-NEXT: vmovaps {{[0-9]+}}(%rsp), %xmm7 # 16-byte Reload
; WIN64-NEXT: addq $40, %rsp
@@ -979,10 +979,10 @@ define <8 x i1> @caller_retv8i1() #0 {
; LINUXOSX64-NEXT: pushq %rax
; LINUXOSX64-NEXT: .cfi_def_cfa_offset 16
; LINUXOSX64-NEXT: callq test_retv8i1
-; LINUXOSX64-NEXT: # kill: def %al killed %al def %eax
+; LINUXOSX64-NEXT: # kill: def $al killed $al def $eax
; LINUXOSX64-NEXT: kmovd %eax, %k0
; LINUXOSX64-NEXT: vpmovm2w %k0, %zmm0
-; LINUXOSX64-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
+; LINUXOSX64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; LINUXOSX64-NEXT: popq %rax
; LINUXOSX64-NEXT: vzeroupper
; LINUXOSX64-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/avx512-regcall-NoMask.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-regcall-NoMask.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-regcall-NoMask.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-regcall-NoMask.ll Wed Jan 31 14:04:26 2018
@@ -8,19 +8,19 @@ define x86_regcallcc i1 @test_argReti1(i
; X32-LABEL: test_argReti1:
; X32: # %bb.0:
; X32-NEXT: incb %al
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
;
; WIN64-LABEL: test_argReti1:
; WIN64: # %bb.0:
; WIN64-NEXT: incb %al
-; WIN64-NEXT: # kill: def %al killed %al killed %eax
+; WIN64-NEXT: # kill: def $al killed $al killed $eax
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argReti1:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: incb %al
-; LINUXOSX64-NEXT: # kill: def %al killed %al killed %eax
+; LINUXOSX64-NEXT: # kill: def $al killed $al killed $eax
; LINUXOSX64-NEXT: retq
%add = add i1 %a, 1
ret i1 %add
@@ -75,19 +75,19 @@ define x86_regcallcc i8 @test_argReti8(i
; X32-LABEL: test_argReti8:
; X32: # %bb.0:
; X32-NEXT: incb %al
-; X32-NEXT: # kill: def %al killed %al killed %eax
+; X32-NEXT: # kill: def $al killed $al killed $eax
; X32-NEXT: retl
;
; WIN64-LABEL: test_argReti8:
; WIN64: # %bb.0:
; WIN64-NEXT: incb %al
-; WIN64-NEXT: # kill: def %al killed %al killed %eax
+; WIN64-NEXT: # kill: def $al killed $al killed $eax
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argReti8:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: incb %al
-; LINUXOSX64-NEXT: # kill: def %al killed %al killed %eax
+; LINUXOSX64-NEXT: # kill: def $al killed $al killed $eax
; LINUXOSX64-NEXT: retq
%add = add i8 %a, 1
ret i8 %add
@@ -142,19 +142,19 @@ define x86_regcallcc i16 @test_argReti16
; X32-LABEL: test_argReti16:
; X32: # %bb.0:
; X32-NEXT: incl %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: retl
;
; WIN64-LABEL: test_argReti16:
; WIN64: # %bb.0:
; WIN64-NEXT: incl %eax
-; WIN64-NEXT: # kill: def %ax killed %ax killed %eax
+; WIN64-NEXT: # kill: def $ax killed $ax killed $eax
; WIN64-NEXT: retq
;
; LINUXOSX64-LABEL: test_argReti16:
; LINUXOSX64: # %bb.0:
; LINUXOSX64-NEXT: incl %eax
-; LINUXOSX64-NEXT: # kill: def %ax killed %ax killed %eax
+; LINUXOSX64-NEXT: # kill: def $ax killed $ax killed $eax
; LINUXOSX64-NEXT: retq
%add = add i16 %a, 1
ret i16 %add
@@ -167,9 +167,9 @@ define x86_regcallcc i16 @test_CallargRe
; X32-NEXT: pushl %esp
; X32-NEXT: incl %eax
; X32-NEXT: calll _test_argReti16
-; X32-NEXT: # kill: def %ax killed %ax def %eax
+; X32-NEXT: # kill: def $ax killed $ax def $eax
; X32-NEXT: incl %eax
-; X32-NEXT: # kill: def %ax killed %ax killed %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
; X32-NEXT: popl %esp
; X32-NEXT: retl
;
@@ -180,9 +180,9 @@ define x86_regcallcc i16 @test_CallargRe
; WIN64-NEXT: .seh_endprologue
; WIN64-NEXT: incl %eax
; WIN64-NEXT: callq test_argReti16
-; WIN64-NEXT: # kill: def %ax killed %ax def %eax
+; WIN64-NEXT: # kill: def $ax killed $ax def $eax
; WIN64-NEXT: incl %eax
-; WIN64-NEXT: # kill: def %ax killed %ax killed %eax
+; WIN64-NEXT: # kill: def $ax killed $ax killed $eax
; WIN64-NEXT: popq %rsp
; WIN64-NEXT: retq
; WIN64-NEXT: .seh_handlerdata
@@ -196,9 +196,9 @@ define x86_regcallcc i16 @test_CallargRe
; LINUXOSX64-NEXT: .cfi_offset %rsp, -16
; LINUXOSX64-NEXT: incl %eax
; LINUXOSX64-NEXT: callq test_argReti16
-; LINUXOSX64-NEXT: # kill: def %ax killed %ax def %eax
+; LINUXOSX64-NEXT: # kill: def $ax killed $ax def $eax
; LINUXOSX64-NEXT: incl %eax
-; LINUXOSX64-NEXT: # kill: def %ax killed %ax killed %eax
+; LINUXOSX64-NEXT: # kill: def $ax killed $ax killed $eax
; LINUXOSX64-NEXT: popq %rsp
; LINUXOSX64-NEXT: retq
%b = add i16 %a, 1
Modified: llvm/trunk/test/CodeGen/X86/avx512-schedule.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-schedule.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-schedule.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-schedule.ll Wed Jan 31 14:04:26 2018
@@ -4281,7 +4281,7 @@ define i16 @trunc_16i8_to_16i1(<16 x i8>
; GENERIC-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovb2m %xmm0, %k0 # sched: [1:0.33]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: trunc_16i8_to_16i1:
@@ -4289,7 +4289,7 @@ define i16 @trunc_16i8_to_16i1(<16 x i8>
; SKX-NEXT: vpsllw $7, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovb2m %xmm0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%mask_b = trunc <16 x i8>%a to <16 x i1>
%mask = bitcast <16 x i1> %mask_b to i16
@@ -4302,7 +4302,7 @@ define i16 @trunc_16i32_to_16i1(<16 x i3
; GENERIC-NEXT: vpslld $31, %zmm0, %zmm0 # sched: [3:1.00]
; GENERIC-NEXT: vptestmd %zmm0, %zmm0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: vzeroupper # sched: [100:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
@@ -4311,7 +4311,7 @@ define i16 @trunc_16i32_to_16i1(<16 x i3
; SKX-NEXT: vpslld $31, %zmm0, %zmm0 # sched: [1:0.50]
; SKX-NEXT: vptestmd %zmm0, %zmm0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%mask_b = trunc <16 x i32>%a to <16 x i1>
@@ -4347,7 +4347,7 @@ define i8 @trunc_8i16_to_8i1(<8 x i16> %
; GENERIC-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:1.00]
; GENERIC-NEXT: vpmovw2m %xmm0, %k0 # sched: [1:0.33]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %al killed %al killed %eax
+; GENERIC-NEXT: # kill: def $al killed $al killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: trunc_8i16_to_8i1:
@@ -4355,7 +4355,7 @@ define i8 @trunc_8i16_to_8i1(<8 x i16> %
; SKX-NEXT: vpsllw $15, %xmm0, %xmm0 # sched: [1:0.50]
; SKX-NEXT: vpmovw2m %xmm0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %al killed %al killed %eax
+; SKX-NEXT: # kill: def $al killed $al killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%mask_b = trunc <8 x i16>%a to <8 x i1>
%mask = bitcast <8 x i1> %mask_b to i8
@@ -4392,7 +4392,7 @@ define i16 @trunc_i32_to_i1(i32 %a) {
; GENERIC-NEXT: kmovw %edi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: korw %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: trunc_i32_to_i1:
@@ -4405,7 +4405,7 @@ define i16 @trunc_i32_to_i1(i32 %a) {
; SKX-NEXT: kmovw %edi, %k1 # sched: [1:1.00]
; SKX-NEXT: korw %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%a_i = trunc i32 %a to i1
%maskv = insertelement <16 x i1> <i1 true, i1 false, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, i1 %a_i, i32 0
@@ -6666,7 +6666,7 @@ define i16 @mask16(i16 %x) {
; GENERIC-NEXT: kmovd %edi, %k0 # sched: [1:0.33]
; GENERIC-NEXT: knotw %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mask16:
@@ -6674,7 +6674,7 @@ define i16 @mask16(i16 %x) {
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: knotw %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i16 %x to <16 x i1>
%m1 = xor <16 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
@@ -6709,7 +6709,7 @@ define i8 @mask8(i8 %x) {
; GENERIC-NEXT: kmovd %edi, %k0 # sched: [1:0.33]
; GENERIC-NEXT: knotb %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %al killed %al killed %eax
+; GENERIC-NEXT: # kill: def $al killed $al killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mask8:
@@ -6717,7 +6717,7 @@ define i8 @mask8(i8 %x) {
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: knotb %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %al killed %al killed %eax
+; SKX-NEXT: # kill: def $al killed $al killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i8 %x to <8 x i1>
%m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
@@ -6826,7 +6826,7 @@ define i16 @mand16_mem(<16 x i1>* %x, <1
; GENERIC-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: korw %k0, %k2, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: mand16_mem:
@@ -6837,7 +6837,7 @@ define i16 @mand16_mem(<16 x i1>* %x, <1
; SKX-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: korw %k0, %k2, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%ma = load <16 x i1>, <16 x i1>* %x
%mb = load <16 x i1>, <16 x i1>* %y
@@ -6854,7 +6854,7 @@ define i8 @shuf_test1(i16 %v) nounwind {
; GENERIC-NEXT: kmovd %edi, %k0 # sched: [1:0.33]
; GENERIC-NEXT: kshiftrw $8, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %al killed %al killed %eax
+; GENERIC-NEXT: # kill: def $al killed $al killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: shuf_test1:
@@ -6862,7 +6862,7 @@ define i8 @shuf_test1(i16 %v) nounwind {
; SKX-NEXT: kmovd %edi, %k0 # sched: [1:1.00]
; SKX-NEXT: kshiftrw $8, %k0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %al killed %al killed %eax
+; SKX-NEXT: # kill: def $al killed $al killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%v1 = bitcast i16 %v to <16 x i1>
%mask = shufflevector <16 x i1> %v1, <16 x i1> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
@@ -6901,7 +6901,7 @@ define i16 @zext_test2(<16 x i32> %a, <1
; GENERIC-NEXT: kshiftrw $5, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
; GENERIC-NEXT: andl $1, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: vzeroupper # sched: [100:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
@@ -6911,7 +6911,7 @@ define i16 @zext_test2(<16 x i32> %a, <1
; SKX-NEXT: kshiftrw $5, %k0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
; SKX-NEXT: andl $1, %eax # sched: [1:0.25]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%cmp_res = icmp ugt <16 x i32> %a, %b
@@ -6927,7 +6927,7 @@ define i8 @zext_test3(<16 x i32> %a, <16
; GENERIC-NEXT: kshiftrw $5, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
; GENERIC-NEXT: andb $1, %al # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %al killed %al killed %eax
+; GENERIC-NEXT: # kill: def $al killed $al killed $eax
; GENERIC-NEXT: vzeroupper # sched: [100:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
@@ -6937,7 +6937,7 @@ define i8 @zext_test3(<16 x i32> %a, <16
; SKX-NEXT: kshiftrw $5, %k0, %k0 # sched: [3:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
; SKX-NEXT: andb $1, %al # sched: [1:0.25]
-; SKX-NEXT: # kill: def %al killed %al killed %eax
+; SKX-NEXT: # kill: def $al killed $al killed $eax
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
%cmp_res = icmp ugt <16 x i32> %a, %b
@@ -8027,7 +8027,7 @@ define i16 @test_v16i1_add(i16 %x, i16 %
; GENERIC-NEXT: kmovd %esi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v16i1_add:
@@ -8036,7 +8036,7 @@ define i16 @test_v16i1_add(i16 %x, i16 %
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
@@ -8052,7 +8052,7 @@ define i16 @test_v16i1_sub(i16 %x, i16 %
; GENERIC-NEXT: kmovd %esi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v16i1_sub:
@@ -8061,7 +8061,7 @@ define i16 @test_v16i1_sub(i16 %x, i16 %
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kxorw %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
@@ -8077,7 +8077,7 @@ define i16 @test_v16i1_mul(i16 %x, i16 %
; GENERIC-NEXT: kmovd %esi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: kandw %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %ax killed %ax killed %eax
+; GENERIC-NEXT: # kill: def $ax killed $ax killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v16i1_mul:
@@ -8086,7 +8086,7 @@ define i16 @test_v16i1_mul(i16 %x, i16 %
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kandw %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %ax killed %ax killed %eax
+; SKX-NEXT: # kill: def $ax killed $ax killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i16 %x to <16 x i1>
%m1 = bitcast i16 %y to <16 x i1>
@@ -8102,7 +8102,7 @@ define i8 @test_v8i1_add(i8 %x, i8 %y) {
; GENERIC-NEXT: kmovd %esi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: kxorb %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %al killed %al killed %eax
+; GENERIC-NEXT: # kill: def $al killed $al killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v8i1_add:
@@ -8111,7 +8111,7 @@ define i8 @test_v8i1_add(i8 %x, i8 %y) {
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kxorb %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %al killed %al killed %eax
+; SKX-NEXT: # kill: def $al killed $al killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
@@ -8127,7 +8127,7 @@ define i8 @test_v8i1_sub(i8 %x, i8 %y) {
; GENERIC-NEXT: kmovd %esi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: kxorb %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %al killed %al killed %eax
+; GENERIC-NEXT: # kill: def $al killed $al killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v8i1_sub:
@@ -8136,7 +8136,7 @@ define i8 @test_v8i1_sub(i8 %x, i8 %y) {
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kxorb %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %al killed %al killed %eax
+; SKX-NEXT: # kill: def $al killed $al killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
@@ -8152,7 +8152,7 @@ define i8 @test_v8i1_mul(i8 %x, i8 %y) {
; GENERIC-NEXT: kmovd %esi, %k1 # sched: [1:0.33]
; GENERIC-NEXT: kandb %k1, %k0, %k0 # sched: [1:1.00]
; GENERIC-NEXT: kmovd %k0, %eax # sched: [1:0.33]
-; GENERIC-NEXT: # kill: def %al killed %al killed %eax
+; GENERIC-NEXT: # kill: def $al killed $al killed $eax
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test_v8i1_mul:
@@ -8161,7 +8161,7 @@ define i8 @test_v8i1_mul(i8 %x, i8 %y) {
; SKX-NEXT: kmovd %esi, %k1 # sched: [1:1.00]
; SKX-NEXT: kandb %k1, %k0, %k0 # sched: [1:1.00]
; SKX-NEXT: kmovd %k0, %eax # sched: [3:1.00]
-; SKX-NEXT: # kill: def %al killed %al killed %eax
+; SKX-NEXT: # kill: def $al killed $al killed $eax
; SKX-NEXT: retq # sched: [7:1.00]
%m0 = bitcast i8 %x to <8 x i1>
%m1 = bitcast i8 %y to <8 x i1>
Modified: llvm/trunk/test/CodeGen/X86/avx512-select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-select.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-select.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-select.ll Wed Jan 31 14:04:26 2018
@@ -155,7 +155,7 @@ define i8 @select05_mem(<8 x i1>* %a.0,
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: korw %k1, %k0, %k0
; X86-NEXT: kmovw %k0, %eax
-; X86-NEXT: # kill: def %al killed %al killed %eax
+; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: retl
;
; X64-LABEL: select05_mem:
@@ -166,7 +166,7 @@ define i8 @select05_mem(<8 x i1>* %a.0,
; X64-NEXT: kmovw %eax, %k1
; X64-NEXT: korw %k1, %k0, %k0
; X64-NEXT: kmovw %k0, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
%mask = load <8 x i1> , <8 x i1>* %m
%a = load <8 x i1> , <8 x i1>* %a.0
@@ -205,7 +205,7 @@ define i8 @select06_mem(<8 x i1>* %a.0,
; X86-NEXT: kmovw %eax, %k1
; X86-NEXT: kandw %k1, %k0, %k0
; X86-NEXT: kmovw %k0, %eax
-; X86-NEXT: # kill: def %al killed %al killed %eax
+; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: retl
;
; X64-LABEL: select06_mem:
@@ -216,7 +216,7 @@ define i8 @select06_mem(<8 x i1>* %a.0,
; X64-NEXT: kmovw %eax, %k1
; X64-NEXT: kandw %k1, %k0, %k0
; X64-NEXT: kmovw %k0, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
%mask = load <8 x i1> , <8 x i1>* %m
%a = load <8 x i1> , <8 x i1>* %a.0
@@ -237,7 +237,7 @@ define i8 @select07(i8 %a.0, i8 %b.0, i8
; X86-NEXT: kandw %k0, %k1, %k0
; X86-NEXT: korw %k2, %k0, %k0
; X86-NEXT: kmovw %k0, %eax
-; X86-NEXT: # kill: def %al killed %al killed %eax
+; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: retl
;
; X64-LABEL: select07:
@@ -249,7 +249,7 @@ define i8 @select07(i8 %a.0, i8 %b.0, i8
; X64-NEXT: kandw %k0, %k1, %k0
; X64-NEXT: korw %k2, %k0, %k0
; X64-NEXT: kmovw %k0, %eax
-; X64-NEXT: # kill: def %al killed %al killed %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
%mask = bitcast i8 %m to <8 x i1>
%a = bitcast i8 %a.0 to <8 x i1>
Modified: llvm/trunk/test/CodeGen/X86/avx512-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-shift.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-shift.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-shift.ll Wed Jan 31 14:04:26 2018
@@ -34,7 +34,7 @@ define <4 x i64> @shift_4_i64(<4 x i64>
; KNL-NEXT: vpsrlq $1, %ymm0, %ymm0
; KNL-NEXT: vpsllq $12, %ymm0, %ymm0
; KNL-NEXT: vpsraq $12, %zmm0, %zmm0
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: shift_4_i64:
@@ -106,10 +106,10 @@ define <8 x i64> @variable_sra2(<8 x i64
define <4 x i64> @variable_sra3(<4 x i64> %x, <4 x i64> %y) {
; KNL-LABEL: variable_sra3:
; KNL: # %bb.0:
-; KNL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpsravq %zmm1, %zmm0, %zmm0
-; KNL-NEXT: # kill: def %ymm0 killed %ymm0 killed %zmm0
+; KNL-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
; KNL-NEXT: retq
;
; SKX-LABEL: variable_sra3:
@@ -127,7 +127,7 @@ define <8 x i16> @variable_sra4(<8 x i16
; KNL-NEXT: vpmovsxwd %xmm0, %ymm0
; KNL-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL-NEXT: retq
;
; SKX-LABEL: variable_sra4:
Modified: llvm/trunk/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-shuffles/partial_permute.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-shuffles/partial_permute.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-shuffles/partial_permute.ll Wed Jan 31 14:04:26 2018
@@ -789,7 +789,7 @@ define <8 x i16> @test_32xi16_to_8xi16_p
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = <16,17,5,1,14,14,13,17,u,u,u,u,u,u,u,u>
; CHECK-NEXT: vpermi2w %ymm1, %ymm2, %ymm0
-; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <32 x i16>, <32 x i16>* %vp
@@ -911,7 +911,7 @@ define <8 x i16> @test_32xi16_to_8xi16_p
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = <19,1,5,31,9,12,17,9,u,u,u,u,u,u,u,u>
; CHECK-NEXT: vpermi2w %ymm2, %ymm1, %ymm0
-; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <32 x i16>, <32 x i16>* %vp
@@ -1710,7 +1710,7 @@ define <4 x i32> @test_16xi32_to_4xi32_p
; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = <13,0,0,6,u,u,u,u>
; CHECK-NEXT: vpermi2d %ymm2, %ymm1, %ymm0
-; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x i32>, <16 x i32>* %vp
@@ -3681,7 +3681,7 @@ define <4 x float> @test_16xfloat_to_4xf
; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = <3,3,15,9,u,u,u,u>
; CHECK-NEXT: vpermi2ps %ymm2, %ymm1, %ymm0
-; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <16 x float>, <16 x float>* %vp
@@ -4565,7 +4565,7 @@ define <2 x double> @test_8xdouble_to_2x
; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2
; CHECK-NEXT: vmovapd {{.*#+}} ymm0 = [1,6,3,6]
; CHECK-NEXT: vpermi2pd %ymm2, %ymm1, %ymm0
-; CHECK-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
+; CHECK-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
%vec = load <8 x double>, <8 x double>* %vp
Modified: llvm/trunk/test/CodeGen/X86/avx512-trunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-trunc.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-trunc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-trunc.ll Wed Jan 31 14:04:26 2018
@@ -57,9 +57,9 @@ define void @trunc_qb_512_mem(<8 x i64>
define <4 x i8> @trunc_qb_256(<4 x i64> %i) #0 {
; KNL-LABEL: trunc_qb_256:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovqd %zmm0, %ymm0
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -75,7 +75,7 @@ define <4 x i8> @trunc_qb_256(<4 x i64>
define void @trunc_qb_256_mem(<4 x i64> %i, <4 x i8>* %res) #0 {
; KNL-LABEL: trunc_qb_256_mem:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u]
; KNL-NEXT: vmovd %xmm0, (%rdi)
@@ -140,9 +140,9 @@ define void @trunc_qw_512_mem(<8 x i64>
define <4 x i16> @trunc_qw_256(<4 x i64> %i) #0 {
; KNL-LABEL: trunc_qw_256:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovqd %zmm0, %ymm0
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -158,7 +158,7 @@ define <4 x i16> @trunc_qw_256(<4 x i64>
define void @trunc_qw_256_mem(<4 x i64> %i, <4 x i16>* %res) #0 {
; KNL-LABEL: trunc_qw_256_mem:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; KNL-NEXT: vmovq %xmm0, (%rdi)
@@ -223,9 +223,9 @@ define void @trunc_qd_512_mem(<8 x i64>
define <4 x i32> @trunc_qd_256(<4 x i64> %i) #0 {
; KNL-LABEL: trunc_qd_256:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovqd %zmm0, %ymm0
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -241,7 +241,7 @@ define <4 x i32> @trunc_qd_256(<4 x i64>
define void @trunc_qd_256_mem(<4 x i64> %i, <4 x i32>* %res) #0 {
; KNL-LABEL: trunc_qd_256_mem:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovqd %zmm0, %ymm0
; KNL-NEXT: vmovdqa %xmm0, (%rdi)
; KNL-NEXT: vzeroupper
@@ -305,9 +305,9 @@ define void @trunc_db_512_mem(<16 x i32>
define <8 x i8> @trunc_db_256(<8 x i32> %i) #0 {
; KNL-LABEL: trunc_db_256:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -323,7 +323,7 @@ define <8 x i8> @trunc_db_256(<8 x i32>
define void @trunc_db_256_mem(<8 x i32> %i, <8 x i8>* %res) #0 {
; KNL-LABEL: trunc_db_256_mem:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
; KNL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; KNL-NEXT: vmovq %xmm0, (%rdi)
@@ -387,9 +387,9 @@ define void @trunc_dw_512_mem(<16 x i32>
define <8 x i16> @trunc_dw_256(<8 x i32> %i) #0 {
; KNL-LABEL: trunc_dw_256:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
-; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -405,7 +405,7 @@ define <8 x i16> @trunc_dw_256(<8 x i32>
define void @trunc_dw_256_mem(<8 x i32> %i, <8 x i16>* %res) #0 {
; KNL-LABEL: trunc_dw_256_mem:
; KNL: ## %bb.0:
-; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; KNL-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; KNL-NEXT: vpmovdw %zmm0, %ymm0
; KNL-NEXT: vmovdqa %xmm0, (%rdi)
; KNL-NEXT: vzeroupper
Modified: llvm/trunk/test/CodeGen/X86/avx512-vbroadcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-vbroadcast.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-vbroadcast.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-vbroadcast.ll Wed Jan 31 14:04:26 2018
@@ -120,7 +120,7 @@ define <8 x double> @_inreg8xdouble(do
define <8 x double> @_sd8xdouble_mask(double %a, <8 x double> %i, <8 x i32> %mask1) {
; ALL-LABEL: _sd8xdouble_mask:
; ALL: # %bb.0:
-; ALL-NEXT: # kill: def %ymm2 killed %ymm2 def %zmm2
+; ALL-NEXT: # kill: def $ymm2 killed $ymm2 def $zmm2
; ALL-NEXT: vptestmd %zmm2, %zmm2, %k1
; ALL-NEXT: vbroadcastsd %xmm0, %zmm1 {%k1}
; ALL-NEXT: vmovapd %zmm1, %zmm0
@@ -135,7 +135,7 @@ define <8 x double> @_sd8xdouble_mask(
define <8 x double> @_sd8xdouble_maskz(double %a, <8 x i32> %mask1) {
; ALL-LABEL: _sd8xdouble_maskz:
; ALL: # %bb.0:
-; ALL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; ALL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; ALL-NEXT: vptestmd %zmm1, %zmm1, %k1
; ALL-NEXT: vbroadcastsd %xmm0, %zmm0 {%k1} {z}
; ALL-NEXT: retq
@@ -160,7 +160,7 @@ define <8 x double> @_sd8xdouble_load(
define <8 x double> @_sd8xdouble_mask_load(double* %a.ptr, <8 x double> %i, <8 x i32> %mask1) {
; ALL-LABEL: _sd8xdouble_mask_load:
; ALL: # %bb.0:
-; ALL-NEXT: # kill: def %ymm1 killed %ymm1 def %zmm1
+; ALL-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; ALL-NEXT: vptestmd %zmm1, %zmm1, %k1
; ALL-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1}
; ALL-NEXT: retq
@@ -175,7 +175,7 @@ define <8 x double> @_sd8xdouble_mask_
define <8 x double> @_sd8xdouble_maskz_load(double* %a.ptr, <8 x i32> %mask1) {
; ALL-LABEL: _sd8xdouble_maskz_load:
; ALL: # %bb.0:
-; ALL-NEXT: # kill: def %ymm0 killed %ymm0 def %zmm0
+; ALL-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; ALL-NEXT: vptestmd %zmm0, %zmm0, %k1
; ALL-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1} {z}
; ALL-NEXT: retq
Modified: llvm/trunk/test/CodeGen/X86/avx512-vec-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-vec-cmp.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-vec-cmp.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-vec-cmp.ll Wed Jan 31 14:04:26 2018
@@ -73,12 +73,12 @@ define <8 x i64> @test6_unsigned(<8 x i6
define <4 x float> @test7(<4 x float> %a, <4 x float> %b) {
; AVX512-LABEL: test7:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vcmpltps %zmm2, %zmm0, %k1
; AVX512-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -97,12 +97,12 @@ define <4 x float> @test7(<4 x float> %a
define <2 x double> @test8(<2 x double> %a, <2 x double> %b) {
; AVX512-LABEL: test8:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vcmpltpd %zmm2, %zmm0, %k1
; AVX512-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -120,11 +120,11 @@ define <2 x double> @test8(<2 x double>
define <8 x i32> @test9(<8 x i32> %x, <8 x i32> %y) nounwind {
; AVX512-LABEL: test9:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vpcmpeqd %zmm1, %zmm0, %k1
; AVX512-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; SKX-LABEL: test9:
@@ -140,11 +140,11 @@ define <8 x i32> @test9(<8 x i32> %x, <8
define <8 x float> @test10(<8 x float> %x, <8 x float> %y) nounwind {
; AVX512-LABEL: test10:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vcmpeqps %zmm1, %zmm0, %k1
; AVX512-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; SKX-LABEL: test10:
@@ -175,7 +175,7 @@ define i16 @test12(<16 x i64> %a, <16 x
; KNL-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
; KNL-NEXT: kunpckbw %k0, %k1, %k0
; KNL-NEXT: kmovw %k0, %eax
-; KNL-NEXT: ## kill: def %ax killed %ax killed %eax
+; KNL-NEXT: ## kill: def $ax killed $ax killed $eax
; KNL-NEXT: vzeroupper
; KNL-NEXT: retq
;
@@ -185,7 +185,7 @@ define i16 @test12(<16 x i64> %a, <16 x
; AVX512BW-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
; AVX512BW-NEXT: kunpckbw %k0, %k1, %k0
; AVX512BW-NEXT: kmovd %k0, %eax
-; AVX512BW-NEXT: ## kill: def %ax killed %ax killed %eax
+; AVX512BW-NEXT: ## kill: def $ax killed $ax killed $eax
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -195,7 +195,7 @@ define i16 @test12(<16 x i64> %a, <16 x
; SKX-NEXT: vpcmpeqq %zmm3, %zmm1, %k1
; SKX-NEXT: kunpckbw %k0, %k1, %k0
; SKX-NEXT: kmovd %k0, %eax
-; SKX-NEXT: ## kill: def %ax killed %ax killed %eax
+; SKX-NEXT: ## kill: def $ax killed $ax killed $eax
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
%res = icmp eq <16 x i64> %a, %b
@@ -503,7 +503,7 @@ define <8 x i32>@test28(<8 x i64> %x, <8
; AVX512-NEXT: vpcmpgtq %zmm3, %zmm2, %k1
; AVX512-NEXT: kxnorw %k1, %k0, %k1
; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; SKX-LABEL: test28:
@@ -537,7 +537,7 @@ define <16 x i8>@test29(<16 x i32> %x, <
; AVX512BW-NEXT: vpcmpgtd %zmm3, %zmm2, %k1
; AVX512BW-NEXT: kxorw %k1, %k0, %k0
; AVX512BW-NEXT: vpmovm2b %k0, %zmm0
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -559,11 +559,11 @@ define <16 x i8>@test29(<16 x i32> %x, <
define <4 x double> @test30(<4 x double> %x, <4 x double> %y) nounwind {
; AVX512-LABEL: test30:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vcmpeqpd %zmm1, %zmm0, %k1
; AVX512-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; SKX-LABEL: test30:
@@ -580,12 +580,12 @@ define <4 x double> @test30(<4 x double>
define <2 x double> @test31(<2 x double> %x, <2 x double> %x1, <2 x double>* %yp) nounwind {
; AVX512-LABEL: test31:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vmovupd (%rdi), %xmm2
; AVX512-NEXT: vcmpltpd %zmm2, %zmm0, %k1
; AVX512-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -604,12 +604,12 @@ define <2 x double> @test31(<2 x double>
define <4 x double> @test32(<4 x double> %x, <4 x double> %x1, <4 x double>* %yp) nounwind {
; AVX512-LABEL: test32:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vmovupd (%rdi), %ymm2
; AVX512-NEXT: vcmpltpd %zmm2, %zmm0, %k1
; AVX512-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; SKX-LABEL: test32:
@@ -639,12 +639,12 @@ define <8 x double> @test33(<8 x double>
define <4 x float> @test34(<4 x float> %x, <4 x float> %x1, <4 x float>* %yp) nounwind {
; AVX512-LABEL: test34:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vmovups (%rdi), %xmm2
; AVX512-NEXT: vcmpltps %zmm2, %zmm0, %k1
; AVX512-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -662,12 +662,12 @@ define <4 x float> @test34(<4 x float> %
define <8 x float> @test35(<8 x float> %x, <8 x float> %x1, <8 x float>* %yp) nounwind {
; AVX512-LABEL: test35:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vmovups (%rdi), %ymm2
; AVX512-NEXT: vcmpltps %zmm2, %zmm0, %k1
; AVX512-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; SKX-LABEL: test35:
@@ -713,12 +713,12 @@ define <8 x double> @test37(<8 x double>
define <4 x double> @test38(<4 x double> %x, <4 x double> %x1, double* %ptr) nounwind {
; AVX512-LABEL: test38:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vbroadcastsd (%rdi), %ymm2
; AVX512-NEXT: vcmpltpd %zmm2, %zmm0, %k1
; AVX512-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; SKX-LABEL: test38:
@@ -739,12 +739,12 @@ define <4 x double> @test38(<4 x double>
define <2 x double> @test39(<2 x double> %x, <2 x double> %x1, double* %ptr) nounwind {
; AVX512-LABEL: test39:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vmovddup {{.*#+}} xmm2 = mem[0,0]
; AVX512-NEXT: vcmpltpd %zmm2, %zmm0, %k1
; AVX512-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -783,12 +783,12 @@ define <16 x float> @test40(<16 x floa
define <8 x float> @test41(<8 x float> %x, <8 x float> %x1, float* %ptr) nounwind {
; AVX512-LABEL: test41:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; AVX512-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT: vbroadcastss (%rdi), %ymm2
; AVX512-NEXT: vcmpltps %zmm2, %zmm0, %k1
; AVX512-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT: retq
;
; SKX-LABEL: test41:
@@ -809,12 +809,12 @@ define <8 x float> @test41(<8 x float>
define <4 x float> @test42(<4 x float> %x, <4 x float> %x1, float* %ptr) nounwind {
; AVX512-LABEL: test42:
; AVX512: ## %bb.0:
-; AVX512-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
-; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
+; AVX512-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
+; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512-NEXT: vbroadcastss (%rdi), %xmm2
; AVX512-NEXT: vcmpltps %zmm2, %zmm0, %k1
; AVX512-NEXT: vblendmps %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
;
@@ -935,11 +935,11 @@ define <16 x i8> @test47(<16 x i32> %a,
;
; AVX512BW-LABEL: test47:
; AVX512BW: ## %bb.0:
-; AVX512BW-NEXT: ## kill: def %xmm2 killed %xmm2 def %zmm2
-; AVX512BW-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512BW-NEXT: ## kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512BW-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-NEXT: vptestnmd %zmm0, %zmm0, %k1
; AVX512BW-NEXT: vpblendmb %zmm1, %zmm2, %zmm0 {%k1}
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
@@ -965,11 +965,11 @@ define <16 x i16> @test48(<16 x i32> %a,
;
; AVX512BW-LABEL: test48:
; AVX512BW: ## %bb.0:
-; AVX512BW-NEXT: ## kill: def %ymm2 killed %ymm2 def %zmm2
-; AVX512BW-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
+; AVX512BW-NEXT: ## kill: def $ymm2 killed $ymm2 def $zmm2
+; AVX512BW-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
; AVX512BW-NEXT: vptestnmd %zmm0, %zmm0, %k1
; AVX512BW-NEXT: vpblendmw %zmm1, %zmm2, %zmm0 {%k1}
-; AVX512BW-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512BW-NEXT: retq
;
; SKX-LABEL: test48:
@@ -994,11 +994,11 @@ define <8 x i16> @test49(<8 x i64> %a, <
;
; AVX512BW-LABEL: test49:
; AVX512BW: ## %bb.0:
-; AVX512BW-NEXT: ## kill: def %xmm2 killed %xmm2 def %zmm2
-; AVX512BW-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
+; AVX512BW-NEXT: ## kill: def $xmm2 killed $xmm2 def $zmm2
+; AVX512BW-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512BW-NEXT: vptestnmq %zmm0, %zmm0, %k1
; AVX512BW-NEXT: vpblendmw %zmm1, %zmm2, %zmm0 {%k1}
-; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
Modified: llvm/trunk/test/CodeGen/X86/avx512-vec3-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-vec3-crash.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-vec3-crash.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-vec3-crash.ll Wed Jan 31 14:04:26 2018
@@ -19,9 +19,9 @@ define <3 x i8 > @foo(<3 x i8>%x, <3 x i
; CHECK-NEXT: vpextrb $0, %xmm0, %eax
; CHECK-NEXT: vpextrb $4, %xmm0, %edx
; CHECK-NEXT: vpextrb $8, %xmm0, %ecx
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
-; CHECK-NEXT: # kill: def %dl killed %dl killed %edx
-; CHECK-NEXT: # kill: def %cl killed %cl killed %ecx
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
+; CHECK-NEXT: # kill: def $dl killed $dl killed $edx
+; CHECK-NEXT: # kill: def $cl killed $cl killed $ecx
; CHECK-NEXT: retq
%cmp.i = icmp slt <3 x i8> %x, %a
%res = sext <3 x i1> %cmp.i to <3 x i8>
Modified: llvm/trunk/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512bw-intrinsics-upgrade.ll Wed Jan 31 14:04:26 2018
@@ -1967,7 +1967,7 @@ define i64 @test_mask_cmp_b_512(<64 x i8
; AVX512F-32-NEXT: kmovd %edx, %k7
; AVX512F-32-NEXT: movl %ebp, %edx
; AVX512F-32-NEXT: shrl $24, %edx
-; AVX512F-32-NEXT: # kill: def %al killed %al killed %eax def %eax
+; AVX512F-32-NEXT: # kill: def $al killed $al killed $eax def $eax
; AVX512F-32-NEXT: shrb $7, %al
; AVX512F-32-NEXT: kshiftlq $63, %k4, %k4
; AVX512F-32-NEXT: kshiftrq $47, %k4, %k4
@@ -1982,7 +1982,7 @@ define i64 @test_mask_cmp_b_512(<64 x i8
; AVX512F-32-NEXT: kshiftrq $18, %k4, %k3
; AVX512F-32-NEXT: kxorq %k6, %k3, %k6
; AVX512F-32-NEXT: kmovd %edx, %k3
-; AVX512F-32-NEXT: # kill: def %dl killed %dl killed %edx def %edx
+; AVX512F-32-NEXT: # kill: def $dl killed $dl killed $edx def $edx
; AVX512F-32-NEXT: andb $15, %dl
; AVX512F-32-NEXT: andb $2, %al
; AVX512F-32-NEXT: shrb %al
@@ -2232,7 +2232,7 @@ define i64 @test_mask_cmp_b_512(<64 x i8
; AVX512F-32-NEXT: kmovd %ecx, %k5
; AVX512F-32-NEXT: movl %ebx, %edx
; AVX512F-32-NEXT: shrl $24, %edx
-; AVX512F-32-NEXT: # kill: def %al killed %al killed %eax def %eax
+; AVX512F-32-NEXT: # kill: def $al killed $al killed $eax def $eax
; AVX512F-32-NEXT: shrb $7, %al
; AVX512F-32-NEXT: kshiftlq $63, %k6, %k6
; AVX512F-32-NEXT: kshiftrq $15, %k6, %k6
@@ -2248,7 +2248,7 @@ define i64 @test_mask_cmp_b_512(<64 x i8
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k7 # 8-byte Reload
; AVX512F-32-NEXT: kxorq %k7, %k1, %k7
; AVX512F-32-NEXT: kmovd %edx, %k1
-; AVX512F-32-NEXT: # kill: def %dl killed %dl killed %edx def %edx
+; AVX512F-32-NEXT: # kill: def $dl killed $dl killed $edx def $edx
; AVX512F-32-NEXT: andb $15, %dl
; AVX512F-32-NEXT: andb $2, %al
; AVX512F-32-NEXT: shrb %al
@@ -2667,7 +2667,7 @@ define i64 @test_mask_x86_avx512_ucmp_b_
; AVX512F-32-NEXT: kmovd %edx, %k7
; AVX512F-32-NEXT: movl %ebp, %edx
; AVX512F-32-NEXT: shrl $24, %edx
-; AVX512F-32-NEXT: # kill: def %al killed %al killed %eax def %eax
+; AVX512F-32-NEXT: # kill: def $al killed $al killed $eax def $eax
; AVX512F-32-NEXT: shrb $7, %al
; AVX512F-32-NEXT: kshiftlq $63, %k4, %k4
; AVX512F-32-NEXT: kshiftrq $47, %k4, %k4
@@ -2682,7 +2682,7 @@ define i64 @test_mask_x86_avx512_ucmp_b_
; AVX512F-32-NEXT: kshiftrq $18, %k4, %k3
; AVX512F-32-NEXT: kxorq %k6, %k3, %k6
; AVX512F-32-NEXT: kmovd %edx, %k3
-; AVX512F-32-NEXT: # kill: def %dl killed %dl killed %edx def %edx
+; AVX512F-32-NEXT: # kill: def $dl killed $dl killed $edx def $edx
; AVX512F-32-NEXT: andb $15, %dl
; AVX512F-32-NEXT: andb $2, %al
; AVX512F-32-NEXT: shrb %al
@@ -2932,7 +2932,7 @@ define i64 @test_mask_x86_avx512_ucmp_b_
; AVX512F-32-NEXT: kmovd %ecx, %k5
; AVX512F-32-NEXT: movl %ebx, %edx
; AVX512F-32-NEXT: shrl $24, %edx
-; AVX512F-32-NEXT: # kill: def %al killed %al killed %eax def %eax
+; AVX512F-32-NEXT: # kill: def $al killed $al killed $eax def $eax
; AVX512F-32-NEXT: shrb $7, %al
; AVX512F-32-NEXT: kshiftlq $63, %k6, %k6
; AVX512F-32-NEXT: kshiftrq $15, %k6, %k6
@@ -2948,7 +2948,7 @@ define i64 @test_mask_x86_avx512_ucmp_b_
; AVX512F-32-NEXT: kmovq {{[0-9]+}}(%esp), %k7 # 8-byte Reload
; AVX512F-32-NEXT: kxorq %k7, %k1, %k7
; AVX512F-32-NEXT: kmovd %edx, %k1
-; AVX512F-32-NEXT: # kill: def %dl killed %dl killed %edx def %edx
+; AVX512F-32-NEXT: # kill: def $dl killed $dl killed $edx def $edx
; AVX512F-32-NEXT: andb $15, %dl
; AVX512F-32-NEXT: andb $2, %al
; AVX512F-32-NEXT: shrb %al
Modified: llvm/trunk/test/CodeGen/X86/avx512bw-mov.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512bw-mov.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512bw-mov.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512bw-mov.ll Wed Jan 31 14:04:26 2018
@@ -100,7 +100,7 @@ define <16 x i8> @test_mask_load_16xi8(<
; CHECK-NEXT: vpmovb2m %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %k1
; CHECK-NEXT: vmovdqu8 (%rdi), %zmm0 {%k1} {z}
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.masked.load.v16i8(<16 x i8>* %addr, i32 4, <16 x i1>%mask, <16 x i8> undef)
ret <16 x i8> %res
@@ -114,7 +114,7 @@ define <32 x i8> @test_mask_load_32xi8(<
; CHECK-NEXT: vpmovb2m %zmm0, %k0
; CHECK-NEXT: kmovd %k0, %k1
; CHECK-NEXT: vmovdqu8 (%rdi), %zmm0 {%k1} {z}
-; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; CHECK-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%res = call <32 x i8> @llvm.masked.load.v32i8(<32 x i8>* %addr, i32 4, <32 x i1>%mask, <32 x i8> zeroinitializer)
ret <32 x i8> %res
@@ -129,7 +129,7 @@ define <8 x i16> @test_mask_load_8xi16(<
; CHECK-NEXT: kshiftld $24, %k0, %k0
; CHECK-NEXT: kshiftrd $24, %k0, %k1
; CHECK-NEXT: vmovdqu16 (%rdi), %zmm0 {%k1} {z}
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.masked.load.v8i16(<8 x i16>* %addr, i32 4, <8 x i1>%mask, <8 x i16> undef)
ret <8 x i16> %res
@@ -143,7 +143,7 @@ define <16 x i16> @test_mask_load_16xi16
; CHECK-NEXT: vpmovb2m %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %k1
; CHECK-NEXT: vmovdqu16 (%rdi), %zmm0 {%k1} {z}
-; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 killed %zmm0
+; CHECK-NEXT: ## kill: def $ymm0 killed $ymm0 killed $zmm0
; CHECK-NEXT: retq
%res = call <16 x i16> @llvm.masked.load.v16i16(<16 x i16>* %addr, i32 4, <16 x i1>%mask, <16 x i16> zeroinitializer)
ret <16 x i16> %res
@@ -153,7 +153,7 @@ declare <16 x i16> @llvm.masked.load.v16
define void @test_mask_store_16xi8(<16 x i1> %mask, <16 x i8>* %addr, <16 x i8> %val) {
; CHECK-LABEL: test_mask_store_16xi8:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
+; CHECK-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %k1
@@ -167,7 +167,7 @@ declare void @llvm.masked.store.v16i8(<1
define void @test_mask_store_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x i8> %val) {
; CHECK-LABEL: test_mask_store_32xi8:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
+; CHECK-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %zmm0, %k0
; CHECK-NEXT: kmovd %k0, %k1
@@ -181,7 +181,7 @@ declare void @llvm.masked.store.v32i8(<3
define void @test_mask_store_8xi16(<8 x i1> %mask, <8 x i16>* %addr, <8 x i16> %val) {
; CHECK-LABEL: test_mask_store_8xi16:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
+; CHECK-NEXT: ## kill: def $xmm1 killed $xmm1 def $zmm1
; CHECK-NEXT: vpsllw $15, %xmm0, %xmm0
; CHECK-NEXT: vpmovw2m %zmm0, %k0
; CHECK-NEXT: kshiftld $24, %k0, %k0
@@ -196,7 +196,7 @@ declare void @llvm.masked.store.v8i16(<8
define void @test_mask_store_16xi16(<16 x i1> %mask, <16 x i16>* %addr, <16 x i16> %val) {
; CHECK-LABEL: test_mask_store_16xi16:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
+; CHECK-NEXT: ## kill: def $ymm1 killed $ymm1 def $zmm1
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %k1
Modified: llvm/trunk/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512bwvl-intrinsics-upgrade.ll Wed Jan 31 14:04:26 2018
@@ -503,7 +503,7 @@ define i16 @test_pcmpeq_w_256(<16 x i16>
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x75,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.w.256(<16 x i16> %a, <16 x i16> %b, i16 -1)
@@ -516,7 +516,7 @@ define i16 @test_mask_pcmpeq_w_256(<16 x
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqw %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x75,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.w.256(<16 x i16> %a, <16 x i16> %b, i16 %mask)
@@ -555,7 +555,7 @@ define i16 @test_pcmpgt_w_256(<16 x i16>
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x65,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.w.256(<16 x i16> %a, <16 x i16> %b, i16 -1)
@@ -568,7 +568,7 @@ define i16 @test_mask_pcmpgt_w_256(<16 x
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpgtw %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x65,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.w.256(<16 x i16> %a, <16 x i16> %b, i16 %mask)
@@ -582,7 +582,7 @@ define i16 @test_pcmpeq_b_128(<16 x i8>
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x74,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.b.128(<16 x i8> %a, <16 x i8> %b, i16 -1)
ret i16 %res
@@ -594,7 +594,7 @@ define i16 @test_mask_pcmpeq_b_128(<16 x
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqb %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x74,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpeq.b.128(<16 x i8> %a, <16 x i8> %b, i16 %mask)
ret i16 %res
@@ -607,7 +607,7 @@ define i8 @test_pcmpeq_w_128(<8 x i16> %
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x75,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.w.128(<8 x i16> %a, <8 x i16> %b, i8 -1)
ret i8 %res
@@ -619,7 +619,7 @@ define i8 @test_mask_pcmpeq_w_128(<8 x i
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpeqw %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x75,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.w.128(<8 x i16> %a, <8 x i16> %b, i8 %mask)
ret i8 %res
@@ -632,7 +632,7 @@ define i16 @test_pcmpgt_b_128(<16 x i8>
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x64,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.b.128(<16 x i8> %a, <16 x i8> %b, i16 -1)
ret i16 %res
@@ -644,7 +644,7 @@ define i16 @test_mask_pcmpgt_b_128(<16 x
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpgtb %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x64,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.mask.pcmpgt.b.128(<16 x i8> %a, <16 x i8> %b, i16 %mask)
ret i16 %res
@@ -657,7 +657,7 @@ define i8 @test_pcmpgt_w_128(<8 x i16> %
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x65,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.w.128(<8 x i16> %a, <8 x i16> %b, i8 -1)
ret i8 %res
@@ -669,7 +669,7 @@ define i8 @test_mask_pcmpgt_w_128(<8 x i
; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
; CHECK-NEXT: vpcmpgtw %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x65,0xc1]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.w.128(<8 x i16> %a, <8 x i16> %b, i8 %mask)
ret i8 %res
@@ -3683,7 +3683,7 @@ define i16@test_int_x86_avx512_ptestm_b_
; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addl %ecx, %eax ## encoding: [0x01,0xc8]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.ptestm.b.128(<16 x i8> %x0, <16 x i8> %x1, i16 %x2)
%res1 = call i16 @llvm.x86.avx512.ptestm.b.128(<16 x i8> %x0, <16 x i8> %x1, i16-1)
@@ -3721,7 +3721,7 @@ define i8@test_int_x86_avx512_ptestm_w_1
; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestm.w.128(<8 x i16> %x0, <8 x i16> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestm.w.128(<8 x i16> %x0, <8 x i16> %x1, i8-1)
@@ -3740,7 +3740,7 @@ define i16@test_int_x86_avx512_ptestm_w_
; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addl %ecx, %eax ## encoding: [0x01,0xc8]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.ptestm.w.256(<16 x i16> %x0, <16 x i16> %x1, i16 %x2)
@@ -3760,7 +3760,7 @@ define i16@test_int_x86_avx512_ptestnm_b
; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addl %ecx, %eax ## encoding: [0x01,0xc8]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.ptestnm.b.128(<16 x i8> %x0, <16 x i8> %x1, i16 %x2)
%res1 = call i16 @llvm.x86.avx512.ptestnm.b.128(<16 x i8> %x0, <16 x i8> %x1, i16-1)
@@ -3798,7 +3798,7 @@ define i8@test_int_x86_avx512_ptestnm_w_
; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestnm.w.128(<8 x i16> %x0, <8 x i16> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.w.128(<8 x i16> %x0, <8 x i16> %x1, i8-1)
@@ -3817,7 +3817,7 @@ define i16@test_int_x86_avx512_ptestnm_w
; CHECK-NEXT: kmovd %k1, %ecx ## encoding: [0xc5,0xfb,0x93,0xc9]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
; CHECK-NEXT: addl %ecx, %eax ## encoding: [0x01,0xc8]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.ptestnm.w.256(<16 x i16> %x0, <16 x i16> %x1, i16 %x2)
@@ -3833,7 +3833,7 @@ define i16@test_int_x86_avx512_cvtb2mask
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovb2m %xmm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x08,0x29,0xc0]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.cvtb2mask.128(<16 x i8> %x0)
ret i16 %res
@@ -3859,7 +3859,7 @@ define i8@test_int_x86_avx512_cvtw2mask_
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovw2m %xmm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x08,0x29,0xc0]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.cvtw2mask.128(<8 x i16> %x0)
ret i8 %res
@@ -3872,7 +3872,7 @@ define i16@test_int_x86_avx512_cvtw2mask
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovw2m %ymm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x28,0x29,0xc0]
; CHECK-NEXT: kmovd %k0, %eax ## encoding: [0xc5,0xfb,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper ## encoding: [0xc5,0xf8,0x77]
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i16 @llvm.x86.avx512.cvtw2mask.256(<16 x i16> %x0)
Modified: llvm/trunk/test/CodeGen/X86/avx512bwvl-vec-test-testn.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512bwvl-vec-test-testn.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512bwvl-vec-test-testn.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512bwvl-vec-test-testn.ll Wed Jan 31 14:04:26 2018
@@ -7,7 +7,7 @@ define zeroext i16 @TEST_mm_test_epi8_ma
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestmb %xmm0, %xmm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -24,7 +24,7 @@ define zeroext i16 @TEST_mm_mask_test_ep
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestmb %xmm0, %xmm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -42,7 +42,7 @@ define zeroext i8 @TEST_mm_test_epi16_ma
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestmw %xmm0, %xmm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -59,7 +59,7 @@ define zeroext i8 @TEST_mm_mask_test_epi
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestmw %xmm0, %xmm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -77,7 +77,7 @@ define zeroext i16 @TEST_mm_testn_epi8_m
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestnmb %xmm0, %xmm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -94,7 +94,7 @@ define zeroext i16 @TEST_mm_mask_testn_e
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestnmb %xmm0, %xmm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -112,7 +112,7 @@ define zeroext i8 @TEST_mm_testn_epi16_m
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestnmw %xmm0, %xmm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -129,7 +129,7 @@ define zeroext i8 @TEST_mm_mask_testn_ep
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestnmw %xmm0, %xmm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
entry:
%and.i.i = and <2 x i64> %__B, %__A
@@ -182,7 +182,7 @@ define zeroext i16 @TEST_mm256_test_epi1
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestmw %ymm0, %ymm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -200,7 +200,7 @@ define zeroext i16 @TEST_mm256_mask_test
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestmw %ymm0, %ymm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -254,7 +254,7 @@ define zeroext i16 @TEST_mm256_testn_epi
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestnmw %ymm0, %ymm1, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -272,7 +272,7 @@ define zeroext i16 @TEST_mm256_mask_test
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vptestnmw %ymm0, %ymm1, %k0 {%k1}
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
Modified: llvm/trunk/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512dq-intrinsics-upgrade.ll Wed Jan 31 14:04:26 2018
@@ -151,7 +151,7 @@ declare <16 x float> @llvm.x86.avx512.ma
define <16 x float>@test_int_x86_avx512_mask_broadcastf32x8_512(<8 x float> %x0, <16 x float> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x8_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; CHECK-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinsertf32x8 $1, %ymm0, %zmm0, %zmm1 {%k1}
@@ -185,7 +185,7 @@ declare <8 x double> @llvm.x86.avx512.ma
define <8 x double>@test_int_x86_avx512_mask_broadcastf64x2_512(<2 x double> %x0, <8 x double> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x2_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
@@ -220,7 +220,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x8_512(<8 x i32> %x0, <16 x i32> %x2, i16 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x8_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
+; CHECK-NEXT: ## kill: def $ymm0 killed $ymm0 def $zmm0
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vinserti32x8 $1, %ymm0, %zmm0, %zmm1 {%k1}
@@ -254,7 +254,7 @@ declare <8 x i64> @llvm.x86.avx512.mask.
define <8 x i64>@test_int_x86_avx512_mask_broadcasti64x2_512(<2 x i64> %x0, <8 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x2_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
@@ -289,7 +289,7 @@ declare <16 x float> @llvm.x86.avx512.ma
define <16 x float>@test_int_x86_avx512_mask_broadcastf32x2_512(<4 x float> %x0, <16 x float> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x2_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinsertf64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
@@ -311,7 +311,7 @@ declare <16 x i32> @llvm.x86.avx512.mask
define <16 x i32>@test_int_x86_avx512_mask_broadcasti32x2_512(<4 x i32> %x0, <16 x i32> %x2, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x2_512:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm2
; CHECK-NEXT: kmovw %edi, %k1
@@ -335,7 +335,7 @@ define i16@test_int_x86_avx512_cvtd2mask
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovd2m %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.cvtd2mask.512(<16 x i32> %x0)
ret i16 %res
@@ -348,7 +348,7 @@ define i8@test_int_x86_avx512_cvtq2mask_
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovq2m %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.cvtq2mask.512(<8 x i64> %x0)
ret i8 %res
Modified: llvm/trunk/test/CodeGen/X86/avx512dq-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512dq-intrinsics.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512dq-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512dq-intrinsics.ll Wed Jan 31 14:04:26 2018
@@ -351,7 +351,7 @@ define i8 @test_int_x86_avx512_mask_fpcl
; CHECK-NEXT: vfpclasspd $4, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.fpclass.pd.512(<8 x double> %x0, i32 2, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.pd.512(<8 x double> %x0, i32 4, i8 -1)
@@ -369,7 +369,7 @@ define i16@test_int_x86_avx512_mask_fpcl
; CHECK-NEXT: vfpclassps $4, %zmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addl %ecx, %eax
-; CHECK-NEXT: ## kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: retq
%res = call i16 @llvm.x86.avx512.mask.fpclass.ps.512(<16 x float> %x0, i32 4, i16 %x1)
%res1 = call i16 @llvm.x86.avx512.mask.fpclass.ps.512(<16 x float> %x0, i32 4, i16 -1)
@@ -388,7 +388,7 @@ define i8 @test_int_x86_avx512_mask_fpcl
; CHECK-NEXT: vfpclasssd $4, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.fpclass.sd(<2 x double> %x0, i32 2, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.sd(<2 x double> %x0, i32 4, i8 -1)
@@ -401,7 +401,7 @@ define i8 @test_int_x86_avx512_mask_fpcl
; CHECK: ## %bb.0:
; CHECK-NEXT: vfpclasssd $4, (%rdi), %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%x0 = load <2 x double>, <2 x double>* %x0ptr
%res = call i8 @llvm.x86.avx512.mask.fpclass.sd(<2 x double> %x0, i32 4, i8 -1)
@@ -419,7 +419,7 @@ define i8 @test_int_x86_avx512_mask_fpcl
; CHECK-NEXT: vfpclassss $4, %xmm0, %k0
; CHECK-NEXT: kmovw %k0, %eax
; CHECK-NEXT: addb %cl, %al
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%res = call i8 @llvm.x86.avx512.mask.fpclass.ss(<4 x float> %x0, i32 4, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.ss(<4 x float> %x0, i32 4, i8 -1)
@@ -432,7 +432,7 @@ define i8 @test_int_x86_avx512_mask_fpcl
; CHECK: ## %bb.0:
; CHECK-NEXT: vfpclassss $4, (%rdi), %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%x0 = load <4 x float>, <4 x float>* %x0ptr
%res = call i8 @llvm.x86.avx512.mask.fpclass.ss(<4 x float> %x0, i32 4, i8 -1)
Modified: llvm/trunk/test/CodeGen/X86/avx512dq-mask-op.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512dq-mask-op.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512dq-mask-op.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512dq-mask-op.ll Wed Jan 31 14:04:26 2018
@@ -7,7 +7,7 @@ define i8 @mask8(i8 %x) {
; CHECK-NEXT: kmovd %edi, %k0
; CHECK-NEXT: knotb %k0, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%m0 = bitcast i8 %x to <8 x i1>
%m1 = xor <8 x i1> %m0, <i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1, i1 -1>
@@ -57,7 +57,7 @@ define i8 @mand8_mem(<8 x i1>* %x, <8 x
; CHECK-NEXT: kxorb %k1, %k0, %k0
; CHECK-NEXT: korb %k0, %k2, %k0
; CHECK-NEXT: kmovd %k0, %eax
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq
%ma = load <8 x i1>, <8 x i1>* %x
%mb = load <8 x i1>, <8 x i1>* %y
Modified: llvm/trunk/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512dqvl-intrinsics-upgrade.ll Wed Jan 31 14:04:26 2018
@@ -1673,7 +1673,7 @@ declare <4 x double> @llvm.x86.avx512.ma
define <4 x double>@test_int_x86_avx512_mask_broadcastf64x2_256(<2 x double> %x0, <4 x double> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf64x2_256:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd0,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinsertf64x2 $1, %xmm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x18,0xc8,0x01]
@@ -1708,7 +1708,7 @@ declare <4 x i64> @llvm.x86.avx512.mask.
define <4 x i64>@test_int_x86_avx512_mask_broadcasti64x2_256(<2 x i64> %x0, <4 x i64> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti64x2_256:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x38,0xd0,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinserti64x2 $1, %xmm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x38,0xc8,0x01]
@@ -1743,7 +1743,7 @@ declare <8 x float> @llvm.x86.avx512.mas
define <8 x float>@test_int_x86_avx512_mask_broadcastf32x2_256(<4 x float> %x0, <8 x float> %x2, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x2_256:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd0,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x18,0xc8,0x01]
@@ -1764,7 +1764,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.
define <8 x i32>@test_int_x86_avx512_mask_broadcasti32x2_256(<4 x i32> %x0, <8 x i32> %x2, i8 %x3, i64 * %y_ptr) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x2_256:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vmovq (%rsi), %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x7e,0x16]
; CHECK-NEXT: ## xmm2 = mem[0],zero
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
@@ -1811,7 +1811,7 @@ define i8@test_int_x86_avx512_cvtd2mask_
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovd2m %xmm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x08,0x39,0xc0]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.cvtd2mask.128(<4 x i32> %x0)
ret i8 %res
@@ -1824,7 +1824,7 @@ define i8@test_int_x86_avx512_cvtd2mask_
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovd2m %ymm0, %k0 ## encoding: [0x62,0xf2,0x7e,0x28,0x39,0xc0]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.cvtd2mask.256(<8 x i32> %x0)
ret i8 %res
@@ -1837,7 +1837,7 @@ define i8@test_int_x86_avx512_cvtq2mask_
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovq2m %xmm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x08,0x39,0xc0]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.cvtq2mask.128(<2 x i64> %x0)
ret i8 %res
@@ -1850,7 +1850,7 @@ define i8@test_int_x86_avx512_cvtq2mask_
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovq2m %ymm0, %k0 ## encoding: [0x62,0xf2,0xfe,0x28,0x39,0xc0]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.cvtq2mask.256(<4 x i64> %x0)
ret i8 %res
Modified: llvm/trunk/test/CodeGen/X86/avx512dqvl-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512dqvl-intrinsics.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512dqvl-intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512dqvl-intrinsics.ll Wed Jan 31 14:04:26 2018
@@ -560,7 +560,7 @@ define i8 @test_int_x86_avx512_mask_fpcl
; CHECK-NEXT: vfpclassps $4, %xmm0, %k0 ## encoding: [0x62,0xf3,0x7d,0x08,0x66,0xc0,0x04]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.fpclass.ps.128(<4 x float> %x0, i32 2, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.ps.128(<4 x float> %x0, i32 4, i8 -1)
@@ -579,7 +579,7 @@ define i8 @test_int_x86_avx512_mask_fpcl
; CHECK-NEXT: vfpclassps $4, %ymm0, %k0 ## encoding: [0x62,0xf3,0x7d,0x28,0x66,0xc0,0x04]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.fpclass.ps.256(<8 x float> %x0, i32 2, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.ps.256(<8 x float> %x0, i32 4, i8 -1)
@@ -598,7 +598,7 @@ define i8 @test_int_x86_avx512_mask_fpcl
; CHECK-NEXT: vfpclasspd $2, %xmm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x08,0x66,0xc0,0x02]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.fpclass.pd.128(<2 x double> %x0, i32 4, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.pd.128(<2 x double> %x0, i32 2, i8 -1)
@@ -617,7 +617,7 @@ define i8 @test_int_x86_avx512_mask_fpcl
; CHECK-NEXT: vfpclasspd $4, %ymm0, %k0 ## encoding: [0x62,0xf3,0xfd,0x28,0x66,0xc0,0x04]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.fpclass.pd.256(<4 x double> %x0, i32 2, i8 %x1)
%res1 = call i8 @llvm.x86.avx512.mask.fpclass.pd.256(<4 x double> %x0, i32 4, i8 -1)
Modified: llvm/trunk/test/CodeGen/X86/avx512f-vec-test-testn.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512f-vec-test-testn.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512f-vec-test-testn.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512f-vec-test-testn.ll Wed Jan 31 14:04:26 2018
@@ -7,7 +7,7 @@ define zeroext i8 @TEST_mm512_test_epi64
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestmq %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -23,7 +23,7 @@ define zeroext i16 @TEST_mm512_test_epi3
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestmd %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -41,7 +41,7 @@ define zeroext i8 @TEST_mm512_mask_test_
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestmq %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -60,7 +60,7 @@ define zeroext i16 @TEST_mm512_mask_test
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestmd %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -79,7 +79,7 @@ define zeroext i8 @TEST_mm512_testn_epi6
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestnmq %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -95,7 +95,7 @@ define zeroext i16 @TEST_mm512_testn_epi
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vptestnmd %zmm0, %zmm1, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -113,7 +113,7 @@ define zeroext i8 @TEST_mm512_mask_testn
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestnmq %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %al killed %al killed %eax
+; CHECK-NEXT: # kill: def $al killed $al killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
@@ -132,7 +132,7 @@ define zeroext i16 @TEST_mm512_mask_test
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vptestnmd %zmm0, %zmm1, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: # kill: def %ax killed %ax killed %eax
+; CHECK-NEXT: # kill: def $ax killed $ax killed $eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
entry:
Modified: llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll?rev=323922&r1=323921&r2=323922&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics-upgrade.ll Wed Jan 31 14:04:26 2018
@@ -1064,7 +1064,7 @@ define i8 @test_pcmpeq_d_256(<8 x i32> %
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x76,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.d.256(<8 x i32> %a, <8 x i32> %b, i8 -1)
ret i8 %res
@@ -1076,7 +1076,7 @@ define i8 @test_mask_pcmpeq_d_256(<8 x i
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqd %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x76,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.d.256(<8 x i32> %a, <8 x i32> %b, i8 %mask)
ret i8 %res
@@ -1089,7 +1089,7 @@ define i8 @test_pcmpeq_q_256(<4 x i64> %
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x29,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.256(<4 x i64> %a, <4 x i64> %b, i8 -1)
ret i8 %res
@@ -1101,7 +1101,7 @@ define i8 @test_mask_pcmpeq_q_256(<4 x i
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqq %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x29,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.256(<4 x i64> %a, <4 x i64> %b, i8 %mask)
ret i8 %res
@@ -1114,7 +1114,7 @@ define i8 @test_pcmpgt_d_256(<8 x i32> %
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x28,0x66,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.d.256(<8 x i32> %a, <8 x i32> %b, i8 -1)
ret i8 %res
@@ -1126,7 +1126,7 @@ define i8 @test_mask_pcmpgt_d_256(<8 x i
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtd %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0x66,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.d.256(<8 x i32> %a, <8 x i32> %b, i8 %mask)
ret i8 %res
@@ -1139,7 +1139,7 @@ define i8 @test_pcmpgt_q_256(<4 x i64> %
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x28,0x37,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.256(<4 x i64> %a, <4 x i64> %b, i8 -1)
ret i8 %res
@@ -1151,7 +1151,7 @@ define i8 @test_mask_pcmpgt_q_256(<4 x i
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x29,0x37,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.256(<4 x i64> %a, <4 x i64> %b, i8 %mask)
ret i8 %res
@@ -1164,7 +1164,7 @@ define i8 @test_pcmpeq_d_128(<4 x i32> %
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x76,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.d.128(<4 x i32> %a, <4 x i32> %b, i8 -1)
ret i8 %res
@@ -1176,7 +1176,7 @@ define i8 @test_mask_pcmpeq_d_128(<4 x i
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqd %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x76,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.d.128(<4 x i32> %a, <4 x i32> %b, i8 %mask)
ret i8 %res
@@ -1189,7 +1189,7 @@ define i8 @test_pcmpeq_q_128(<2 x i64> %
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x29,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.128(<2 x i64> %a, <2 x i64> %b, i8 -1)
ret i8 %res
@@ -1201,7 +1201,7 @@ define i8 @test_mask_pcmpeq_q_128(<2 x i
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpeqq %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x29,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpeq.q.128(<2 x i64> %a, <2 x i64> %b, i8 %mask)
ret i8 %res
@@ -1214,7 +1214,7 @@ define i8 @test_pcmpgt_d_128(<4 x i32> %
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf1,0x7d,0x08,0x66,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.d.128(<4 x i32> %a, <4 x i32> %b, i8 -1)
ret i8 %res
@@ -1226,7 +1226,7 @@ define i8 @test_mask_pcmpgt_d_128(<4 x i
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0x66,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.d.128(<4 x i32> %a, <4 x i32> %b, i8 %mask)
ret i8 %res
@@ -1239,7 +1239,7 @@ define i8 @test_pcmpgt_q_128(<2 x i64> %
; CHECK: ## %bb.0:
; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 ## encoding: [0x62,0xf2,0xfd,0x08,0x37,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.128(<2 x i64> %a, <2 x i64> %b, i8 -1)
ret i8 %res
@@ -1251,7 +1251,7 @@ define i8 @test_mask_pcmpgt_q_128(<2 x i
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vpcmpgtq %xmm1, %xmm0, %k0 {%k1} ## encoding: [0x62,0xf2,0xfd,0x09,0x37,0xc1]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.mask.pcmpgt.q.128(<2 x i64> %a, <2 x i64> %b, i8 %mask)
ret i8 %res
@@ -5863,7 +5863,7 @@ declare <8 x float> @llvm.x86.avx512.mas
define <8 x float>@test_int_x86_avx512_mask_broadcastf32x4_256(<4 x float> %x0, <8 x float> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcastf32x4_256:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x18,0xd0,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinsertf32x4 $1, %xmm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x18,0xc8,0x01]
@@ -5896,7 +5896,7 @@ declare <8 x i32> @llvm.x86.avx512.mask.
define <8 x i32>@test_int_x86_avx512_mask_broadcasti32x4_256(<4 x i32> %x0, <8 x i32> %x2, i8 %mask) {
; CHECK-LABEL: test_int_x86_avx512_mask_broadcasti32x4_256:
; CHECK: ## %bb.0:
-; CHECK-NEXT: ## kill: def %xmm0 killed %xmm0 def %ymm0
+; CHECK-NEXT: ## kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm2 ## EVEX TO VEX Compression encoding: [0xc4,0xe3,0x7d,0x38,0xd0,0x01]
; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
; CHECK-NEXT: vinserti32x4 $1, %xmm0, %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x38,0xc8,0x01]
@@ -5999,7 +5999,7 @@ define i8@test_int_x86_avx512_ptestm_d_1
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8-1)
@@ -6018,7 +6018,7 @@ define i8@test_int_x86_avx512_ptestm_d_2
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestm.d.256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestm.d.256(<8 x i32> %x0, <8 x i32> %x1, i8-1)
@@ -6037,7 +6037,7 @@ define i8@test_int_x86_avx512_ptestm_q_1
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8-1)
@@ -6056,7 +6056,7 @@ define i8@test_int_x86_avx512_ptestm_q_2
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestm.q.256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestm.q.256(<4 x i64> %x0, <4 x i64> %x1, i8-1)
@@ -6075,7 +6075,7 @@ define i8@test_int_x86_avx512_ptestnm_d_
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestnm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.d.128(<4 x i32> %x0, <4 x i32> %x1, i8-1)
@@ -6094,7 +6094,7 @@ define i8@test_int_x86_avx512_ptestnm_d_
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestnm.d.256(<8 x i32> %x0, <8 x i32> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.d.256(<8 x i32> %x0, <8 x i32> %x1, i8-1)
@@ -6113,7 +6113,7 @@ define i8@test_int_x86_avx512_ptestnm_q_
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestnm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.q.128(<2 x i64> %x0, <2 x i64> %x1, i8-1)
@@ -6132,7 +6132,7 @@ define i8@test_int_x86_avx512_ptestnm_q_
; CHECK-NEXT: kmovw %k1, %ecx ## encoding: [0xc5,0xf8,0x93,0xc9]
; CHECK-NEXT: kmovw %k0, %eax ## encoding: [0xc5,0xf8,0x93,0xc0]
; CHECK-NEXT: addb %cl, %al ## encoding: [0x00,0xc8]
-; CHECK-NEXT: ## kill: def %al killed %al killed %eax
+; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: retq ## encoding: [0xc3]
%res = call i8 @llvm.x86.avx512.ptestnm.q.256(<4 x i64> %x0, <4 x i64> %x1, i8 %x2)
%res1 = call i8 @llvm.x86.avx512.ptestnm.q.256(<4 x i64> %x0, <4 x i64> %x1, i8-1)