[llvm] [X86][NFC] Change checks from MIR to assembly for non-EGPR instructions (PR #167275)

Phoebe Wang via llvm-commits llvm-commits at lists.llvm.org
Sun Nov 9 23:10:28 PST 2025


https://github.com/phoebewang created https://github.com/llvm/llvm-project/pull/167275

None

>From 4d35ba1ef9e08992f8bcedcc1fb1c88123afd8f5 Mon Sep 17 00:00:00 2001
From: Phoebe Wang <phoebe.wang at intel.com>
Date: Mon, 10 Nov 2025 15:05:41 +0800
Subject: [PATCH] [X86][NFC] Change checks from MIR to assembly for non-EGPR
 instructions

---
 llvm/test/CodeGen/X86/apx/no-rex2-general.ll  | 132 ++++++++++--------
 .../CodeGen/X86/apx/no-rex2-pseudo-amx.ll     |  29 ++--
 .../CodeGen/X86/apx/no-rex2-pseudo-x87.ll     |  31 ++--
 llvm/test/CodeGen/X86/apx/no-rex2-special.ll  | 113 ++++++++-------
 4 files changed, 169 insertions(+), 136 deletions(-)

diff --git a/llvm/test/CodeGen/X86/apx/no-rex2-general.ll b/llvm/test/CodeGen/X86/apx/no-rex2-general.ll
index 805fc7ccaab76..02c181c1b05e4 100644
--- a/llvm/test/CodeGen/X86/apx/no-rex2-general.ll
+++ b/llvm/test/CodeGen/X86/apx/no-rex2-general.ll
@@ -1,76 +1,90 @@
-; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=+sse2,+ssse3,+egpr  | FileCheck %s --check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=+sse2,+ssse3,+egpr,+avx | FileCheck %s --check-prefix=AVX
-; RUN: llc < %s -enable-new-pm -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=+sse2,+ssse3,+egpr  | FileCheck %s --check-prefix=SSE
-; RUN: llc < %s -enable-new-pm -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=+sse2,+ssse3,+egpr,+avx | FileCheck %s --check-prefix=AVX
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2,+ssse3,+egpr  | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2,+ssse3,+egpr,+avx | FileCheck %s --check-prefix=AVX
 
 define i32 @map0(ptr nocapture noundef readonly %a, i64 noundef %b) {
-  ; SSE-LABEL: name: map0
-  ; SSE: bb.0.entry:
-  ; SSE-NEXT:   liveins: $rdi, $rsi
-  ; SSE-NEXT: {{  $}}
-  ; SSE-NEXT:   [[COPY:%[0-9]+]]:gr64_nosp = COPY $rsi
-  ; SSE-NEXT:   [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
-  ; SSE-NEXT:   [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY1]], 4, [[COPY]], 0, $noreg :: (load (s32) from %ir.add.ptr)
-  ; SSE-NEXT:   $eax = COPY [[MOV32rm]]
-  ; SSE-NEXT:   RET 0, $eax
-  ; AVX-LABEL: name: map0
-  ; AVX: bb.0.entry:
-  ; AVX-NEXT:   liveins: $rdi, $rsi
-  ; AVX-NEXT: {{  $}}
-  ; AVX-NEXT:   [[COPY:%[0-9]+]]:gr64_nosp = COPY $rsi
-  ; AVX-NEXT:   [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
-  ; AVX-NEXT:   [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY1]], 4, [[COPY]], 0, $noreg :: (load (s32) from %ir.add.ptr)
-  ; AVX-NEXT:   $eax = COPY [[MOV32rm]]
-  ; AVX-NEXT:   RET 0, $eax
+; SSE-LABEL: map0:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    movq %rsi, %r16
+; SSE-NEXT:    movq %rdi, %r17
+; SSE-NEXT:    #APP
+; SSE-NEXT:    nop
+; SSE-NEXT:    #NO_APP
+; SSE-NEXT:    movl (%r17,%r16,4), %eax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: map0:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    movq %rsi, %r16
+; AVX-NEXT:    movq %rdi, %r17
+; AVX-NEXT:    #APP
+; AVX-NEXT:    nop
+; AVX-NEXT:    #NO_APP
+; AVX-NEXT:    movl (%r17,%r16,4), %eax
+; AVX-NEXT:    retq
 entry:
   %add.ptr = getelementptr inbounds i32, ptr %a, i64 %b
+  tail call void asm sideeffect "nop", "~{eax},~{ecx},~{edx},~{esi},~{edi},~{r8},~{r9},~{r10},~{r11}"()
   %0 = load i32, ptr %add.ptr
   ret i32 %0
 }
 
-define i32 @map1_or_vex(<2 x double> noundef %a) {
-  ; SSE-LABEL: name: map1_or_vex
-  ; SSE: bb.0.entry:
-  ; SSE-NEXT:   liveins: $xmm0
-  ; SSE-NEXT: {{  $}}
-  ; SSE-NEXT:   [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
-  ; SSE-NEXT:   [[CVTSD2SIrr_Int:%[0-9]+]]:gr32 = nofpexcept CVTSD2SIrr_Int [[COPY]], implicit $mxcsr
-  ; SSE-NEXT:   $eax = COPY [[CVTSD2SIrr_Int]]
-  ; SSE-NEXT:   RET 0, $eax
-  ; AVX-LABEL: name: map1_or_vex
-  ; AVX: bb.0.entry:
-  ; AVX-NEXT:   liveins: $xmm0
-  ; AVX-NEXT: {{  $}}
-  ; AVX-NEXT:   [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
-  ; AVX-NEXT:   [[VCVTSD2SIrr_Int:%[0-9]+]]:gr32_norex2 = nofpexcept VCVTSD2SIrr_Int [[COPY]], implicit $mxcsr
-  ; AVX-NEXT:   $eax = COPY [[VCVTSD2SIrr_Int]]
-  ; AVX-NEXT:   RET 0, $eax
+define i32 @map1_or_vex(<2 x double> noundef %a) nounwind {
+; SSE-LABEL: map1_or_vex:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    cvtsd2si %xmm0, %r16d
+; SSE-NEXT:    #APP
+; SSE-NEXT:    nop
+; SSE-NEXT:    #NO_APP
+; SSE-NEXT:    movl %r16d, %eax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: map1_or_vex:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    pushq %rbx
+; AVX-NEXT:    vcvtsd2si %xmm0, %ebx
+; AVX-NEXT:    #APP
+; AVX-NEXT:    nop
+; AVX-NEXT:    #NO_APP
+; AVX-NEXT:    movl %ebx, %eax
+; AVX-NEXT:    popq %rbx
+; AVX-NEXT:    retq
 entry:
   %0 = tail call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %a)
+  tail call void asm sideeffect "nop", "~{eax},~{ecx},~{edx},~{esi},~{edi},~{r8},~{r9},~{r10},~{r11}"()
   ret i32 %0
 }
 
-define <2 x i64> @map2_or_vex(ptr nocapture noundef readonly %b, i64 noundef %c) {
-  ; SSE-LABEL: name: map2_or_vex
-  ; SSE: bb.0.entry:
-  ; SSE-NEXT:   liveins: $rdi, $rsi
-  ; SSE-NEXT: {{  $}}
-  ; SSE-NEXT:   [[COPY:%[0-9]+]]:gr64_norex2_nosp = COPY $rsi
-  ; SSE-NEXT:   [[COPY1:%[0-9]+]]:gr64_norex2 = COPY $rdi
-  ; SSE-NEXT:   [[PABSBrm:%[0-9]+]]:vr128 = PABSBrm [[COPY1]], 4, [[COPY]], 0, $noreg :: (load (s128) from %ir.add.ptr)
-  ; SSE-NEXT:   $xmm0 = COPY [[PABSBrm]]
-  ; SSE-NEXT:   RET 0, $xmm0
-  ; AVX-LABEL: name: map2_or_vex
-  ; AVX: bb.0.entry:
-  ; AVX-NEXT:   liveins: $rdi, $rsi
-  ; AVX-NEXT: {{  $}}
-  ; AVX-NEXT:   [[COPY:%[0-9]+]]:gr64_norex2_nosp = COPY $rsi
-  ; AVX-NEXT:   [[COPY1:%[0-9]+]]:gr64_norex2 = COPY $rdi
-  ; AVX-NEXT:   [[VPABSBrm:%[0-9]+]]:vr128 = VPABSBrm [[COPY1]], 4, [[COPY]], 0, $noreg :: (load (s128) from %ir.add.ptr)
-  ; AVX-NEXT:   $xmm0 = COPY [[VPABSBrm]]
-  ; AVX-NEXT:   RET 0, $xmm0
+define <2 x i64> @map2_or_vex(ptr nocapture noundef readonly %b, i64 noundef %c) nounwind {
+; SSE-LABEL: map2_or_vex:
+; SSE:       # %bb.0: # %entry
+; SSE-NEXT:    pushq %r14
+; SSE-NEXT:    pushq %rbx
+; SSE-NEXT:    movq %rsi, %rbx
+; SSE-NEXT:    movq %rdi, %r14
+; SSE-NEXT:    #APP
+; SSE-NEXT:    nop
+; SSE-NEXT:    #NO_APP
+; SSE-NEXT:    pabsb (%r14,%rbx,4), %xmm0
+; SSE-NEXT:    popq %rbx
+; SSE-NEXT:    popq %r14
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: map2_or_vex:
+; AVX:       # %bb.0: # %entry
+; AVX-NEXT:    pushq %r14
+; AVX-NEXT:    pushq %rbx
+; AVX-NEXT:    movq %rsi, %rbx
+; AVX-NEXT:    movq %rdi, %r14
+; AVX-NEXT:    #APP
+; AVX-NEXT:    nop
+; AVX-NEXT:    #NO_APP
+; AVX-NEXT:    vpabsb (%r14,%rbx,4), %xmm0
+; AVX-NEXT:    popq %rbx
+; AVX-NEXT:    popq %r14
+; AVX-NEXT:    retq
 entry:
+  tail call void asm sideeffect "nop", "~{eax},~{ecx},~{edx},~{esi},~{edi},~{r8},~{r9},~{r10},~{r11}"()
   %add.ptr = getelementptr inbounds i32, ptr %b, i64 %c
   %a = load <2 x i64>, ptr %add.ptr
   %0 = bitcast <2 x i64> %a to <16 x i8>
diff --git a/llvm/test/CodeGen/X86/apx/no-rex2-pseudo-amx.ll b/llvm/test/CodeGen/X86/apx/no-rex2-pseudo-amx.ll
index 5fa4cb4c8826b..c193680607f76 100644
--- a/llvm/test/CodeGen/X86/apx/no-rex2-pseudo-amx.ll
+++ b/llvm/test/CodeGen/X86/apx/no-rex2-pseudo-amx.ll
@@ -1,17 +1,20 @@
-; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=+amx-tile,+egpr | FileCheck %s
-; RUN: llc < %s -enable-new-pm -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=+amx-tile,+egpr | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+amx-tile,+egpr | FileCheck %s
 
-define dso_local void @amx(ptr noundef %data) {
-  ; CHECK-LABEL: name: amx
-  ; CHECK: bb.0.entry:
-  ; CHECK-NEXT:   liveins: $rdi
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gr64_norex2 = COPY $rdi
-  ; CHECK-NEXT:   [[MOV32ri64_:%[0-9]+]]:gr64_norex2_nosp = MOV32ri64 8
-  ; CHECK-NEXT:   PTILELOADD 4, [[COPY]], 1, killed [[MOV32ri64_]], 0, $noreg
-  ; CHECK-NEXT:   RET 0
-  entry:
+define dso_local void @amx(ptr noundef %data) nounwind {
+; CHECK-LABEL: amx:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    pushq %rbx
+; CHECK-NEXT:    movq %rdi, %rbx
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    movl $8, %eax
+; CHECK-NEXT:    tileloadd (%rbx,%rax), %tmm4
+; CHECK-NEXT:    popq %rbx
+; CHECK-NEXT:    retq
+entry:
+  tail call void asm sideeffect "nop", "~{eax},~{ecx},~{edx},~{esi},~{edi},~{r8},~{r9},~{r10},~{r11}"()
   call void @llvm.x86.tileloadd64(i8 4, ptr %data, i64 8)
   ret void
 }
diff --git a/llvm/test/CodeGen/X86/apx/no-rex2-pseudo-x87.ll b/llvm/test/CodeGen/X86/apx/no-rex2-pseudo-x87.ll
index a9ca591a156c2..4692a58d095a6 100644
--- a/llvm/test/CodeGen/X86/apx/no-rex2-pseudo-x87.ll
+++ b/llvm/test/CodeGen/X86/apx/no-rex2-pseudo-x87.ll
@@ -1,17 +1,22 @@
-; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=-sse,+egpr | FileCheck %s
-; RUN: llc < %s -enable-new-pm -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=-sse,+egpr | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=-sse,+egpr | FileCheck %s
 
-define void @x87(ptr %0, ptr %1) {
-  ; CHECK-LABEL: name: x87
-  ; CHECK: bb.0 (%ir-block.2):
-  ; CHECK-NEXT:   liveins: $rdi, $rsi
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gr64_norex2 = COPY $rsi
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gr64_norex2 = COPY $rdi
-  ; CHECK-NEXT:   [[LD_Fp32m:%[0-9]+]]:rfp32 = nofpexcept LD_Fp32m [[COPY1]], 1, $noreg, 0, $noreg, implicit-def dead $fpsw, implicit $fpcw :: (load (s32) from %ir.0)
-  ; CHECK-NEXT:   nofpexcept ST_Fp32m [[COPY]], 1, $noreg, 0, $noreg, killed [[LD_Fp32m]], implicit-def dead $fpsw, implicit $fpcw :: (store (s32) into %ir.1)
-  ; CHECK-NEXT:   RET 0
+define void @x87(ptr %0, ptr %1) nounwind {
+; CHECK-LABEL: x87:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushq %r14
+; CHECK-NEXT:    pushq %rbx
+; CHECK-NEXT:    movq %rsi, %rbx
+; CHECK-NEXT:    movq %rdi, %r14
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    flds (%r14)
+; CHECK-NEXT:    fstps (%rbx)
+; CHECK-NEXT:    popq %rbx
+; CHECK-NEXT:    popq %r14
+; CHECK-NEXT:    retq
+  tail call void asm sideeffect "nop", "~{eax},~{ecx},~{edx},~{esi},~{edi},~{r8},~{r9},~{r10},~{r11}"()
   %3 = load float, ptr %0
   store float %3, ptr %1
   ret void
diff --git a/llvm/test/CodeGen/X86/apx/no-rex2-special.ll b/llvm/test/CodeGen/X86/apx/no-rex2-special.ll
index 86534427a9eae..f2025b5c8cbf8 100644
--- a/llvm/test/CodeGen/X86/apx/no-rex2-special.ll
+++ b/llvm/test/CodeGen/X86/apx/no-rex2-special.ll
@@ -1,70 +1,81 @@
-; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=+xsave,+egpr  | FileCheck %s
-; RUN: llc < %s -enable-new-pm -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=+xsave,+egpr  | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+xsave,+egpr  | FileCheck %s
 
-define void @test_xsave(ptr %ptr, i32 %hi, i32 %lo) {
-  ; CHECK-LABEL: name: test_xsave
-  ; CHECK: bb.0 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $rdi, $esi, $edx
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gr32 = COPY $edx
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gr32 = COPY $esi
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gr64_norex2 = COPY $rdi
-  ; CHECK-NEXT:   $edx = COPY [[COPY1]]
-  ; CHECK-NEXT:   $eax = COPY [[COPY]]
-  ; CHECK-NEXT:   XSAVE [[COPY2]], 1, $noreg, 0, $noreg, implicit $edx, implicit $eax
-  ; CHECK-NEXT:   RET 0
+define void @test_xsave(ptr %ptr, i32 %hi, i32 %lo) nounwind {
+; CHECK-LABEL: test_xsave:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushq %rbx
+; CHECK-NEXT:    movl %edx, %r16d
+; CHECK-NEXT:    movl %esi, %edx
+; CHECK-NEXT:    movq %rdi, %rbx
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    movl %r16d, %eax
+; CHECK-NEXT:    xsave (%rbx)
+; CHECK-NEXT:    popq %rbx
+; CHECK-NEXT:    retq
+  tail call void asm sideeffect "nop", "~{eax},~{ecx},~{esi},~{edi},~{r8},~{r9},~{r10},~{r11}"()
   call void @llvm.x86.xsave(ptr %ptr, i32 %hi, i32 %lo)
   ret void;
 }
 declare void @llvm.x86.xsave(ptr, i32, i32)
 
-define void @test_xsave64(ptr %ptr, i32 %hi, i32 %lo) {
-  ; CHECK-LABEL: name: test_xsave64
-  ; CHECK: bb.0 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $rdi, $esi, $edx
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gr32 = COPY $edx
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gr32 = COPY $esi
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gr64_norex2 = COPY $rdi
-  ; CHECK-NEXT:   $edx = COPY [[COPY1]]
-  ; CHECK-NEXT:   $eax = COPY [[COPY]]
-  ; CHECK-NEXT:   XSAVE64 [[COPY2]], 1, $noreg, 0, $noreg, implicit $edx, implicit $eax
-  ; CHECK-NEXT:   RET 0
+define void @test_xsave64(ptr %ptr, i32 %hi, i32 %lo) nounwind {
+; CHECK-LABEL: test_xsave64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushq %rbx
+; CHECK-NEXT:    movl %edx, %r16d
+; CHECK-NEXT:    movl %esi, %edx
+; CHECK-NEXT:    movq %rdi, %rbx
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    movl %r16d, %eax
+; CHECK-NEXT:    xsave64 (%rbx)
+; CHECK-NEXT:    popq %rbx
+; CHECK-NEXT:    retq
+  tail call void asm sideeffect "nop", "~{eax},~{ecx},~{esi},~{edi},~{r8},~{r9},~{r10},~{r11}"()
   call void @llvm.x86.xsave64(ptr %ptr, i32 %hi, i32 %lo)
   ret void;
 }
 declare void @llvm.x86.xsave64(ptr, i32, i32)
 
-define void @test_xrstor(ptr %ptr, i32 %hi, i32 %lo) {
-  ; CHECK-LABEL: name: test_xrstor
-  ; CHECK: bb.0 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $rdi, $esi, $edx
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gr32 = COPY $edx
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gr32 = COPY $esi
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gr64_norex2 = COPY $rdi
-  ; CHECK-NEXT:   $edx = COPY [[COPY1]]
-  ; CHECK-NEXT:   $eax = COPY [[COPY]]
-  ; CHECK-NEXT:   XRSTOR [[COPY2]], 1, $noreg, 0, $noreg, implicit $edx, implicit $eax
-  ; CHECK-NEXT:   RET 0
+define void @test_xrstor(ptr %ptr, i32 %hi, i32 %lo) nounwind {
+; CHECK-LABEL: test_xrstor:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushq %rbx
+; CHECK-NEXT:    movl %edx, %r16d
+; CHECK-NEXT:    movl %esi, %edx
+; CHECK-NEXT:    movq %rdi, %rbx
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    movl %r16d, %eax
+; CHECK-NEXT:    xrstor (%rbx)
+; CHECK-NEXT:    popq %rbx
+; CHECK-NEXT:    retq
+  tail call void asm sideeffect "nop", "~{eax},~{ecx},~{esi},~{edi},~{r8},~{r9},~{r10},~{r11}"()
   call void @llvm.x86.xrstor(ptr %ptr, i32 %hi, i32 %lo)
   ret void;
 }
 declare void @llvm.x86.xrstor(ptr, i32, i32)
 
-define void @test_xrstor64(ptr %ptr, i32 %hi, i32 %lo) {
-  ; CHECK-LABEL: name: test_xrstor64
-  ; CHECK: bb.0 (%ir-block.0):
-  ; CHECK-NEXT:   liveins: $rdi, $esi, $edx
-  ; CHECK-NEXT: {{  $}}
-  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gr32 = COPY $edx
-  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gr32 = COPY $esi
-  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gr64_norex2 = COPY $rdi
-  ; CHECK-NEXT:   $edx = COPY [[COPY1]]
-  ; CHECK-NEXT:   $eax = COPY [[COPY]]
-  ; CHECK-NEXT:   XRSTOR64 [[COPY2]], 1, $noreg, 0, $noreg, implicit $edx, implicit $eax
-  ; CHECK-NEXT:   RET 0
+define void @test_xrstor64(ptr %ptr, i32 %hi, i32 %lo) nounwind {
+; CHECK-LABEL: test_xrstor64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    pushq %rbx
+; CHECK-NEXT:    movl %edx, %r16d
+; CHECK-NEXT:    movl %esi, %edx
+; CHECK-NEXT:    movq %rdi, %rbx
+; CHECK-NEXT:    #APP
+; CHECK-NEXT:    nop
+; CHECK-NEXT:    #NO_APP
+; CHECK-NEXT:    movl %r16d, %eax
+; CHECK-NEXT:    xrstor64 (%rbx)
+; CHECK-NEXT:    popq %rbx
+; CHECK-NEXT:    retq
+  tail call void asm sideeffect "nop", "~{eax},~{ecx},~{esi},~{edi},~{r8},~{r9},~{r10},~{r11}"()
   call void @llvm.x86.xrstor64(ptr %ptr, i32 %hi, i32 %lo)
   ret void;
 }



More information about the llvm-commits mailing list