[llvm] 1142e6c - [SelectionDAG] Add missing setValue calls in visitIntrinsicCall

Marco Elver via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 26 07:15:34 PST 2023


Author: Martin Fink
Date: 2023-01-26T16:13:46+01:00
New Revision: 1142e6c7c795de7f80774325a07ed49bc95a48c9

URL: https://github.com/llvm/llvm-project/commit/1142e6c7c795de7f80774325a07ed49bc95a48c9
DIFF: https://github.com/llvm/llvm-project/commit/1142e6c7c795de7f80774325a07ed49bc95a48c9.diff

LOG: [SelectionDAG] Add missing setValue calls in visitIntrinsicCall

Add the missing setValue calls in SelectionDAGBuilder for the mem-transfer
intrinsic calls (memcpy, memmove, memset, their *.inline variants, and the
element-unordered-atomic variants). setValue records the SDValue produced
for an instruction, which is required to propagate !pcsections metadata
from IR to MIR.
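
For reference, a minimal sketch of why the NodeMap entry matters (simplified
from SelectionDAGBuilder::visit; the exact upstream bookkeeping may differ):
setValue(&I, N) records the SDValue produced for an IR instruction in the
builder's NodeMap, and the generic visit path consults that map when copying
!pcsections metadata onto the resulting SDNode.

  // Simplified sketch, not the verbatim upstream code.
  void SelectionDAGBuilder::visit(const Instruction &I) {
    // Dispatches to visitIntrinsicCall for llvm.memcpy and friends.
    visit(I.getOpcode(), I);
    if (MDNode *MD = I.getMetadata(LLVMContext::MD_pcsections)) {
      // Only instructions registered via setValue have a NodeMap entry;
      // without one, the metadata is silently dropped.
      auto It = NodeMap.find(&I);
      if (It != NodeMap.end())
        DAG.addPCSections(It->second.getNode(), MD);
    }
  }

With the mapping in place, !pcsections on e.g. an llvm.memcpy call survives
into MIR as the "pcsections !0" operands checked in the tests below.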

Reviewed By: melver

Differential Revision: https://reviews.llvm.org/D141048

Added: 
    llvm/test/CodeGen/AArch64/pcsections-memtransfer.ll
    llvm/test/CodeGen/X86/pcsections-memtransfer.ll

Modified: 
    llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 0bdfdac6a65f..e6454dd49e31 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -5943,6 +5943,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
         /* AlwaysInline */ false, isTC, MachinePointerInfo(I.getArgOperand(0)),
         MachinePointerInfo(I.getArgOperand(1)), I.getAAMetadata(), AA);
     updateDAGForMaybeTailCall(MC);
+    setValue(&I, MC);
     return;
   }
   case Intrinsic::memcpy_inline: {
@@ -5964,6 +5965,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
         /* AlwaysInline */ true, isTC, MachinePointerInfo(I.getArgOperand(0)),
         MachinePointerInfo(I.getArgOperand(1)), I.getAAMetadata(), AA);
     updateDAGForMaybeTailCall(MC);
+    setValue(&I, MC);
     return;
   }
   case Intrinsic::memset: {
@@ -5980,6 +5982,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
         Root, sdl, Op1, Op2, Op3, Alignment, isVol, /* AlwaysInline */ false,
         isTC, MachinePointerInfo(I.getArgOperand(0)), I.getAAMetadata());
     updateDAGForMaybeTailCall(MS);
+    setValue(&I, MS);
     return;
   }
   case Intrinsic::memset_inline: {
@@ -5998,6 +6001,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
                                MachinePointerInfo(I.getArgOperand(0)),
                                I.getAAMetadata());
     updateDAGForMaybeTailCall(MC);
+    setValue(&I, MC);
     return;
   }
   case Intrinsic::memmove: {
@@ -6019,6 +6023,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
                                 MachinePointerInfo(I.getArgOperand(1)),
                                 I.getAAMetadata(), AA);
     updateDAGForMaybeTailCall(MM);
+    setValue(&I, MM);
     return;
   }
   case Intrinsic::memcpy_element_unordered_atomic: {
@@ -6035,6 +6040,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
                             isTC, MachinePointerInfo(MI.getRawDest()),
                             MachinePointerInfo(MI.getRawSource()));
     updateDAGForMaybeTailCall(MC);
+    setValue(&I, MC);
     return;
   }
   case Intrinsic::memmove_element_unordered_atomic: {
@@ -6051,6 +6057,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
                              isTC, MachinePointerInfo(MI.getRawDest()),
                              MachinePointerInfo(MI.getRawSource()));
     updateDAGForMaybeTailCall(MC);
+    setValue(&I, MC);
     return;
   }
   case Intrinsic::memset_element_unordered_atomic: {
@@ -6066,6 +6073,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
         DAG.getAtomicMemset(getRoot(), sdl, Dst, Val, Length, LengthTy, ElemSz,
                             isTC, MachinePointerInfo(MI.getRawDest()));
     updateDAGForMaybeTailCall(MC);
+    setValue(&I, MC);
     return;
   }
   case Intrinsic::call_preallocated_setup: {

diff --git a/llvm/test/CodeGen/AArch64/pcsections-memtransfer.ll b/llvm/test/CodeGen/AArch64/pcsections-memtransfer.ll
new file mode 100644
index 000000000000..ce9facbc5fe6
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/pcsections-memtransfer.ll
@@ -0,0 +1,179 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc < %s -global-isel=0 -mtriple=aarch64-unknown-linux-gnu -stop-after=aarch64-expand-pseudo -verify-machineinstrs | FileCheck %s
+
+define i64 @call_memcpy_intrinsic(ptr %src, ptr %dst, i64 %len) {
+  ; CHECK-LABEL: name: call_memcpy_intrinsic
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK-NEXT:   liveins: $x0, $x1, $x2, $x19, $lr
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   early-clobber $sp = frame-setup STPXpre killed $lr, killed $x19, $sp, -2 :: (store (s64) into %stack.1), (store (s64) into %stack.0)
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 16
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $w19, -8
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $w30, -16
+  ; CHECK-NEXT:   $x19 = ORRXrs $xzr, $x1, 0
+  ; CHECK-NEXT:   BL &memcpy, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2, implicit-def $sp, implicit-def dead $x0, pcsections !0
+  ; CHECK-NEXT:   renamable $x0 = LDRXui killed renamable $x19, 0 :: (load (s64) from %ir.dst)
+  ; CHECK-NEXT:   early-clobber $sp, $lr, $x19 = frame-destroy LDPXpost $sp, 2 :: (load (s64) from %stack.1), (load (s64) from %stack.0)
+  ; CHECK-NEXT:   RET undef $lr, implicit $x0
+  call void @llvm.memcpy.p0.p0.i64(ptr %src, ptr %dst, i64 %len, i1 1), !pcsections !0
+  %val = load i64, ptr %dst
+  ret i64 %val
+}
+
+define i64 @call_memcpy_intrinsic_sm(ptr %src, ptr %dst) {
+  ; CHECK-LABEL: name: call_memcpy_intrinsic_sm
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK-NEXT:   liveins: $x0, $x1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   renamable $w8 = LDRBBui renamable $x1, 0, pcsections !0 :: (volatile load (s8) from %ir.dst)
+  ; CHECK-NEXT:   STRBBui killed renamable $w8, killed renamable $x0, 0, pcsections !0 :: (volatile store (s8) into %ir.src)
+  ; CHECK-NEXT:   renamable $x0 = LDRXui killed renamable $x1, 0 :: (load (s64) from %ir.dst)
+  ; CHECK-NEXT:   RET undef $lr, implicit $x0
+  call void @llvm.memcpy.p0.p0.i64(ptr %src, ptr %dst, i64 1, i1 1), !pcsections !0
+  %val = load i64, ptr %dst
+  ret i64 %val
+}
+
+define i64 @call_memcpy_inline_intrinsic(ptr %src, ptr %dst) {
+  ; CHECK-LABEL: name: call_memcpy_inline_intrinsic
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK-NEXT:   liveins: $x0, $x1
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   renamable $w8 = LDRBBui renamable $x1, 0, pcsections !0 :: (volatile load (s8) from %ir.dst)
+  ; CHECK-NEXT:   STRBBui killed renamable $w8, killed renamable $x0, 0, pcsections !0 :: (volatile store (s8) into %ir.src)
+  ; CHECK-NEXT:   renamable $x0 = LDRXui killed renamable $x1, 0 :: (load (s64) from %ir.dst)
+  ; CHECK-NEXT:   RET undef $lr, implicit $x0
+  call void @llvm.memcpy.inline.p0.p0.i64(ptr %src, ptr %dst, i64 1, i1 1), !pcsections !0
+  %val = load i64, ptr %dst
+  ret i64 %val
+}
+
+define i64 @call_memmove_intrinsic(ptr %src, ptr %dst, i64 %len) {
+  ; CHECK-LABEL: name: call_memmove_intrinsic
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK-NEXT:   liveins: $x0, $x1, $x2, $x19, $lr
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   early-clobber $sp = frame-setup STPXpre killed $lr, killed $x19, $sp, -2 :: (store (s64) into %stack.1), (store (s64) into %stack.0)
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 16
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $w19, -8
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $w30, -16
+  ; CHECK-NEXT:   $x19 = ORRXrs $xzr, $x1, 0
+  ; CHECK-NEXT:   BL &memmove, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit $x0, implicit $x1, implicit $x2, implicit-def $sp, implicit-def dead $x0, pcsections !0
+  ; CHECK-NEXT:   renamable $x0 = LDRXui killed renamable $x19, 0 :: (load (s64) from %ir.dst)
+  ; CHECK-NEXT:   early-clobber $sp, $lr, $x19 = frame-destroy LDPXpost $sp, 2 :: (load (s64) from %stack.1), (load (s64) from %stack.0)
+  ; CHECK-NEXT:   RET undef $lr, implicit $x0
+  call void @llvm.memmove.p0.p0.i64(ptr %src, ptr %dst, i64 %len, i1 1), !pcsections !0
+  %val = load i64, ptr %dst
+  ret i64 %val
+}
+
+define i64 @call_memset_intrinsic(ptr %dst, i64 %len) {
+  ; CHECK-LABEL: name: call_memset_intrinsic
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK-NEXT:   liveins: $x0, $x1, $x19, $lr
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   early-clobber $sp = frame-setup STPXpre killed $lr, killed $x19, $sp, -2 :: (store (s64) into %stack.1), (store (s64) into %stack.0)
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 16
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $w19, -8
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $w30, -16
+  ; CHECK-NEXT:   $x2 = ORRXrs $xzr, $x1, 0
+  ; CHECK-NEXT:   $x19 = ORRXrs $xzr, $x0, 0
+  ; CHECK-NEXT:   $w1 = ORRWrs $wzr, $wzr, 0
+  ; CHECK-NEXT:   BL &memset, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit $x0, implicit $w1, implicit $x2, implicit-def $sp, implicit-def dead $x0, pcsections !0
+  ; CHECK-NEXT:   renamable $x0 = LDRXui killed renamable $x19, 0 :: (load (s64) from %ir.dst)
+  ; CHECK-NEXT:   early-clobber $sp, $lr, $x19 = frame-destroy LDPXpost $sp, 2 :: (load (s64) from %stack.1), (load (s64) from %stack.0)
+  ; CHECK-NEXT:   RET undef $lr, implicit $x0
+  call void @llvm.memset.p0.p0.i64(ptr %dst, i8 0, i64 %len, i1 1), !pcsections !0
+  %val = load i64, ptr %dst
+  ret i64 %val
+}
+
+define i64 @call_memset_inline_intrinsic(ptr %dst) {
+  ; CHECK-LABEL: name: call_memset_inline_intrinsic
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK-NEXT:   liveins: $x0
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   STRBBui $wzr, renamable $x0, 0, pcsections !0 :: (volatile store (s8) into %ir.dst)
+  ; CHECK-NEXT:   renamable $x0 = LDRXui killed renamable $x0, 0 :: (load (s64) from %ir.dst)
+  ; CHECK-NEXT:   RET undef $lr, implicit $x0
+  call void @llvm.memset.inline.p0.p0.i64(ptr %dst, i8 0, i64 1, i1 1), !pcsections !0
+  %val = load i64, ptr %dst
+  ret i64 %val
+}
+
+define i64 @call_memcpy_element_unordered_atomic_intrinsic() {
+  ; CHECK-LABEL: name: call_memcpy_element_unordered_atomic_intrinsic
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK-NEXT:   liveins: $lr
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   early-clobber $sp = frame-setup STRXpre killed $lr, $sp, -16 :: (store (s64) into %stack.2)
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 16
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $w30, -16
+  ; CHECK-NEXT:   $x0 = ADDXri $sp, 12, 0
+  ; CHECK-NEXT:   $x1 = ADDXri $sp, 8, 0
+  ; CHECK-NEXT:   dead $w2 = MOVZWi 1, 0, implicit-def $x2
+  ; CHECK-NEXT:   BL &__llvm_memcpy_element_unordered_atomic_1, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit $x0, implicit killed $x1, implicit killed $x2, implicit-def $sp, pcsections !0
+  ; CHECK-NEXT:   renamable $x0 = LDRXui $sp, 1 :: (load (s64) from %ir.dst)
+  ; CHECK-NEXT:   early-clobber $sp, $lr = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2)
+  ; CHECK-NEXT:   RET undef $lr, implicit $x0
+  %src = alloca i32, align 1
+  %dst = alloca i32, align 1
+  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %src, ptr align 1 %dst, i64 1, i32 1), !pcsections !0
+  %val = load i64, ptr %dst
+  ret i64 %val
+}
+
+define i64 @call_memmove_element_unordered_atomic_intrinsic() {
+  ; CHECK-LABEL: name: call_memmove_element_unordered_atomic_intrinsic
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK-NEXT:   liveins: $lr
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   early-clobber $sp = frame-setup STRXpre killed $lr, $sp, -16 :: (store (s64) into %stack.2)
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 16
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $w30, -16
+  ; CHECK-NEXT:   $x0 = ADDXri $sp, 12, 0
+  ; CHECK-NEXT:   $x1 = ADDXri $sp, 8, 0
+  ; CHECK-NEXT:   dead $w2 = MOVZWi 1, 0, implicit-def $x2
+  ; CHECK-NEXT:   BL &__llvm_memmove_element_unordered_atomic_1, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit $x0, implicit killed $x1, implicit killed $x2, implicit-def $sp, pcsections !0
+  ; CHECK-NEXT:   renamable $x0 = LDRXui $sp, 1 :: (load (s64) from %ir.dst)
+  ; CHECK-NEXT:   early-clobber $sp, $lr = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.2)
+  ; CHECK-NEXT:   RET undef $lr, implicit $x0
+  %src = alloca i32, align 1
+  %dst = alloca i32, align 1
+  call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %src, ptr align 1 %dst, i64 1, i32 1), !pcsections !0
+  %val = load i64, ptr %dst
+  ret i64 %val
+}
+
+define i64 @call_memset_element_unordered_atomic_intrinsic() {
+  ; CHECK-LABEL: name: call_memset_element_unordered_atomic_intrinsic
+  ; CHECK: bb.0 (%ir-block.0):
+  ; CHECK-NEXT:   liveins: $lr
+  ; CHECK-NEXT: {{  $}}
+  ; CHECK-NEXT:   early-clobber $sp = frame-setup STRXpre killed $lr, $sp, -16 :: (store (s64) into %stack.1)
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 16
+  ; CHECK-NEXT:   frame-setup CFI_INSTRUCTION offset $w30, -16
+  ; CHECK-NEXT:   $x0 = ADDXri $sp, 12, 0
+  ; CHECK-NEXT:   $w1 = ORRWrs $wzr, $wzr, 0
+  ; CHECK-NEXT:   dead $w2 = MOVZWi 1, 0, implicit-def $x2
+  ; CHECK-NEXT:   BL &__llvm_memset_element_unordered_atomic_1, csr_aarch64_aapcs, implicit-def dead $lr, implicit $sp, implicit $x0, implicit killed $w1, implicit killed $x2, implicit-def $sp, pcsections !0
+  ; CHECK-NEXT:   renamable $x0 = LDURXi $sp, 12 :: (load (s64) from %ir.dst)
+  ; CHECK-NEXT:   early-clobber $sp, $lr = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.1)
+  ; CHECK-NEXT:   RET undef $lr, implicit $x0
+  %dst = alloca i32, align 1
+  call void @llvm.memset.element.unordered.atomic.p0.p0.i64(ptr align 1 %dst, i8 0, i64 1, i32 1), !pcsections !0
+  %val = load i64, ptr %dst
+  ret i64 %val
+}
+
+
+!0 = !{!"foo"}
+
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
+declare void @llvm.memcpy.inline.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
+declare void @llvm.memset.p0.p0.i64(ptr nocapture, i8, i64, i1)
+declare void @llvm.memset.inline.p0.p0.i64(ptr nocapture, i8, i64, i1)
+declare void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i32)
+declare void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i32)
+declare void @llvm.memset.element.unordered.atomic.p0.p0.i64(ptr nocapture writeonly, i8, i64, i32)

diff --git a/llvm/test/CodeGen/X86/pcsections-memtransfer.ll b/llvm/test/CodeGen/X86/pcsections-memtransfer.ll
new file mode 100644
index 000000000000..408c3696656e
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pcsections-memtransfer.ll
@@ -0,0 +1,348 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+; RUN: llc < %s -global-isel=0 -mtriple=x86_64-unknown-linux-gnu -stop-after=x86-pseudo -verify-machineinstrs | FileCheck %s -check-prefix=X64
+; RUN: llc < %s -global-isel=0 -mtriple=i686-unknown-linux-gnu -stop-after=x86-pseudo -verify-machineinstrs | FileCheck %s -check-prefix=X32
+
+define i64 @call_memcpy_intrinsic(ptr %src, ptr %dst, i64 %len) {
+  ; X64-LABEL: name: call_memcpy_intrinsic
+  ; X64: bb.0 (%ir-block.0):
+  ; X64-NEXT:   liveins: $rdi, $rdx, $rsi, $rbx
+  ; X64-NEXT: {{  $}}
+  ; X64-NEXT:   frame-setup PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp
+  ; X64-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 16
+  ; X64-NEXT:   CFI_INSTRUCTION offset $rbx, -16
+  ; X64-NEXT:   $rbx = MOV64rr $rsi
+  ; X64-NEXT:   CALL64pcrel32 target-flags(x86-plt) &memcpy, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax, pcsections !0
+  ; X64-NEXT:   renamable $rax = MOV64rm killed renamable $rbx, 1, $noreg, 0, $noreg :: (load (s64) from %ir.dst)
+  ; X64-NEXT:   $rbx = frame-destroy POP64r implicit-def $rsp, implicit $rsp
+  ; X64-NEXT:   frame-destroy CFI_INSTRUCTION def_cfa_offset 8
+  ; X64-NEXT:   RET64 $rax
+  ; X32-LABEL: name: call_memcpy_intrinsic
+  ; X32: bb.0 (%ir-block.0):
+  ; X32-NEXT:   liveins: $esi
+  ; X32-NEXT: {{  $}}
+  ; X32-NEXT:   frame-setup PUSH32r killed $esi, implicit-def $esp, implicit $esp
+  ; X32-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 8
+  ; X32-NEXT:   $esp = frame-setup SUB32ri8 $esp, 8, implicit-def dead $eflags
+  ; X32-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 16
+  ; X32-NEXT:   CFI_INSTRUCTION offset $esi, -8
+  ; X32-NEXT:   renamable $esi = MOV32rm $esp, 1, $noreg, 20, $noreg, pcsections !0 :: (load (s32) from %fixed-stack.3)
+  ; X32-NEXT:   $esp = SUB32ri8 $esp, 4, implicit-def dead $eflags
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset 4
+  ; X32-NEXT:   PUSH32rmm $esp, 1, $noreg, 28, $noreg, implicit-def $esp, implicit $esp :: (load (s32) from %fixed-stack.2, align 8), (store (s32) into stack + 8)
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset 4
+  ; X32-NEXT:   PUSH32r renamable $esi, implicit-def $esp, implicit $esp :: (store (s32) into stack + 4)
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset 4
+  ; X32-NEXT:   PUSH32rmm $esp, 1, $noreg, 28, $noreg, implicit-def $esp, implicit $esp :: (load (s32) from %fixed-stack.4, align 16), (store (s32) into stack)
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset 4
+  ; X32-NEXT:   CALLpcrel32 &memcpy, csr_32, implicit $esp, implicit $ssp, implicit-def $esp, implicit-def $ssp, implicit-def dead $eax, pcsections !0
+  ; X32-NEXT:   $esp = ADD32ri8 $esp, 16, implicit-def dead $eflags
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset -16
+  ; X32-NEXT:   renamable $eax = MOV32rm renamable $esi, 1, $noreg, 0, $noreg :: (load (s32) from %ir.dst)
+  ; X32-NEXT:   renamable $edx = MOV32rm killed renamable $esi, 1, $noreg, 4, $noreg :: (load (s32) from %ir.dst + 4)
+  ; X32-NEXT:   $esp = frame-destroy ADD32ri8 $esp, 8, implicit-def dead $eflags
+  ; X32-NEXT:   frame-destroy CFI_INSTRUCTION def_cfa_offset 8
+  ; X32-NEXT:   $esi = frame-destroy POP32r implicit-def $esp, implicit $esp
+  ; X32-NEXT:   frame-destroy CFI_INSTRUCTION def_cfa_offset 4
+  ; X32-NEXT:   RET32 $eax, $edx
+  call void @llvm.memcpy.p0.p0.i64(ptr %src, ptr %dst, i64 %len, i1 1), !pcsections !0
+  %val = load i64, ptr %dst
+  ret i64 %val
+}
+
+define i64 @call_memcpy_intrinsic_sm(ptr %src, ptr %dst) {
+  ; X64-LABEL: name: call_memcpy_intrinsic_sm
+  ; X64: bb.0 (%ir-block.0):
+  ; X64-NEXT:   liveins: $rdi, $rsi
+  ; X64-NEXT: {{  $}}
+  ; X64-NEXT:   $eax = MOVZX32rm8 renamable $rsi, 1, $noreg, 0, $noreg :: (volatile load (s8) from %ir.dst)
+  ; X64-NEXT:   MOV8mr killed renamable $rdi, 1, $noreg, 0, $noreg, killed renamable $al, pcsections !0 :: (volatile store (s8) into %ir.src)
+  ; X64-NEXT:   renamable $rax = MOV64rm killed renamable $rsi, 1, $noreg, 0, $noreg :: (load (s64) from %ir.dst)
+  ; X64-NEXT:   RET64 $rax
+  ; X32-LABEL: name: call_memcpy_intrinsic_sm
+  ; X32: bb.0 (%ir-block.0):
+  ; X32-NEXT:   renamable $eax = MOV32rm $esp, 1, $noreg, 4, $noreg, pcsections !0 :: (load (s32) from %fixed-stack.1, align 16)
+  ; X32-NEXT:   renamable $ecx = MOV32rm $esp, 1, $noreg, 8, $noreg, pcsections !0 :: (load (s32) from %fixed-stack.0)
+  ; X32-NEXT:   $edx = MOVZX32rm8 renamable $ecx, 1, $noreg, 0, $noreg :: (volatile load (s8) from %ir.dst)
+  ; X32-NEXT:   MOV8mr killed renamable $eax, 1, $noreg, 0, $noreg, killed renamable $dl, pcsections !0 :: (volatile store (s8) into %ir.src)
+  ; X32-NEXT:   renamable $eax = MOV32rm renamable $ecx, 1, $noreg, 0, $noreg :: (load (s32) from %ir.dst)
+  ; X32-NEXT:   renamable $edx = MOV32rm killed renamable $ecx, 1, $noreg, 4, $noreg :: (load (s32) from %ir.dst + 4)
+  ; X32-NEXT:   RET32 $eax, $edx
+  call void @llvm.memcpy.p0.p0.i64(ptr %src, ptr %dst, i64 1, i1 1), !pcsections !0
+  %val = load i64, ptr %dst
+  ret i64 %val
+}
+
+define i64 @call_memcpy_inline_intrinsic(ptr %src, ptr %dst) {
+  ; X64-LABEL: name: call_memcpy_inline_intrinsic
+  ; X64: bb.0 (%ir-block.0):
+  ; X64-NEXT:   liveins: $rdi, $rsi
+  ; X64-NEXT: {{  $}}
+  ; X64-NEXT:   $eax = MOVZX32rm8 renamable $rsi, 1, $noreg, 0, $noreg :: (volatile load (s8) from %ir.dst)
+  ; X64-NEXT:   MOV8mr killed renamable $rdi, 1, $noreg, 0, $noreg, killed renamable $al, pcsections !0 :: (volatile store (s8) into %ir.src)
+  ; X64-NEXT:   renamable $rax = MOV64rm killed renamable $rsi, 1, $noreg, 0, $noreg :: (load (s64) from %ir.dst)
+  ; X64-NEXT:   RET64 $rax
+  ; X32-LABEL: name: call_memcpy_inline_intrinsic
+  ; X32: bb.0 (%ir-block.0):
+  ; X32-NEXT:   renamable $eax = MOV32rm $esp, 1, $noreg, 4, $noreg, pcsections !0 :: (load (s32) from %fixed-stack.1, align 16)
+  ; X32-NEXT:   renamable $ecx = MOV32rm $esp, 1, $noreg, 8, $noreg, pcsections !0 :: (load (s32) from %fixed-stack.0)
+  ; X32-NEXT:   $edx = MOVZX32rm8 renamable $ecx, 1, $noreg, 0, $noreg :: (volatile load (s8) from %ir.dst)
+  ; X32-NEXT:   MOV8mr killed renamable $eax, 1, $noreg, 0, $noreg, killed renamable $dl, pcsections !0 :: (volatile store (s8) into %ir.src)
+  ; X32-NEXT:   renamable $eax = MOV32rm renamable $ecx, 1, $noreg, 0, $noreg :: (load (s32) from %ir.dst)
+  ; X32-NEXT:   renamable $edx = MOV32rm killed renamable $ecx, 1, $noreg, 4, $noreg :: (load (s32) from %ir.dst + 4)
+  ; X32-NEXT:   RET32 $eax, $edx
+  call void @llvm.memcpy.inline.p0.p0.i64(ptr %src, ptr %dst, i64 1, i1 1), !pcsections !0
+  %val = load i64, ptr %dst
+  ret i64 %val
+}
+
+define i64 @call_memmove_intrinsic(ptr %src, ptr %dst, i64 %len) {
+  ; X64-LABEL: name: call_memmove_intrinsic
+  ; X64: bb.0 (%ir-block.0):
+  ; X64-NEXT:   liveins: $rdi, $rdx, $rsi, $rbx
+  ; X64-NEXT: {{  $}}
+  ; X64-NEXT:   frame-setup PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp
+  ; X64-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 16
+  ; X64-NEXT:   CFI_INSTRUCTION offset $rbx, -16
+  ; X64-NEXT:   $rbx = MOV64rr $rsi
+  ; X64-NEXT:   CALL64pcrel32 target-flags(x86-plt) &memmove, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit $rdx, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax, pcsections !0
+  ; X64-NEXT:   renamable $rax = MOV64rm killed renamable $rbx, 1, $noreg, 0, $noreg :: (load (s64) from %ir.dst)
+  ; X64-NEXT:   $rbx = frame-destroy POP64r implicit-def $rsp, implicit $rsp
+  ; X64-NEXT:   frame-destroy CFI_INSTRUCTION def_cfa_offset 8
+  ; X64-NEXT:   RET64 $rax
+  ; X32-LABEL: name: call_memmove_intrinsic
+  ; X32: bb.0 (%ir-block.0):
+  ; X32-NEXT:   liveins: $esi
+  ; X32-NEXT: {{  $}}
+  ; X32-NEXT:   frame-setup PUSH32r killed $esi, implicit-def $esp, implicit $esp
+  ; X32-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 8
+  ; X32-NEXT:   $esp = frame-setup SUB32ri8 $esp, 8, implicit-def dead $eflags
+  ; X32-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 16
+  ; X32-NEXT:   CFI_INSTRUCTION offset $esi, -8
+  ; X32-NEXT:   renamable $esi = MOV32rm $esp, 1, $noreg, 20, $noreg, pcsections !0 :: (load (s32) from %fixed-stack.3)
+  ; X32-NEXT:   $esp = SUB32ri8 $esp, 4, implicit-def dead $eflags
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset 4
+  ; X32-NEXT:   PUSH32rmm $esp, 1, $noreg, 28, $noreg, implicit-def $esp, implicit $esp :: (load (s32) from %fixed-stack.2, align 8), (store (s32) into stack + 8)
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset 4
+  ; X32-NEXT:   PUSH32r renamable $esi, implicit-def $esp, implicit $esp :: (store (s32) into stack + 4)
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset 4
+  ; X32-NEXT:   PUSH32rmm $esp, 1, $noreg, 28, $noreg, implicit-def $esp, implicit $esp :: (load (s32) from %fixed-stack.4, align 16), (store (s32) into stack)
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset 4
+  ; X32-NEXT:   CALLpcrel32 &memmove, csr_32, implicit $esp, implicit $ssp, implicit-def $esp, implicit-def $ssp, implicit-def dead $eax, pcsections !0
+  ; X32-NEXT:   $esp = ADD32ri8 $esp, 16, implicit-def dead $eflags
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset -16
+  ; X32-NEXT:   renamable $eax = MOV32rm renamable $esi, 1, $noreg, 0, $noreg :: (load (s32) from %ir.dst)
+  ; X32-NEXT:   renamable $edx = MOV32rm killed renamable $esi, 1, $noreg, 4, $noreg :: (load (s32) from %ir.dst + 4)
+  ; X32-NEXT:   $esp = frame-destroy ADD32ri8 $esp, 8, implicit-def dead $eflags
+  ; X32-NEXT:   frame-destroy CFI_INSTRUCTION def_cfa_offset 8
+  ; X32-NEXT:   $esi = frame-destroy POP32r implicit-def $esp, implicit $esp
+  ; X32-NEXT:   frame-destroy CFI_INSTRUCTION def_cfa_offset 4
+  ; X32-NEXT:   RET32 $eax, $edx
+  call void @llvm.memmove.p0.p0.i64(ptr %src, ptr %dst, i64 %len, i1 1), !pcsections !0
+  %val = load i64, ptr %dst
+  ret i64 %val
+}
+
+define i64 @call_memset_intrinsic(ptr %dst, i64 %len) {
+  ; X64-LABEL: name: call_memset_intrinsic
+  ; X64: bb.0 (%ir-block.0):
+  ; X64-NEXT:   liveins: $rdi, $rsi, $rbx
+  ; X64-NEXT: {{  $}}
+  ; X64-NEXT:   frame-setup PUSH64r killed $rbx, implicit-def $rsp, implicit $rsp
+  ; X64-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 16
+  ; X64-NEXT:   CFI_INSTRUCTION offset $rbx, -16
+  ; X64-NEXT:   $rdx = MOV64rr $rsi
+  ; X64-NEXT:   $rbx = MOV64rr $rdi
+  ; X64-NEXT:   $esi = XOR32rr undef $esi, undef $esi, implicit-def dead $eflags, pcsections !0
+  ; X64-NEXT:   CALL64pcrel32 target-flags(x86-plt) &memset, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $esi, implicit $rdx, implicit-def $rsp, implicit-def $ssp, implicit-def dead $rax, pcsections !0
+  ; X64-NEXT:   renamable $rax = MOV64rm killed renamable $rbx, 1, $noreg, 0, $noreg :: (load (s64) from %ir.dst)
+  ; X64-NEXT:   $rbx = frame-destroy POP64r implicit-def $rsp, implicit $rsp
+  ; X64-NEXT:   frame-destroy CFI_INSTRUCTION def_cfa_offset 8
+  ; X64-NEXT:   RET64 $rax
+  ; X32-LABEL: name: call_memset_intrinsic
+  ; X32: bb.0 (%ir-block.0):
+  ; X32-NEXT:   liveins: $esi
+  ; X32-NEXT: {{  $}}
+  ; X32-NEXT:   frame-setup PUSH32r killed $esi, implicit-def $esp, implicit $esp
+  ; X32-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 8
+  ; X32-NEXT:   $esp = frame-setup SUB32ri8 $esp, 8, implicit-def dead $eflags
+  ; X32-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 16
+  ; X32-NEXT:   CFI_INSTRUCTION offset $esi, -8
+  ; X32-NEXT:   renamable $esi = MOV32rm $esp, 1, $noreg, 16, $noreg, pcsections !0 :: (load (s32) from %fixed-stack.3, align 16)
+  ; X32-NEXT:   $esp = SUB32ri8 $esp, 4, implicit-def dead $eflags
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset 4
+  ; X32-NEXT:   PUSH32rmm $esp, 1, $noreg, 24, $noreg, implicit-def $esp, implicit $esp :: (load (s32) from %fixed-stack.2), (store (s32) into stack + 8)
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset 4
+  ; X32-NEXT:   PUSH32i8 0, implicit-def $esp, implicit $esp :: (store (s32) into stack + 4)
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset 4
+  ; X32-NEXT:   PUSH32r renamable $esi, implicit-def $esp, implicit $esp :: (store (s32) into stack)
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset 4
+  ; X32-NEXT:   CALLpcrel32 &memset, csr_32, implicit $esp, implicit $ssp, implicit-def $esp, implicit-def $ssp, implicit-def dead $eax, pcsections !0
+  ; X32-NEXT:   $esp = ADD32ri8 $esp, 16, implicit-def dead $eflags
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset -16
+  ; X32-NEXT:   renamable $eax = MOV32rm renamable $esi, 1, $noreg, 0, $noreg :: (load (s32) from %ir.dst)
+  ; X32-NEXT:   renamable $edx = MOV32rm killed renamable $esi, 1, $noreg, 4, $noreg :: (load (s32) from %ir.dst + 4)
+  ; X32-NEXT:   $esp = frame-destroy ADD32ri8 $esp, 8, implicit-def dead $eflags
+  ; X32-NEXT:   frame-destroy CFI_INSTRUCTION def_cfa_offset 8
+  ; X32-NEXT:   $esi = frame-destroy POP32r implicit-def $esp, implicit $esp
+  ; X32-NEXT:   frame-destroy CFI_INSTRUCTION def_cfa_offset 4
+  ; X32-NEXT:   RET32 $eax, $edx
+  call void @llvm.memset.p0.p0.i64(ptr %dst, i8 0, i64 %len, i1 1), !pcsections !0
+  %val = load i64, ptr %dst
+  ret i64 %val
+}
+
+define i64 @call_memset_inline_intrinsic(ptr %dst) {
+  ; X64-LABEL: name: call_memset_inline_intrinsic
+  ; X64: bb.0 (%ir-block.0):
+  ; X64-NEXT:   liveins: $rdi
+  ; X64-NEXT: {{  $}}
+  ; X64-NEXT:   MOV8mi renamable $rdi, 1, $noreg, 0, $noreg, 0, pcsections !0 :: (volatile store (s8) into %ir.dst)
+  ; X64-NEXT:   renamable $rax = MOV64rm killed renamable $rdi, 1, $noreg, 0, $noreg :: (load (s64) from %ir.dst)
+  ; X64-NEXT:   RET64 $rax
+  ; X32-LABEL: name: call_memset_inline_intrinsic
+  ; X32: bb.0 (%ir-block.0):
+  ; X32-NEXT:   renamable $eax = MOV32rm $esp, 1, $noreg, 4, $noreg, pcsections !0 :: (load (s32) from %fixed-stack.0, align 16)
+  ; X32-NEXT:   MOV8mi renamable $eax, 1, $noreg, 0, $noreg, 0, pcsections !0 :: (volatile store (s8) into %ir.dst)
+  ; X32-NEXT:   renamable $edx = MOV32rm renamable $eax, 1, $noreg, 4, $noreg :: (load (s32) from %ir.dst + 4)
+  ; X32-NEXT:   renamable $eax = MOV32rm killed renamable $eax, 1, $noreg, 0, $noreg :: (load (s32) from %ir.dst)
+  ; X32-NEXT:   RET32 $eax, $edx
+  call void @llvm.memset.inline.p0.p0.i64(ptr %dst, i8 0, i64 1, i1 1), !pcsections !0
+  %val = load i64, ptr %dst
+  ret i64 %val
+}
+
+define i64 @call_memcpy_element_unordered_atomic_intrinsic() {
+  ; X64-LABEL: name: call_memcpy_element_unordered_atomic_intrinsic
+  ; X64: bb.0 (%ir-block.0):
+  ; X64-NEXT:   frame-setup PUSH64r undef $rax, implicit-def $rsp, implicit $rsp
+  ; X64-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 16
+  ; X64-NEXT:   renamable $rdi = LEA64r $rsp, 1, $noreg, 4, $noreg, pcsections !0
+  ; X64-NEXT:   $rsi = MOV64rr $rsp
+  ; X64-NEXT:   $edx = MOV32ri 1, implicit-def $rdx, pcsections !0
+  ; X64-NEXT:   CALL64pcrel32 target-flags(x86-plt) &__llvm_memcpy_element_unordered_atomic_1, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit killed $rdx, implicit-def $rsp, implicit-def $ssp, pcsections !0
+  ; X64-NEXT:   renamable $rax = MOV64rm $rsp, 1, $noreg, 0, $noreg :: (load (s64) from %ir.dst)
+  ; X64-NEXT:   $rcx = frame-destroy POP64r implicit-def $rsp, implicit $rsp
+  ; X64-NEXT:   frame-destroy CFI_INSTRUCTION def_cfa_offset 8
+  ; X64-NEXT:   RET64 $rax
+  ; X32-LABEL: name: call_memcpy_element_unordered_atomic_intrinsic
+  ; X32: bb.0 (%ir-block.0):
+  ; X32-NEXT:   $esp = frame-setup SUB32ri8 $esp, 12, implicit-def dead $eflags
+  ; X32-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 16
+  ; X32-NEXT:   renamable $eax = LEA32r $esp, 1, $noreg, 4, $noreg, pcsections !0
+  ; X32-NEXT:   renamable $ecx = LEA32r $esp, 1, $noreg, 8, $noreg, pcsections !0
+  ; X32-NEXT:   PUSH32i8 0, implicit-def $esp, implicit $esp :: (store (s32) into stack + 12)
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset 4
+  ; X32-NEXT:   PUSH32i8 1, implicit-def $esp, implicit $esp :: (store (s32) into stack + 8)
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset 4
+  ; X32-NEXT:   PUSH32r killed renamable $eax, implicit-def $esp, implicit $esp :: (store (s32) into stack + 4)
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset 4
+  ; X32-NEXT:   PUSH32r killed renamable $ecx, implicit-def $esp, implicit $esp :: (store (s32) into stack)
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset 4
+  ; X32-NEXT:   CALLpcrel32 &__llvm_memcpy_element_unordered_atomic_1, csr_32, implicit $esp, implicit $ssp, implicit-def $esp, implicit-def $ssp, pcsections !0
+  ; X32-NEXT:   $esp = ADD32ri8 $esp, 16, implicit-def dead $eflags
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset -16
+  ; X32-NEXT:   renamable $eax = MOV32rm $esp, 1, $noreg, 4, $noreg :: (load (s32) from %ir.dst)
+  ; X32-NEXT:   renamable $edx = MOV32rm $esp, 1, $noreg, 8, $noreg :: (load (s32) from %ir.dst + 4)
+  ; X32-NEXT:   $esp = frame-destroy ADD32ri8 $esp, 12, implicit-def dead $eflags
+  ; X32-NEXT:   frame-destroy CFI_INSTRUCTION def_cfa_offset 4
+  ; X32-NEXT:   RET32 $eax, $edx
+  %src = alloca i32, align 1
+  %dst = alloca i32, align 1
+  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr align 1 %src, ptr align 1 %dst, i64 1, i32 1), !pcsections !0
+  %val = load i64, ptr %dst
+  ret i64 %val
+}
+
+define i64 @call_memmove_element_unordered_atomic_intrinsic() {
+  ; X64-LABEL: name: call_memmove_element_unordered_atomic_intrinsic
+  ; X64: bb.0 (%ir-block.0):
+  ; X64-NEXT:   frame-setup PUSH64r undef $rax, implicit-def $rsp, implicit $rsp
+  ; X64-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 16
+  ; X64-NEXT:   renamable $rdi = LEA64r $rsp, 1, $noreg, 4, $noreg, pcsections !0
+  ; X64-NEXT:   $rsi = MOV64rr $rsp
+  ; X64-NEXT:   $edx = MOV32ri 1, implicit-def $rdx, pcsections !0
+  ; X64-NEXT:   CALL64pcrel32 target-flags(x86-plt) &__llvm_memmove_element_unordered_atomic_1, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $rsi, implicit killed $rdx, implicit-def $rsp, implicit-def $ssp, pcsections !0
+  ; X64-NEXT:   renamable $rax = MOV64rm $rsp, 1, $noreg, 0, $noreg :: (load (s64) from %ir.dst)
+  ; X64-NEXT:   $rcx = frame-destroy POP64r implicit-def $rsp, implicit $rsp
+  ; X64-NEXT:   frame-destroy CFI_INSTRUCTION def_cfa_offset 8
+  ; X64-NEXT:   RET64 $rax
+  ; X32-LABEL: name: call_memmove_element_unordered_atomic_intrinsic
+  ; X32: bb.0 (%ir-block.0):
+  ; X32-NEXT:   $esp = frame-setup SUB32ri8 $esp, 12, implicit-def dead $eflags
+  ; X32-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 16
+  ; X32-NEXT:   renamable $eax = LEA32r $esp, 1, $noreg, 4, $noreg, pcsections !0
+  ; X32-NEXT:   renamable $ecx = LEA32r $esp, 1, $noreg, 8, $noreg, pcsections !0
+  ; X32-NEXT:   PUSH32i8 0, implicit-def $esp, implicit $esp :: (store (s32) into stack + 12)
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset 4
+  ; X32-NEXT:   PUSH32i8 1, implicit-def $esp, implicit $esp :: (store (s32) into stack + 8)
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset 4
+  ; X32-NEXT:   PUSH32r killed renamable $eax, implicit-def $esp, implicit $esp :: (store (s32) into stack + 4)
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset 4
+  ; X32-NEXT:   PUSH32r killed renamable $ecx, implicit-def $esp, implicit $esp :: (store (s32) into stack)
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset 4
+  ; X32-NEXT:   CALLpcrel32 &__llvm_memmove_element_unordered_atomic_1, csr_32, implicit $esp, implicit $ssp, implicit-def $esp, implicit-def $ssp, pcsections !0
+  ; X32-NEXT:   $esp = ADD32ri8 $esp, 16, implicit-def dead $eflags
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset -16
+  ; X32-NEXT:   renamable $eax = MOV32rm $esp, 1, $noreg, 4, $noreg :: (load (s32) from %ir.dst)
+  ; X32-NEXT:   renamable $edx = MOV32rm $esp, 1, $noreg, 8, $noreg :: (load (s32) from %ir.dst + 4)
+  ; X32-NEXT:   $esp = frame-destroy ADD32ri8 $esp, 12, implicit-def dead $eflags
+  ; X32-NEXT:   frame-destroy CFI_INSTRUCTION def_cfa_offset 4
+  ; X32-NEXT:   RET32 $eax, $edx
+  %src = alloca i32, align 1
+  %dst = alloca i32, align 1
+  call void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr align 1 %src, ptr align 1 %dst, i64 1, i32 1), !pcsections !0
+  %val = load i64, ptr %dst
+  ret i64 %val
+}
+
+define i64 @call_memset_element_unordered_atomic_intrinsic() {
+  ; X64-LABEL: name: call_memset_element_unordered_atomic_intrinsic
+  ; X64: bb.0 (%ir-block.0):
+  ; X64-NEXT:   frame-setup PUSH64r undef $rax, implicit-def $rsp, implicit $rsp
+  ; X64-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 16
+  ; X64-NEXT:   renamable $rdi = LEA64r $rsp, 1, $noreg, 4, $noreg, pcsections !0
+  ; X64-NEXT:   $edx = MOV32ri 1, implicit-def $rdx, pcsections !0
+  ; X64-NEXT:   $esi = XOR32rr undef $esi, undef $esi, implicit-def dead $eflags, pcsections !0
+  ; X64-NEXT:   CALL64pcrel32 target-flags(x86-plt) &__llvm_memset_element_unordered_atomic_1, csr_64, implicit $rsp, implicit $ssp, implicit $rdi, implicit $esi, implicit killed $rdx, implicit-def $rsp, implicit-def $ssp, pcsections !0
+  ; X64-NEXT:   renamable $rax = MOV64rm $rsp, 1, $noreg, 4, $noreg :: (load (s64) from %ir.dst)
+  ; X64-NEXT:   $rcx = frame-destroy POP64r implicit-def $rsp, implicit $rsp
+  ; X64-NEXT:   frame-destroy CFI_INSTRUCTION def_cfa_offset 8
+  ; X64-NEXT:   RET64 $rax
+  ; X32-LABEL: name: call_memset_element_unordered_atomic_intrinsic
+  ; X32: bb.0 (%ir-block.0):
+  ; X32-NEXT:   $esp = frame-setup SUB32ri8 $esp, 12, implicit-def dead $eflags
+  ; X32-NEXT:   frame-setup CFI_INSTRUCTION def_cfa_offset 16
+  ; X32-NEXT:   renamable $eax = LEA32r $esp, 1, $noreg, 8, $noreg, pcsections !0
+  ; X32-NEXT:   PUSH32i8 0, implicit-def $esp, implicit $esp :: (store (s32) into stack + 12)
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset 4
+  ; X32-NEXT:   PUSH32i8 1, implicit-def $esp, implicit $esp :: (store (s32) into stack + 8)
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset 4
+  ; X32-NEXT:   PUSH32i8 0, implicit-def $esp, implicit $esp :: (store (s32) into stack + 4)
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset 4
+  ; X32-NEXT:   PUSH32r killed renamable $eax, implicit-def $esp, implicit $esp :: (store (s32) into stack)
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset 4
+  ; X32-NEXT:   CALLpcrel32 &__llvm_memset_element_unordered_atomic_1, csr_32, implicit $esp, implicit $ssp, implicit-def $esp, implicit-def $ssp, pcsections !0
+  ; X32-NEXT:   $esp = ADD32ri8 $esp, 16, implicit-def dead $eflags
+  ; X32-NEXT:   CFI_INSTRUCTION adjust_cfa_offset -16
+  ; X32-NEXT:   renamable $eax = MOV32rm $esp, 1, $noreg, 8, $noreg :: (load (s32) from %ir.dst)
+  ; X32-NEXT:   renamable $edx = MOV32rm $esp, 1, $noreg, 12, $noreg :: (load (s32) from %ir.dst + 4)
+  ; X32-NEXT:   $esp = frame-destroy ADD32ri8 $esp, 12, implicit-def dead $eflags
+  ; X32-NEXT:   frame-destroy CFI_INSTRUCTION def_cfa_offset 4
+  ; X32-NEXT:   RET32 $eax, $edx
+  %dst = alloca i32, align 1
+  call void @llvm.memset.element.unordered.atomic.p0.p0.i64(ptr align 1 %dst, i8 0, i64 1, i32 1), !pcsections !0
+  %val = load i64, ptr %dst
+  ret i64 %val
+}
+
+
+!0 = !{!"foo"}
+
+declare void @llvm.memcpy.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
+declare void @llvm.memcpy.inline.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i1)
+declare void @llvm.memmove.p0.p0.i64(ptr nocapture, ptr nocapture readonly, i64, i1)
+declare void @llvm.memset.p0.p0.i64(ptr nocapture, i8, i64, i1)
+declare void @llvm.memset.inline.p0.p0.i64(ptr nocapture, i8, i64, i1)
+declare void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i32)
+declare void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr nocapture writeonly, ptr nocapture readonly, i64, i32)
+declare void @llvm.memset.element.unordered.atomic.p0.p0.i64(ptr nocapture writeonly, i8, i64, i32)
