[llvm] 4c7f819 - [ARM][LowOverheadLoops] Ensure memory predication

Sam Parker via llvm-commits <llvm-commits at lists.llvm.org>
Wed Feb 5 05:19:25 PST 2020


Author: Sam Parker
Date: 2020-02-05T13:19:08Z
New Revision: 4c7f819204d8c52a25cc98a083791ad9db7d1ad9

URL: https://github.com/llvm/llvm-project/commit/4c7f819204d8c52a25cc98a083791ad9db7d1ad9
DIFF: https://github.com/llvm/llvm-project/commit/4c7f819204d8c52a25cc98a083791ad9db7d1ad9.diff

LOG: [ARM][LowOverheadLoops] Ensure memory predication

While validating each MVE instruction, check that every instruction
that touches memory is predicated upon the VCTP.

Differential Revision: https://reviews.llvm.org/D73616
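
The rule this patch enforces in LowOverheadLoop::ValidateMVEInst can be
summarised with a minimal standalone sketch (a sketch only, not the committed
code; it assumes IsUse is true only when the instruction is already known to
use the VCTP-produced predicate):

    #include "llvm/CodeGen/MachineInstr.h"

    // An MVE instruction that touches memory is acceptable only if it is
    // predicated upon the VCTP. An unpredicated load or store could touch
    // lanes beyond the remaining element count, so it must block tail
    // predication.
    static bool hasValidMemoryPredication(const llvm::MachineInstr &MI,
                                          bool IsUse) {
      if (!IsUse && MI.mayLoadOrStore())
        return false; // memory access not predicated on the VCTP
      return true;
    }

In both new MIR tests below, the CHECK lines show the loop still becoming a
plain low-overhead loop (t2DLS / t2LEUpdate) while the MVE_VCTP8 and MVE_VPST
remain, i.e. tail predication is not applied.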

Added: 
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/non-masked-load.mir
    llvm/test/CodeGen/Thumb2/LowOverheadLoops/non-masked-store.mir

Modified: 
    llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp b/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
index 1a99e29663b9..23087012d378 100644
--- a/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
+++ b/llvm/lib/Target/ARM/ARMLowOverheadLoops.cpp
@@ -616,7 +616,8 @@ bool LowOverheadLoop::ValidateMVEInst(MachineInstr* MI) {
     return false;
   }
 
-  return true;
+  // Ensure that all memory operations are predicated.
+  return !IsUse && MI->mayLoadOrStore() ? false : true;
 }
 
 bool ARMLowOverheadLoops::runOnMachineFunction(MachineFunction &mf) {

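The new return value is written as a ternary over a boolean condition; a
logically equivalent way to read it (shown for explanation only, not part of
the commit) is:

    return IsUse || !MI->mayLoadOrStore();
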
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/non-masked-load.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/non-masked-load.mir
new file mode 100644
index 000000000000..6cf0a2c26eeb
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/non-masked-load.mir
@@ -0,0 +1,203 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve %s -run-pass=arm-low-overhead-loops -o - --verify-machineinstrs | FileCheck %s
+
+--- |
+  define dso_local arm_aapcs_vfpcc zeroext i8 @non_masked_load(i8* nocapture readonly %a, i8* nocapture readonly %b, i32 %N) {
+  entry:
+    %cmp11 = icmp eq i32 %N, 0
+    %0 = add i32 %N, 15
+    %1 = lshr i32 %0, 4
+    %2 = shl nuw i32 %1, 4
+    %3 = add i32 %2, -16
+    %4 = lshr i32 %3, 4
+    %5 = add nuw nsw i32 %4, 1
+    br i1 %cmp11, label %for.cond.cleanup, label %vector.ph
+
+  vector.ph:                                        ; preds = %entry
+    call void @llvm.set.loop.iterations.i32(i32 %5)
+    %6 = shl i32 %4, 3
+    %7 = sub i32 %N, %6
+    br label %vector.body
+
+  vector.body:                                      ; preds = %vector.body, %vector.ph
+    %lsr.iv20 = phi i8* [ %scevgep21, %vector.body ], [ %b, %vector.ph ]
+    %lsr.iv = phi i8* [ %scevgep, %vector.body ], [ %a, %vector.ph ]
+    %vec.phi = phi <16 x i8> [ zeroinitializer, %vector.ph ], [ %13, %vector.body ]
+    %8 = phi i32 [ %5, %vector.ph ], [ %14, %vector.body ]
+    %9 = phi i32 [ %N, %vector.ph ], [ %11, %vector.body ]
+    %lsr.iv2022 = bitcast i8* %lsr.iv20 to <16 x i8>*
+    %lsr.iv19 = bitcast i8* %lsr.iv to <16 x i8>*
+    %10 = call <16 x i1> @llvm.arm.mve.vctp8(i32 %9)
+    %11 = sub i32 %9, 16
+    %wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %lsr.iv19, i32 1, <16 x i1> %10, <16 x i8> undef)
+    %wide.load16 = load <16 x i8>, <16 x i8>* %lsr.iv2022
+    %12 = add <16 x i8> %wide.masked.load, %vec.phi
+    %13 = add <16 x i8> %12, %wide.load16
+    %scevgep = getelementptr i8, i8* %lsr.iv, i32 16
+    %scevgep21 = getelementptr i8, i8* %lsr.iv20, i32 16
+    %14 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %8, i32 1)
+    %15 = icmp ne i32 %14, 0
+    br i1 %15, label %vector.body, label %middle.block
+
+  middle.block:                                     ; preds = %vector.body
+    %vec.phi.lcssa = phi <16 x i8> [ %vec.phi, %vector.body ]
+    %.lcssa = phi <16 x i8> [ %13, %vector.body ]
+    %16 = call <16 x i1> @llvm.arm.mve.vctp8(i32 %7)
+    %17 = select <16 x i1> %16, <16 x i8> %.lcssa, <16 x i8> %vec.phi.lcssa
+    %18 = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %17)
+    br label %for.cond.cleanup
+
+  for.cond.cleanup:                                 ; preds = %middle.block, %entry
+    %res.0.lcssa = phi i8 [ 0, %entry ], [ %18, %middle.block ]
+    ret i8 %res.0.lcssa
+  }
+
+  declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32 immarg, <16 x i1>, <16 x i8>) #1
+  declare i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8>) #2
+  declare void @llvm.set.loop.iterations.i32(i32) #3
+  declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #3
+  declare <16 x i1> @llvm.arm.mve.vctp8(i32) #4
+
+...
+---
+name:            non_masked_load
+alignment:       2
+exposesReturnsTwice: false
+legalized:       false
+regBankSelected: false
+selected:        false
+failedISel:      false
+tracksRegLiveness: true
+hasWinCFI:       false
+registers:       []
+liveins:
+  - { reg: '$r0', virtual-reg: '' }
+  - { reg: '$r1', virtual-reg: '' }
+  - { reg: '$r2', virtual-reg: '' }
+frameInfo:
+  isFrameAddressTaken: false
+  isReturnAddressTaken: false
+  hasStackMap:     false
+  hasPatchPoint:   false
+  stackSize:       8
+  offsetAdjustment: 0
+  maxAlignment:    4
+  adjustsStack:    false
+  hasCalls:        false
+  stackProtector:  ''
+  maxCallFrameSize: 0
+  cvBytesOfCalleeSavedRegisters: 0
+  hasOpaqueSPAdjustment: false
+  hasVAStart:      false
+  hasMustTailInVarArgFunc: false
+  localFrameSize:  0
+  savePoint:       ''
+  restorePoint:    ''
+fixedStack:      []
+stack:
+  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: true,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+callSites:       []
+constants:       []
+machineFunctionInfo: {}
+body:             |
+  ; CHECK-LABEL: name: non_masked_load
+  ; CHECK: bb.0.entry:
+  ; CHECK:   successors: %bb.1(0x80000000)
+  ; CHECK:   liveins: $lr, $r0, $r1, $r2
+  ; CHECK:   tCMPi8 renamable $r2, 0, 14, $noreg, implicit-def $cpsr
+  ; CHECK:   t2IT 0, 2, implicit-def $itstate
+  ; CHECK:   renamable $r0 = tMOVi8 $noreg, 0, 0, $cpsr, implicit killed $r0, implicit $itstate
+  ; CHECK:   renamable $r0 = tUXTB killed renamable $r0, 0, $cpsr, implicit killed $r0, implicit $itstate
+  ; CHECK:   tBX_RET 0, killed $cpsr, implicit $r0, implicit killed $itstate
+  ; CHECK:   frame-setup tPUSH 14, $noreg, killed $lr, implicit-def $sp, implicit $sp
+  ; CHECK:   frame-setup CFI_INSTRUCTION def_cfa_offset 8
+  ; CHECK:   frame-setup CFI_INSTRUCTION offset $lr, -4
+  ; CHECK:   frame-setup CFI_INSTRUCTION offset $r7, -8
+  ; CHECK:   dead $r7 = frame-setup tMOVr $sp, 14, $noreg
+  ; CHECK:   frame-setup CFI_INSTRUCTION def_cfa_register $r7
+  ; CHECK:   renamable $r3 = t2ADDri renamable $r2, 15, 14, $noreg, $noreg
+  ; CHECK:   renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q0
+  ; CHECK:   renamable $r3 = t2BICri killed renamable $r3, 15, 14, $noreg, $noreg
+  ; CHECK:   renamable $r12 = t2SUBri killed renamable $r3, 16, 14, $noreg, $noreg
+  ; CHECK:   renamable $r3, dead $cpsr = tMOVi8 1, 14, $noreg
+  ; CHECK:   renamable $lr = nuw nsw t2ADDrs killed renamable $r3, renamable $r12, 35, 14, $noreg, $noreg
+  ; CHECK:   renamable $r3 = t2LSRri killed renamable $r12, 4, 14, $noreg, $noreg
+  ; CHECK:   renamable $r3 = t2SUBrs renamable $r2, killed renamable $r3, 34, 14, $noreg, $noreg
+  ; CHECK:   $lr = t2DLS killed renamable $lr
+  ; CHECK: bb.1.vector.body:
+  ; CHECK:   successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+  ; CHECK:   liveins: $lr, $q0, $r0, $r1, $r2, $r3
+  ; CHECK:   renamable $vpr = MVE_VCTP8 renamable $r2, 0, $noreg
+  ; CHECK:   $q1 = MVE_VORR killed $q0, killed $q0, 0, $noreg, undef $q1
+  ; CHECK:   MVE_VPST 2, implicit $vpr
+  ; CHECK:   renamable $r1, renamable $q0 = MVE_VLDRBU8_post killed renamable $r1, 16, 1, killed renamable $vpr :: (load 16 from %ir.lsr.iv2022, align 1)
+  ; CHECK:   renamable $r0, renamable $q2 = MVE_VLDRBU8_post killed renamable $r0, 16, 1, $noreg :: (load 16 from %ir.lsr.iv19, align 1)
+  ; CHECK:   renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 16, 14, $noreg
+  ; CHECK:   renamable $q2 = MVE_VADDi8 killed renamable $q2, renamable $q1, 0, $noreg, undef renamable $q2
+  ; CHECK:   renamable $q0 = MVE_VADDi8 killed renamable $q2, killed renamable $q0, 0, $noreg, undef renamable $q0
+  ; CHECK:   $lr = t2LEUpdate killed renamable $lr, %bb.1
+  ; CHECK: bb.2.middle.block:
+  ; CHECK:   liveins: $q0, $q1, $r3
+  ; CHECK:   renamable $vpr = MVE_VCTP8 killed renamable $r3, 0, $noreg
+  ; CHECK:   renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, killed renamable $vpr
+  ; CHECK:   renamable $r0 = MVE_VADDVu8no_acc killed renamable $q0, 0, $noreg
+  ; CHECK:   $sp = t2LDMIA_UPD $sp, 14, $noreg, def $r7, def $lr
+  ; CHECK:   renamable $r0 = tUXTB killed renamable $r0, 14, $noreg
+  ; CHECK:   tBX_RET 14, $noreg, implicit killed $r0
+  bb.0.entry:
+    successors: %bb.1(0x80000000)
+    liveins: $r0, $r1, $r2, $lr
+
+    tCMPi8 renamable $r2, 0, 14, $noreg, implicit-def $cpsr
+    t2IT 0, 2, implicit-def $itstate
+    renamable $r0 = tMOVi8 $noreg, 0, 0, $cpsr, implicit killed $r0, implicit $itstate
+    renamable $r0 = tUXTB killed renamable $r0, 0, $cpsr, implicit killed $r0, implicit $itstate
+    tBX_RET 0, killed $cpsr, implicit $r0, implicit killed $itstate
+    frame-setup tPUSH 14, $noreg, killed $lr, implicit-def $sp, implicit $sp
+    frame-setup CFI_INSTRUCTION def_cfa_offset 8
+    frame-setup CFI_INSTRUCTION offset $lr, -4
+    frame-setup CFI_INSTRUCTION offset $r7, -8
+    $r7 = frame-setup tMOVr $sp, 14, $noreg
+    frame-setup CFI_INSTRUCTION def_cfa_register $r7
+    renamable $r3 = t2ADDri renamable $r2, 15, 14, $noreg, $noreg
+    renamable $q0 = MVE_VMOVimmi32 0, 0, $noreg, undef renamable $q0
+    renamable $r3 = t2BICri killed renamable $r3, 15, 14, $noreg, $noreg
+    renamable $r12 = t2SUBri killed renamable $r3, 16, 14, $noreg, $noreg
+    renamable $r3, dead $cpsr = tMOVi8 1, 14, $noreg
+    renamable $lr = nuw nsw t2ADDrs killed renamable $r3, renamable $r12, 35, 14, $noreg, $noreg
+    renamable $r3 = t2LSRri killed renamable $r12, 4, 14, $noreg, $noreg
+    renamable $r3 = t2SUBrs renamable $r2, killed renamable $r3, 34, 14, $noreg, $noreg
+    t2DoLoopStart renamable $lr
+
+  bb.1.vector.body:
+    successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+    liveins: $lr, $q0, $r0, $r1, $r2, $r3
+
+    renamable $vpr = MVE_VCTP8 renamable $r2, 0, $noreg
+    $q1 = MVE_VORR killed $q0, $q0, 0, $noreg, undef $q1
+    MVE_VPST 2, implicit $vpr
+    renamable $r1, renamable $q0 = MVE_VLDRBU8_post killed renamable $r1, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv2022, align 1)
+    renamable $r0, renamable $q2 = MVE_VLDRBU8_post killed renamable $r0, 16, 1, $noreg :: (load 16 from %ir.lsr.iv19, align 1)
+    renamable $r2, dead $cpsr = tSUBi8 killed renamable $r2, 16, 14, $noreg
+    renamable $q2 = MVE_VADDi8 killed renamable $q2, renamable $q1, 0, $noreg, undef renamable $q2
+    renamable $lr = t2LoopDec killed renamable $lr, 1
+    renamable $q0 = MVE_VADDi8 killed renamable $q2, killed renamable $q0, 0, $noreg, undef renamable $q0
+    t2LoopEnd renamable $lr, %bb.1, implicit-def dead $cpsr
+    tB %bb.2, 14, $noreg
+
+  bb.2.middle.block:
+    liveins: $q0, $q1, $r3
+
+    renamable $vpr = MVE_VCTP8 killed renamable $r3, 0, $noreg
+    renamable $q0 = MVE_VPSEL killed renamable $q0, killed renamable $q1, 0, killed renamable $vpr
+    renamable $r0 = MVE_VADDVu8no_acc killed renamable $q0, 0, $noreg
+    $sp = t2LDMIA_UPD $sp, 14, $noreg, def $r7, def $lr
+    renamable $r0 = tUXTB killed renamable $r0, 14, $noreg
+    tBX_RET 14, $noreg, implicit killed $r0
+
+...

diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/non-masked-store.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/non-masked-store.mir
new file mode 100644
index 000000000000..1dfcc7788b24
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/non-masked-store.mir
@@ -0,0 +1,170 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - --verify-machineinstrs | FileCheck %s
+
+--- |
+  define dso_local arm_aapcs_vfpcc void @non_masked_store(i8* noalias nocapture %res, i8* nocapture readonly %a, i8* nocapture readonly %b, i32 %N) {
+  entry:
+    %cmp10 = icmp eq i32 %N, 0
+    %0 = add i32 %N, 15
+    %1 = lshr i32 %0, 4
+    %2 = shl nuw i32 %1, 4
+    %3 = add i32 %2, -16
+    %4 = lshr i32 %3, 4
+    %5 = add nuw nsw i32 %4, 1
+    br i1 %cmp10, label %for.cond.cleanup, label %vector.ph
+
+  vector.ph:                                        ; preds = %entry
+    call void @llvm.set.loop.iterations.i32(i32 %5)
+    br label %vector.body
+
+  vector.body:                                      ; preds = %vector.body, %vector.ph
+    %lsr.iv19 = phi i8* [ %scevgep20, %vector.body ], [ %res, %vector.ph ]
+    %lsr.iv16 = phi i8* [ %scevgep17, %vector.body ], [ %b, %vector.ph ]
+    %lsr.iv = phi i8* [ %scevgep, %vector.body ], [ %a, %vector.ph ]
+    %6 = phi i32 [ %5, %vector.ph ], [ %11, %vector.body ]
+    %7 = phi i32 [ %N, %vector.ph ], [ %9, %vector.body ]
+    %lsr.iv1921 = bitcast i8* %lsr.iv19 to <16 x i8>*
+    %lsr.iv1618 = bitcast i8* %lsr.iv16 to <16 x i8>*
+    %lsr.iv15 = bitcast i8* %lsr.iv to <16 x i8>*
+    %8 = call <16 x i1> @llvm.arm.mve.vctp8(i32 %7)
+    %9 = sub i32 %7, 16
+    %wide.masked.load = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %lsr.iv15, i32 1, <16 x i1> %8, <16 x i8> undef)
+    %wide.masked.load14 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %lsr.iv1618, i32 1, <16 x i1> %8, <16 x i8> undef)
+    %10 = add <16 x i8> %wide.masked.load14, %wide.masked.load
+    store <16 x i8> %10, <16 x i8>* %lsr.iv1921
+    %scevgep = getelementptr i8, i8* %lsr.iv, i32 16
+    %scevgep17 = getelementptr i8, i8* %lsr.iv16, i32 16
+    %scevgep20 = getelementptr i8, i8* %lsr.iv19, i32 16
+    %11 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %6, i32 1)
+    %12 = icmp ne i32 %11, 0
+    br i1 %12, label %vector.body, label %for.cond.cleanup
+
+  for.cond.cleanup:                                 ; preds = %vector.body, %entry
+    ret void
+  }
+
+  declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32 immarg, <16 x i1>, <16 x i8>)
+  declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32 immarg, <16 x i1>)
+  declare void @llvm.set.loop.iterations.i32(i32)
+  declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
+  declare <16 x i1> @llvm.arm.mve.vctp8(i32)
+
+...
+---
+name:            non_masked_store
+alignment:       2
+exposesReturnsTwice: false
+legalized:       false
+regBankSelected: false
+selected:        false
+failedISel:      false
+tracksRegLiveness: true
+hasWinCFI:       false
+registers:       []
+liveins:
+  - { reg: '$r0', virtual-reg: '' }
+  - { reg: '$r1', virtual-reg: '' }
+  - { reg: '$r2', virtual-reg: '' }
+  - { reg: '$r3', virtual-reg: '' }
+frameInfo:
+  isFrameAddressTaken: false
+  isReturnAddressTaken: false
+  hasStackMap:     false
+  hasPatchPoint:   false
+  stackSize:       8
+  offsetAdjustment: 0
+  maxAlignment:    4
+  adjustsStack:    false
+  hasCalls:        false
+  stackProtector:  ''
+  maxCallFrameSize: 0
+  cvBytesOfCalleeSavedRegisters: 0
+  hasOpaqueSPAdjustment: false
+  hasVAStart:      false
+  hasMustTailInVarArgFunc: false
+  localFrameSize:  0
+  savePoint:       ''
+  restorePoint:    ''
+fixedStack:      []
+stack:
+  - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+  - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
+      stack-id: default, callee-saved-register: '$r7', callee-saved-restored: true,
+      debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+callSites:       []
+constants:       []
+machineFunctionInfo: {}
+body:             |
+  ; CHECK-LABEL: name: non_masked_store
+  ; CHECK: bb.0.entry:
+  ; CHECK:   successors: %bb.1(0x80000000)
+  ; CHECK:   liveins: $lr, $r0, $r1, $r2, $r3
+  ; CHECK:   frame-setup tPUSH 14, $noreg, killed $lr, implicit-def $sp, implicit $sp
+  ; CHECK:   frame-setup CFI_INSTRUCTION def_cfa_offset 8
+  ; CHECK:   frame-setup CFI_INSTRUCTION offset $lr, -4
+  ; CHECK:   frame-setup CFI_INSTRUCTION offset $r7, -8
+  ; CHECK:   dead $r7 = frame-setup tMOVr $sp, 14, $noreg
+  ; CHECK:   frame-setup CFI_INSTRUCTION def_cfa_register $r7
+  ; CHECK:   tCMPi8 renamable $r3, 0, 14, $noreg, implicit-def $cpsr
+  ; CHECK:   t2IT 0, 8, implicit-def $itstate
+  ; CHECK:   tPOP_RET 0, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+  ; CHECK:   renamable $r12 = t2ADDri renamable $r3, 15, 14, $noreg, $noreg
+  ; CHECK:   renamable $lr = t2MOVi 1, 14, $noreg, $noreg
+  ; CHECK:   renamable $r12 = t2BICri killed renamable $r12, 15, 14, $noreg, $noreg
+  ; CHECK:   renamable $r12 = t2SUBri killed renamable $r12, 16, 14, $noreg, $noreg
+  ; CHECK:   renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 35, 14, $noreg, $noreg
+  ; CHECK:   $lr = t2DLS killed renamable $lr
+  ; CHECK: bb.1.vector.body:
+  ; CHECK:   successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+  ; CHECK:   liveins: $lr, $r0, $r1, $r2, $r3
+  ; CHECK:   renamable $vpr = MVE_VCTP8 renamable $r3, 0, $noreg
+  ; CHECK:   renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 16, 14, $noreg
+  ; CHECK:   MVE_VPST 4, implicit $vpr
+  ; CHECK:   renamable $r1, renamable $q0 = MVE_VLDRBU8_post killed renamable $r1, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv15, align 1)
+  ; CHECK:   renamable $r2, renamable $q1 = MVE_VLDRBU8_post killed renamable $r2, 16, 1, killed renamable $vpr :: (load 16 from %ir.lsr.iv1618, align 1)
+  ; CHECK:   renamable $q0 = MVE_VADDi8 killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
+  ; CHECK:   renamable $r0 = MVE_VSTRBU8_post killed renamable $q0, killed renamable $r0, 16, 1, $noreg :: (store 16 into %ir.lsr.iv1921, align 1)
+  ; CHECK:   $lr = t2LEUpdate killed renamable $lr, %bb.1
+  ; CHECK: bb.2.for.cond.cleanup:
+  ; CHECK:   tPOP_RET 14, $noreg, def $r7, def $pc
+  bb.0.entry:
+    successors: %bb.1(0x80000000)
+    liveins: $r0, $r1, $r2, $r3, $lr
+
+    frame-setup tPUSH 14, $noreg, killed $lr, implicit-def $sp, implicit $sp
+    frame-setup CFI_INSTRUCTION def_cfa_offset 8
+    frame-setup CFI_INSTRUCTION offset $lr, -4
+    frame-setup CFI_INSTRUCTION offset $r7, -8
+    $r7 = frame-setup tMOVr $sp, 14, $noreg
+    frame-setup CFI_INSTRUCTION def_cfa_register $r7
+    tCMPi8 renamable $r3, 0, 14, $noreg, implicit-def $cpsr
+    t2IT 0, 8, implicit-def $itstate
+    tPOP_RET 0, killed $cpsr, def $r7, def $pc, implicit killed $itstate
+    renamable $r12 = t2ADDri renamable $r3, 15, 14, $noreg, $noreg
+    renamable $lr = t2MOVi 1, 14, $noreg, $noreg
+    renamable $r12 = t2BICri killed renamable $r12, 15, 14, $noreg, $noreg
+    renamable $r12 = t2SUBri killed renamable $r12, 16, 14, $noreg, $noreg
+    renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 35, 14, $noreg, $noreg
+    t2DoLoopStart renamable $lr
+
+  bb.1.vector.body:
+    successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+    liveins: $lr, $r0, $r1, $r2, $r3
+
+    renamable $vpr = MVE_VCTP8 renamable $r3, 0, $noreg
+    renamable $r3, dead $cpsr = tSUBi8 killed renamable $r3, 16, 14, $noreg
+    MVE_VPST 4, implicit $vpr
+    renamable $r1, renamable $q0 = MVE_VLDRBU8_post killed renamable $r1, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv15, align 1)
+    renamable $r2, renamable $q1 = MVE_VLDRBU8_post killed renamable $r2, 16, 1, renamable $vpr :: (load 16 from %ir.lsr.iv1618, align 1)
+    renamable $lr = t2LoopDec killed renamable $lr, 1
+    renamable $q0 = MVE_VADDi8 killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
+    renamable $r0 = MVE_VSTRBU8_post killed renamable $q0, killed renamable $r0, 16, 1, $noreg :: (store 16 into %ir.lsr.iv1921, align 1)
+    t2LoopEnd renamable $lr, %bb.1, implicit-def dead $cpsr
+    tB %bb.2, 14, $noreg
+
+  bb.2.for.cond.cleanup:
+    tPOP_RET 14, $noreg, def $r7, def $pc
+
+...
