[llvm] r326232 - [ARM] Another f16 litpool fix

Sjoerd Meijer via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 27 11:26:02 PST 2018


Author: sjoerdmeijer
Date: Tue Feb 27 11:26:02 2018
New Revision: 326232

URL: http://llvm.org/viewvc/llvm-project?rev=326232&view=rev
Log:
[ARM] Another f16 litpool fix

We were always setting the block alignment to 2 bytes in Thumb mode
and 4 bytes in ARM mode (r325754 and r325012), but this could reduce
the block alignment when it had already been aligned (e.g. in Thumb
mode when the block is a CPE that was already 4-byte aligned).

Patch by Momchil Velikov, I've only added a test.

Differential Revision: https://reviews.llvm.org/D43777

Added:
    llvm/trunk/test/CodeGen/ARM/fp16-litpool3-arm.mir
Modified:
    llvm/trunk/lib/Target/ARM/ARMConstantIslandPass.cpp

Modified: llvm/trunk/lib/Target/ARM/ARMConstantIslandPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMConstantIslandPass.cpp?rev=326232&r1=326231&r2=326232&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMConstantIslandPass.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMConstantIslandPass.cpp Tue Feb 27 11:26:02 2018
@@ -1485,8 +1485,12 @@ bool ARMConstantIslands::handleConstantP
     // We are adding new water.  Update NewWaterList.
     NewWaterList.insert(NewIsland);
   }
-  // Always align the new block because CP entries can be smaller than 4 bytes.
-  NewMBB->setAlignment(isThumb ? 1 : 2);
+  // Always align the new block because CP entries can be smaller than 4
+  // bytes. Be careful not to decrease the existing alignment, e.g. NewMBB may
+  // be an already aligned constant pool block.
+  const unsigned Align = isThumb ? 1 : 2;
+  if (NewMBB->getAlignment() < Align)
+    NewMBB->setAlignment(Align);
 
   // Remove the original WaterList entry; we want subsequent insertions in
   // this vicinity to go after the one we're about to insert.  This

Added: llvm/trunk/test/CodeGen/ARM/fp16-litpool3-arm.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fp16-litpool3-arm.mir?rev=326232&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fp16-litpool3-arm.mir (added)
+++ llvm/trunk/test/CodeGen/ARM/fp16-litpool3-arm.mir Tue Feb 27 11:26:02 2018
@@ -0,0 +1,113 @@
+# RUN: llc -mtriple=arm-none-eabi -run-pass=arm-cp-islands %s -o - | FileCheck %s
+#
+# This checks alignment of a block when a CPE is placed before/after a
+# block (as e.g. opposed to splitting up a block), and also make sure
+# we don't decrease alignment.
+#
+--- |
+  ; ModuleID = '<stdin>'
+  source_filename = "<stdin>"
+  target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
+  target triple = "arm-arm--eabi"
+
+  declare i32 @llvm.arm.space(i32, i32) #0
+
+  define dso_local i32 @CP() #1 {
+  entry:
+    %res = alloca half, align 2
+    store half 0xH706B, half* %res, align 2
+    %0 = load half, half* %res, align 2
+    %tobool = fcmp une half %0, 0xH0000
+    br i1 %tobool, label %LA, label %END
+
+  LA:                                               ; preds = %entry
+    %1 = call i32 @llvm.arm.space(i32 1000, i32 undef)
+    br label %END
+
+  END:                                              ; preds = %LA, %entry
+    %2 = call i32 @llvm.arm.space(i32 100, i32 undef)
+    ret i32 42
+  }
+
+  ; Function Attrs: nounwind
+  declare void @llvm.stackprotector(i8*, i8**) #2
+
+  attributes #0 = { nounwind "target-features"="+v8.2a,+fullfp16" }
+  attributes #1 = { "target-features"="+v8.2a,+fullfp16" }
+  attributes #2 = { nounwind }
+
+...
+---
+name:            CP
+alignment:       2
+exposesReturnsTwice: false
+legalized:       false
+regBankSelected: false
+selected:        false
+tracksRegLiveness: true
+registers:
+liveins:
+frameInfo:
+  isFrameAddressTaken: false
+  isReturnAddressTaken: false
+  hasStackMap:     false
+  hasPatchPoint:   false
+  stackSize:       4
+  offsetAdjustment: 0
+  maxAlignment:    2
+  adjustsStack:    false
+  hasCalls:        false
+  stackProtector:  ''
+  maxCallFrameSize: 0
+  hasOpaqueSPAdjustment: false
+  hasVAStart:      false
+  hasMustTailInVarArgFunc: false
+  savePoint:       ''
+  restorePoint:    ''
+fixedStack:
+stack:
+  - { id: 0, name: res, type: default, offset: -2, size: 2, alignment: 2,
+      stack-id: 0, callee-saved-register: '', callee-saved-restored: true,
+      local-offset: -2, di-variable: '', di-expression: '', di-location: '' }
+constants:
+  - id:              0
+    value:           half 0xH706B
+    alignment:       2
+    isTargetSpecific: false
+
+
+#CHECK:  bb.{{.*}} (align 1):
+#CHECK:    successors:
+#CHECK:    CONSTPOOL_ENTRY 1, %const{{.*}}, 2
+#
+# We don't want to decrease alignment if the block already has been
+# aligned; this can e.g. be an existing CPE that has been carefully 
+# aligned. Here BB.1.LA has already an 8-byte alignment, and we are 
+# checking we don't set it to 4:
+#
+#CHECK:  bb.{{.*}}.LA (align 3):
+
+body:             |
+  bb.0.entry:
+    successors: %bb.1(0x50000000), %bb.2(0x30000000)
+
+    $sp = frame-setup SUBri $sp, 4, 14, $noreg, $noreg
+    frame-setup CFI_INSTRUCTION def_cfa_offset 4
+    renamable $s0 = VLDRH %const.0, 0, 14, $noreg :: (load 2 from constant-pool)
+    VCMPZH renamable $s0, 14, $noreg, implicit-def $fpscr_nzcv
+    VSTRH killed renamable $s0, $sp, 1, 14, $noreg :: (store 2 into %ir.res)
+    FMSTAT 14, $noreg, implicit-def $cpsr, implicit killed $fpscr_nzcv
+    Bcc %bb.2, 0, killed $cpsr
+
+  bb.1.LA (align 3):
+    successors: %bb.2(0x80000000)
+
+    dead renamable $r0 = SPACE 1000, undef renamable $r0
+
+  bb.2.END:
+    dead renamable $r0 = SPACE 100, undef renamable $r0
+    $r0 = MOVi 42, 14, $noreg, $noreg
+    $sp = ADDri $sp, 4, 14, $noreg, $noreg
+    BX_RET 14, $noreg, implicit killed $r0
+
+...




More information about the llvm-commits mailing list