[llvm] 719bac5 - [MIRParser] Diagnose too large align values in MachineMemOperands

Jay Foad via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 24 07:36:46 PST 2022


Author: Jay Foad
Date: 2022-02-24T15:32:08Z
New Revision: 719bac55dff141c6f68fe3045f42b78a41e84d79

URL: https://github.com/llvm/llvm-project/commit/719bac55dff141c6f68fe3045f42b78a41e84d79
DIFF: https://github.com/llvm/llvm-project/commit/719bac55dff141c6f68fe3045f42b78a41e84d79.diff

LOG: [MIRParser] Diagnose too large align values in MachineMemOperands

When parsing MachineMemOperands, MIRParser treated the "align" keyword
the same as "basealign". Properly, "basealign" should specify the
alignment of the MachinePointerInfo base value, while "align" should
specify the alignment of that base value plus the offset.
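
For reference, the effective alignment of a memory operand is derived
from its base alignment and offset. A minimal sketch of the existing
helper (paraphrased from llvm/lib/CodeGen/MachineOperand.cpp, shown
here for illustration, not added by this patch):

    // The alignment of (base + offset) is the largest power of two
    // that divides both the base alignment and the offset.
    Align MachineMemOperand::getAlign() const {
      return commonAlignment(getBaseAlign(), getOffset());
    }

So for an operand with basealign 8 and offset 4, getAlign returns 4,
and an explicit "align 8" cannot be satisfied.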

This worked fine when the specified alignment was no larger than the
alignment implied by the offset, but in cases like the following it
just caused confusion:

    STW killed %18, 4, %stack.1.ap2.i.i :: (store (s32) into %stack.1.ap2.i.i + 4, align 8)

MIRPrinter would never have printed this, with an offset of 4 but an
align of 8, so it must have been written by hand. MIRParser would
interpret "align 8" as "basealign 8", but I think it is better to give
an error and force the user to write "basealign 8" if that is what they
really meant.
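
With this change, the operand above is instead rejected with the new
diagnostic:

    specified alignment is more aligned than offset

so the author has to decide between "align 4" (the largest alignment
consistent with an offset of 4) and "basealign 8", whichever was
intended.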

Differential Revision: https://reviews.llvm.org/D120400

Change-Id: I7eeeefc55c2df3554ba8d89f8809a2f45ada32d8

Added: 
    llvm/test/CodeGen/MIR/Generic/aligned-memoperands-err.mir
    llvm/test/CodeGen/MIR/Generic/aligned-memoperands.mir

Modified: 
    llvm/lib/CodeGen/MIRParser/MILexer.cpp
    llvm/lib/CodeGen/MIRParser/MIParser.cpp
    llvm/test/CodeGen/AMDGPU/merge-global-load-store.mir
    llvm/test/CodeGen/Mips/msa/emergency-spill.mir
    llvm/test/CodeGen/PowerPC/stack-coloring-vararg.mir
    llvm/test/DebugInfo/AArch64/asan-stack-vars.mir

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/MIRParser/MILexer.cpp b/llvm/lib/CodeGen/MIRParser/MILexer.cpp
index 0ca820f160aa4..0754ef82bae05 100644
--- a/llvm/lib/CodeGen/MIRParser/MILexer.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MILexer.cpp
@@ -250,7 +250,7 @@ static MIToken::TokenKind getIdentifierKind(StringRef Identifier) {
       .Case("dereferenceable", MIToken::kw_dereferenceable)
       .Case("invariant", MIToken::kw_invariant)
       .Case("align", MIToken::kw_align)
-      .Case("basealign", MIToken::kw_align)
+      .Case("basealign", MIToken::kw_basealign)
       .Case("addrspace", MIToken::kw_addrspace)
       .Case("stack", MIToken::kw_stack)
       .Case("got", MIToken::kw_got)

diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index 26ae21a9b752e..be6c3f13821a6 100644
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -3281,11 +3281,21 @@ bool MIParser::parseMachineMemoryOperand(MachineMemOperand *&Dest) {
   MDNode *Range = nullptr;
   while (consumeIfPresent(MIToken::comma)) {
     switch (Token.kind()) {
-    case MIToken::kw_align:
+    case MIToken::kw_align: {
       // align is printed if it is different than size.
-      if (parseAlignment(BaseAlignment))
+      uint64_t Alignment;
+      if (parseAlignment(Alignment))
         return true;
+      if (Ptr.Offset & (Alignment - 1)) {
+        // MachineMemOperand::getAlign never returns a value greater than the
+        // alignment of offset, so this just guards against hand-written MIR
+        // that specifies a large "align" value when it should probably use
+        // "basealign" instead.
+        return error("specified alignment is more aligned than offset");
+      }
+      BaseAlignment = Alignment;
       break;
+    }
     case MIToken::kw_basealign:
       // basealign is printed if it is different than align.
       if (parseAlignment(BaseAlignment))

diff --git a/llvm/test/CodeGen/AMDGPU/merge-global-load-store.mir b/llvm/test/CodeGen/AMDGPU/merge-global-load-store.mir
index 8d4fcca8f741d..11925920b4570 100644
--- a/llvm/test/CodeGen/AMDGPU/merge-global-load-store.mir
+++ b/llvm/test/CodeGen/AMDGPU/merge-global-load-store.mir
@@ -410,7 +410,7 @@ body:             |
     ; GCN-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY killed [[GLOBAL_LOAD_DWORDX2_]].sub0
     ; GCN-NEXT: S_NOP 0, implicit [[COPY]], implicit [[COPY1]]
     %0:vreg_64_align2 = IMPLICIT_DEF
-    %1:vgpr_32 = GLOBAL_LOAD_DWORD %0, 4, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef` + 4, align 8, addrspace 1)
+    %1:vgpr_32 = GLOBAL_LOAD_DWORD %0, 4, 0, implicit $exec :: (load (s32) from `i32 addrspace(1)* undef` + 4, basealign 8, addrspace 1)
     %2:vgpr_32 = GLOBAL_LOAD_DWORD %0, 0, 0, implicit $exec :: (load (s32) from `float addrspace(1)* undef`, align 4, addrspace 1)
     S_NOP 0, implicit %1, implicit %2
 ...

diff --git a/llvm/test/CodeGen/MIR/Generic/aligned-memoperands-err.mir b/llvm/test/CodeGen/MIR/Generic/aligned-memoperands-err.mir
new file mode 100644
index 0000000000000..e129d4c661961
--- /dev/null
+++ b/llvm/test/CodeGen/MIR/Generic/aligned-memoperands-err.mir
@@ -0,0 +1,10 @@
+# RUN: not llc -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
+
+---
+name: aligned_memoperands
+body: |
+  bb.0:
+    %0:_(p0) = IMPLICIT_DEF
+    ; CHECK: :[[@LINE+1]]:73: specified alignment is more aligned than offset
+    %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, align 8)
+...

diff --git a/llvm/test/CodeGen/MIR/Generic/aligned-memoperands.mir b/llvm/test/CodeGen/MIR/Generic/aligned-memoperands.mir
new file mode 100644
index 0000000000000..7030eebffed9d
--- /dev/null
+++ b/llvm/test/CodeGen/MIR/Generic/aligned-memoperands.mir
@@ -0,0 +1,28 @@
+# RUN: llc -run-pass none -o - %s | FileCheck %s
+
+---
+name: aligned_memoperands
+body: |
+  bb.0:
+    ; CHECK-LABEL: name: aligned_memoperands
+    ; CHECK: [[DEF:%[0-9]+]]:_(p0) = IMPLICIT_DEF
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef`)
+    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef`, align 2)
+    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef`)
+    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef`, align 8)
+    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef` + 12, align 2)
+    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef` + 12)
+    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef` + 12, align 2)
+    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef` + 12)
+    ; CHECK-NEXT: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[DEF]](p0) :: (load (s32) from `i32* undef` + 12, basealign 8)
+    %0:_(p0) = IMPLICIT_DEF
+    %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef`)
+    %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef`, align 2)
+    %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef`, align 4) ; redundant
+    %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef`, align 8)
+    %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, align 2)
+    %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, align 4) ; redundant
+    %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, basealign 2) ; printed as "align"
+    %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, basealign 4) ; redundant
+    %1:_(s32) = G_LOAD %0 :: (load (s32) from `i32* undef` + 12, basealign 8)
+...

diff --git a/llvm/test/CodeGen/Mips/msa/emergency-spill.mir b/llvm/test/CodeGen/Mips/msa/emergency-spill.mir
index 10d993220de3d..9aefd3a7ddf98 100644
--- a/llvm/test/CodeGen/Mips/msa/emergency-spill.mir
+++ b/llvm/test/CodeGen/Mips/msa/emergency-spill.mir
@@ -214,8 +214,8 @@ body:             |
     ST_B killed $w0, %stack.4.b.addr, 0 :: (store (s128) into %ir.b.addr)
     $w0 = LD_B %stack.4.b.addr, 0 :: (dereferenceable load (s128) from %ir.b.addr)
     ST_B killed $w0, %stack.0.retval, 0 :: (store (s128) into %ir.retval)
-    $v0_64 = LD %stack.0.retval, 0 :: (dereferenceable load (s64) from %ir.20, align 16)
-    $v1_64 = LD %stack.0.retval, 8 :: (dereferenceable load (s64) from %ir.20 + 8, align 16)
+    $v0_64 = LD %stack.0.retval, 0 :: (dereferenceable load (s64) from %ir.20, basealign 16)
+    $v1_64 = LD %stack.0.retval, 8 :: (dereferenceable load (s64) from %ir.20 + 8, basealign 16)
     RetRA implicit $v0_64, implicit $v1_64
 
 ...

diff --git a/llvm/test/CodeGen/PowerPC/stack-coloring-vararg.mir b/llvm/test/CodeGen/PowerPC/stack-coloring-vararg.mir
index 9cf3473a29ae5..5eceea7a5bf7f 100644
--- a/llvm/test/CodeGen/PowerPC/stack-coloring-vararg.mir
+++ b/llvm/test/CodeGen/PowerPC/stack-coloring-vararg.mir
@@ -152,11 +152,11 @@ body:             |
   bb.1.if.then6.i.i:
     LIFETIME_START %stack.1.ap2.i.i
     %17:gprc = LWZ 8, $zero :: (load (s32), align 8)
-    STW killed %17, 8, %stack.1.ap2.i.i :: (store (s32) into %stack.1.ap2.i.i + 8, align 8)
+    STW killed %17, 8, %stack.1.ap2.i.i :: (store (s32) into %stack.1.ap2.i.i + 8, basealign 8)
     %18:gprc = LWZ 4, $zero :: (load (s32))
-    STW killed %18, 4, %stack.1.ap2.i.i :: (store (s32) into %stack.1.ap2.i.i + 4, align 8)
+    STW killed %18, 4, %stack.1.ap2.i.i :: (store (s32) into %stack.1.ap2.i.i + 4, basealign 8)
     %19:gprc = LWZ 0, $zero :: (load (s32), align 8)
-    STW killed %19, 0, %stack.1.ap2.i.i :: (store (s32) into %stack.1.ap2.i.i, align 8)
+    STW killed %19, 0, %stack.1.ap2.i.i :: (store (s32) into %stack.1.ap2.i.i, basealign 8)
     BLR implicit $lr, implicit $rm
 
   bb.2.format_reason_ap.exit.i:

diff --git a/llvm/test/DebugInfo/AArch64/asan-stack-vars.mir b/llvm/test/DebugInfo/AArch64/asan-stack-vars.mir
index d5f4ead7166dd..233ec00679aae 100644
--- a/llvm/test/DebugInfo/AArch64/asan-stack-vars.mir
+++ b/llvm/test/DebugInfo/AArch64/asan-stack-vars.mir
@@ -434,9 +434,9 @@ body:             |
     renamable $x14 = ADDXri renamable $x10, 64, 0, debug-location !35
     $x15 = MOVZXi 35507, 0, debug-location !35
     $x15 = MOVKXi $x15, 16821, 16, debug-location !35
-    STRXui killed renamable $x15, $sp, 24, debug-location !35 :: (store (s768) into %stack.3.MyAlloca, align 32)
-    STRXui killed renamable $x9, $sp, 25, debug-location !35 :: (store (s768) into %stack.3.MyAlloca + 1, align 32)
-    STRXui killed renamable $x8, $sp, 26, debug-location !35 :: (store (s768) into %stack.3.MyAlloca + 2, align 32)
+    STRXui killed renamable $x15, $sp, 24, debug-location !35 :: (store (s768) into %stack.3.MyAlloca, basealign 32)
+    STRXui killed renamable $x9, $sp, 25, debug-location !35 :: (store (s768) into %stack.3.MyAlloca + 1, basealign 32)
+    STRXui killed renamable $x8, $sp, 26, debug-location !35 :: (store (s768) into %stack.3.MyAlloca + 2, basealign 32)
     renamable $x8 = UBFMXri renamable $x10, 3, 63, debug-location !35
     renamable $x9 = ADDXrs renamable $x11, renamable $x10, 67, debug-location !35
     $x15 = MOVZXi 61937, 0, debug-location !35

