[llvm] r352686 - MIR: Reject non-power-of-2 alignments in MMO parsing
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Wed Jan 30 15:09:28 PST 2019
Author: arsenm
Date: Wed Jan 30 15:09:28 2019
New Revision: 352686
URL: http://llvm.org/viewvc/llvm-project?rev=352686&view=rev
Log:
MIR: Reject non-power-of-2 alignments in MMO parsing
Added:
llvm/trunk/test/CodeGen/MIR/X86/expected-power-of-2-after-align.mir
Modified:
llvm/trunk/lib/CodeGen/MIRParser/MIParser.cpp
llvm/trunk/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
llvm/trunk/test/CodeGen/ARM/fp16-litpool3-arm.mir
llvm/trunk/test/CodeGen/Mips/GlobalISel/instruction-select/pointers.mir
llvm/trunk/test/CodeGen/Mips/GlobalISel/instruction-select/stack_args.mir
llvm/trunk/test/CodeGen/Mips/GlobalISel/legalizer/add.mir
llvm/trunk/test/CodeGen/Mips/GlobalISel/legalizer/pointers.mir
llvm/trunk/test/CodeGen/Mips/GlobalISel/legalizer/stack_args.mir
llvm/trunk/test/CodeGen/Mips/GlobalISel/legalizer/sub.mir
llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/pointers.mir
llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/stack_args.mir
llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
llvm/trunk/test/CodeGen/X86/GlobalISel/x86-legalize-inttoptr.mir
llvm/trunk/test/CodeGen/X86/GlobalISel/x86-legalize-ptrtoint.mir
llvm/trunk/test/CodeGen/X86/GlobalISel/x86-legalize-srem.mir
llvm/trunk/test/CodeGen/X86/GlobalISel/x86-legalize-urem.mir
llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-inttoptr.mir
llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-ptrtoint.mir
llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-srem.mir
llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-udiv.mir
llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-urem.mir
Modified: llvm/trunk/lib/CodeGen/MIRParser/MIParser.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/MIRParser/MIParser.cpp?rev=352686&r1=352685&r2=352686&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/MIRParser/MIParser.cpp (original)
+++ llvm/trunk/lib/CodeGen/MIRParser/MIParser.cpp Wed Jan 30 15:09:28 2019
@@ -2324,6 +2324,10 @@ bool MIParser::parseAlignment(unsigned &
if (getUnsigned(Alignment))
return true;
lex();
+
+ if (!isPowerOf2_32(Alignment))
+ return error("expected a power-of-2 literal after 'align'");
+
return false;
}
Modified: llvm/trunk/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir?rev=352686&r1=352685&r2=352686&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir (original)
+++ llvm/trunk/test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir Wed Jan 30 15:09:28 2019
@@ -474,7 +474,7 @@ fixedStack:
body: |
bb.0:
%0(p0) = G_FRAME_INDEX %fixed-stack.0
- %1(s32) = G_LOAD %0(p0) :: (load 4 from %fixed-stack.0, align 0)
+ %1(s32) = G_LOAD %0(p0) :: (load 4 from %fixed-stack.0, align 4)
%2(p0) = COPY $sp
%3(s32) = G_CONSTANT i32 8
Modified: llvm/trunk/test/CodeGen/ARM/fp16-litpool3-arm.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/fp16-litpool3-arm.mir?rev=352686&r1=352685&r2=352686&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/fp16-litpool3-arm.mir (original)
+++ llvm/trunk/test/CodeGen/ARM/fp16-litpool3-arm.mir Wed Jan 30 15:09:28 2019
@@ -82,11 +82,11 @@ constants:
#CHECK: CONSTPOOL_ENTRY 1, %const{{.*}}, 2
#
# We don't want to decrease alignment if the block already has been
-# aligned; this can e.g. be an existing CPE that has been carefully
-# aligned. Here BB.1.LA has already an 8-byte alignment, and we are
+# aligned; this can e.g. be an existing CPE that has been carefully
+# aligned. Here BB.1.LA has already an 8-byte alignment, and we are
# checking we don't set it to 4:
#
-#CHECK: bb.{{.*}}.LA (align 3):
+#CHECK: bb.{{.*}}.LA (align 8):
body: |
bb.0.entry:
@@ -100,7 +100,7 @@ body: |
FMSTAT 14, $noreg, implicit-def $cpsr, implicit killed $fpscr_nzcv
Bcc %bb.2, 0, killed $cpsr
- bb.1.LA (align 3):
+ bb.1.LA (align 8):
successors: %bb.2(0x80000000)
dead renamable $r0 = SPACE 1000, undef renamable $r0
Added: llvm/trunk/test/CodeGen/MIR/X86/expected-power-of-2-after-align.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/MIR/X86/expected-power-of-2-after-align.mir?rev=352686&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/MIR/X86/expected-power-of-2-after-align.mir (added)
+++ llvm/trunk/test/CodeGen/MIR/X86/expected-power-of-2-after-align.mir Wed Jan 30 15:09:28 2019
@@ -0,0 +1,12 @@
+# RUN: not llc -march=x86-64 -run-pass none -o /dev/null %s 2>&1 | FileCheck %s
+
+---
+name: align_0
+body: |
+ bb.0:
+
+ %0:_(p0) = IMPLICIT_DEF
+ ; CHECK: [[@LINE+1]]:50: expected a power-of-2 literal after 'align'
+ %1:_(s64) = G_LOAD %0(p0) :: (load 8, align 0)
+...
+
Modified: llvm/trunk/test/CodeGen/Mips/GlobalISel/instruction-select/pointers.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/GlobalISel/instruction-select/pointers.mir?rev=352686&r1=352685&r2=352686&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/GlobalISel/instruction-select/pointers.mir (original)
+++ llvm/trunk/test/CodeGen/Mips/GlobalISel/instruction-select/pointers.mir Wed Jan 30 15:09:28 2019
@@ -44,7 +44,7 @@ body: |
; MIPS32-LABEL: name: ptr_arg_on_stack
; MIPS32: liveins: $a0, $a1, $a2, $a3
; MIPS32: [[ADDiu:%[0-9]+]]:gpr32 = ADDiu %fixed-stack.0, 0
- ; MIPS32: [[LW:%[0-9]+]]:gpr32 = LW [[ADDiu]], 0 :: (load 4 from %fixed-stack.0, align 0)
+ ; MIPS32: [[LW:%[0-9]+]]:gpr32 = LW [[ADDiu]], 0 :: (load 4 from %fixed-stack.0, align 8)
; MIPS32: [[LW1:%[0-9]+]]:gpr32 = LW [[LW]], 0 :: (load 4 from %ir.p)
; MIPS32: $v0 = COPY [[LW1]]
; MIPS32: RetRA implicit $v0
@@ -53,7 +53,7 @@ body: |
%2:gprb(s32) = COPY $a2
%3:gprb(s32) = COPY $a3
%5:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
- %4:gprb(p0) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 0)
+ %4:gprb(p0) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 8)
%6:gprb(s32) = G_LOAD %4(p0) :: (load 4 from %ir.p)
$v0 = COPY %6(s32)
RetRA implicit $v0
Modified: llvm/trunk/test/CodeGen/Mips/GlobalISel/instruction-select/stack_args.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/GlobalISel/instruction-select/stack_args.mir?rev=352686&r1=352685&r2=352686&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/GlobalISel/instruction-select/stack_args.mir (original)
+++ llvm/trunk/test/CodeGen/Mips/GlobalISel/instruction-select/stack_args.mir Wed Jan 30 15:09:28 2019
@@ -25,7 +25,7 @@ body: |
; MIPS32: [[COPY2:%[0-9]+]]:gpr32 = COPY $a2
; MIPS32: [[COPY3:%[0-9]+]]:gpr32 = COPY $a3
; MIPS32: [[ADDiu:%[0-9]+]]:gpr32 = ADDiu %fixed-stack.0, 0
- ; MIPS32: [[LW:%[0-9]+]]:gpr32 = LW [[ADDiu]], 0 :: (load 4 from %fixed-stack.0, align 0)
+ ; MIPS32: [[LW:%[0-9]+]]:gpr32 = LW [[ADDiu]], 0 :: (load 4 from %fixed-stack.0, align 8)
; MIPS32: ADJCALLSTACKDOWN 24, 0, implicit-def $sp, implicit $sp
; MIPS32: $a0 = COPY [[COPY]]
; MIPS32: $a1 = COPY [[COPY1]]
@@ -35,7 +35,7 @@ body: |
; MIPS32: [[LUi:%[0-9]+]]:gpr32 = LUi 0
; MIPS32: [[ORi:%[0-9]+]]:gpr32 = ORi [[LUi]], 16
; MIPS32: [[ADDu:%[0-9]+]]:gpr32 = ADDu [[COPY4]], [[ORi]]
- ; MIPS32: SW [[LW]], [[ADDu]], 0 :: (store 4 into stack + 16, align 0)
+ ; MIPS32: SW [[LW]], [[ADDu]], 0 :: (store 4 into stack + 16)
; MIPS32: JAL @f, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit $a2, implicit $a3, implicit-def $v0
; MIPS32: [[COPY5:%[0-9]+]]:gpr32 = COPY $v0
; MIPS32: ADJCALLSTACKUP 24, 0, implicit-def $sp, implicit $sp
@@ -46,7 +46,7 @@ body: |
%2:gprb(s32) = COPY $a2
%3:gprb(s32) = COPY $a3
%5:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
- %4:gprb(s32) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 0)
+ %4:gprb(s32) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 8)
ADJCALLSTACKDOWN 24, 0, implicit-def $sp, implicit $sp
$a0 = COPY %0(s32)
$a1 = COPY %1(s32)
@@ -55,7 +55,7 @@ body: |
%7:gprb(p0) = COPY $sp
%8:gprb(s32) = G_CONSTANT i32 16
%9:gprb(p0) = G_GEP %7, %8(s32)
- G_STORE %4(s32), %9(p0) :: (store 4 into stack + 16, align 0)
+ G_STORE %4(s32), %9(p0) :: (store 4 into stack + 16, align 4)
JAL @f, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit $a2, implicit $a3, implicit-def $v0
%6:gprb(s32) = COPY $v0
ADJCALLSTACKUP 24, 0, implicit-def $sp, implicit $sp
Modified: llvm/trunk/test/CodeGen/Mips/GlobalISel/legalizer/add.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/GlobalISel/legalizer/add.mir?rev=352686&r1=352685&r2=352686&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/GlobalISel/legalizer/add.mir (original)
+++ llvm/trunk/test/CodeGen/Mips/GlobalISel/legalizer/add.mir Wed Jan 30 15:09:28 2019
@@ -52,8 +52,8 @@ body: |
; MIPS32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]]
; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
- ; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]]
- ; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]]
+ ; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32)
+ ; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
; MIPS32: $v0 = COPY [[ASHR]](s32)
; MIPS32: RetRA implicit $v0
%2:_(s32) = COPY $a0
@@ -141,8 +141,8 @@ body: |
; MIPS32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY3]]
; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ADD]](s32)
- ; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]]
- ; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]]
+ ; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32)
+ ; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
; MIPS32: $v0 = COPY [[ASHR]](s32)
; MIPS32: RetRA implicit $v0
%2:_(s32) = COPY $a0
@@ -275,13 +275,13 @@ body: |
; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
; MIPS32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 0)
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0)
; MIPS32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (load 4 from %fixed-stack.1, align 0)
+ ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (load 4 from %fixed-stack.1)
; MIPS32: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
- ; MIPS32: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p0) :: (load 4 from %fixed-stack.2, align 0)
+ ; MIPS32: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p0) :: (load 4 from %fixed-stack.2)
; MIPS32: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
- ; MIPS32: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p0) :: (load 4 from %fixed-stack.3, align 0)
+ ; MIPS32: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p0) :: (load 4 from %fixed-stack.3)
; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; MIPS32: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[LOAD]], [[COPY]]
; MIPS32: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
@@ -317,13 +317,13 @@ body: |
%5:_(s32) = COPY $a3
%0:_(s128) = G_MERGE_VALUES %2(s32), %3(s32), %4(s32), %5(s32)
%10:_(p0) = G_FRAME_INDEX %fixed-stack.3
- %6:_(s32) = G_LOAD %10(p0) :: (load 4 from %fixed-stack.3, align 0)
+ %6:_(s32) = G_LOAD %10(p0) :: (load 4 from %fixed-stack.3, align 4)
%11:_(p0) = G_FRAME_INDEX %fixed-stack.2
- %7:_(s32) = G_LOAD %11(p0) :: (load 4 from %fixed-stack.2, align 0)
+ %7:_(s32) = G_LOAD %11(p0) :: (load 4 from %fixed-stack.2, align 4)
%12:_(p0) = G_FRAME_INDEX %fixed-stack.1
- %8:_(s32) = G_LOAD %12(p0) :: (load 4 from %fixed-stack.1, align 0)
+ %8:_(s32) = G_LOAD %12(p0) :: (load 4 from %fixed-stack.1, align 4)
%13:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %9:_(s32) = G_LOAD %13(p0) :: (load 4 from %fixed-stack.0, align 0)
+ %9:_(s32) = G_LOAD %13(p0) :: (load 4 from %fixed-stack.0, align 4)
%1:_(s128) = G_MERGE_VALUES %6(s32), %7(s32), %8(s32), %9(s32)
%14:_(s128) = G_ADD %1, %0
%15:_(s32), %16:_(s32), %17:_(s32), %18:_(s32) = G_UNMERGE_VALUES %14(s128)
Modified: llvm/trunk/test/CodeGen/Mips/GlobalISel/legalizer/pointers.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/GlobalISel/legalizer/pointers.mir?rev=352686&r1=352685&r2=352686&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/GlobalISel/legalizer/pointers.mir (original)
+++ llvm/trunk/test/CodeGen/Mips/GlobalISel/legalizer/pointers.mir Wed Jan 30 15:09:28 2019
@@ -44,7 +44,7 @@ body: |
; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
; MIPS32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; MIPS32: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 0)
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0)
; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.p)
; MIPS32: $v0 = COPY [[LOAD1]](s32)
; MIPS32: RetRA implicit $v0
@@ -53,7 +53,7 @@ body: |
%2:_(s32) = COPY $a2
%3:_(s32) = COPY $a3
%5:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %4:_(p0) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 0)
+ %4:_(p0) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 4)
%6:_(s32) = G_LOAD %4(p0) :: (load 4 from %ir.p)
$v0 = COPY %6(s32)
RetRA implicit $v0
Modified: llvm/trunk/test/CodeGen/Mips/GlobalISel/legalizer/stack_args.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/GlobalISel/legalizer/stack_args.mir?rev=352686&r1=352685&r2=352686&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/GlobalISel/legalizer/stack_args.mir (original)
+++ llvm/trunk/test/CodeGen/Mips/GlobalISel/legalizer/stack_args.mir Wed Jan 30 15:09:28 2019
@@ -23,7 +23,7 @@ body: |
; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
; MIPS32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 0)
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 8)
; MIPS32: ADJCALLSTACKDOWN 24, 0, implicit-def $sp, implicit $sp
; MIPS32: $a0 = COPY [[COPY]](s32)
; MIPS32: $a1 = COPY [[COPY1]](s32)
@@ -32,7 +32,7 @@ body: |
; MIPS32: [[COPY4:%[0-9]+]]:_(p0) = COPY $sp
; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; MIPS32: [[GEP:%[0-9]+]]:_(p0) = G_GEP [[COPY4]], [[C]](s32)
- ; MIPS32: G_STORE [[LOAD]](s32), [[GEP]](p0) :: (store 4 into stack + 16, align 0)
+ ; MIPS32: G_STORE [[LOAD]](s32), [[GEP]](p0) :: (store 4 into stack + 16)
; MIPS32: JAL @f, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit $a2, implicit $a3, implicit-def $v0
; MIPS32: [[COPY5:%[0-9]+]]:_(s32) = COPY $v0
; MIPS32: ADJCALLSTACKUP 24, 0, implicit-def $sp, implicit $sp
@@ -43,7 +43,7 @@ body: |
%2:_(s32) = COPY $a2
%3:_(s32) = COPY $a3
%5:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %4:_(s32) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 0)
+ %4:_(s32) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 8)
ADJCALLSTACKDOWN 24, 0, implicit-def $sp, implicit $sp
$a0 = COPY %0(s32)
$a1 = COPY %1(s32)
@@ -52,7 +52,7 @@ body: |
%7:_(p0) = COPY $sp
%8:_(s32) = G_CONSTANT i32 16
%9:_(p0) = G_GEP %7, %8(s32)
- G_STORE %4(s32), %9(p0) :: (store 4 into stack + 16, align 0)
+ G_STORE %4(s32), %9(p0) :: (store 4 into stack + 16, align 4)
JAL @f, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit $a2, implicit $a3, implicit-def $v0
%6:_(s32) = COPY $v0
ADJCALLSTACKUP 24, 0, implicit-def $sp, implicit $sp
Modified: llvm/trunk/test/CodeGen/Mips/GlobalISel/legalizer/sub.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/GlobalISel/legalizer/sub.mir?rev=352686&r1=352685&r2=352686&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/GlobalISel/legalizer/sub.mir (original)
+++ llvm/trunk/test/CodeGen/Mips/GlobalISel/legalizer/sub.mir Wed Jan 30 15:09:28 2019
@@ -52,8 +52,8 @@ body: |
; MIPS32: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY2]], [[COPY3]]
; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SUB]](s32)
- ; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]]
- ; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]]
+ ; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32)
+ ; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
; MIPS32: $v0 = COPY [[ASHR]](s32)
; MIPS32: RetRA implicit $v0
%2:_(s32) = COPY $a0
@@ -141,8 +141,8 @@ body: |
; MIPS32: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY2]], [[COPY3]]
; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; MIPS32: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SUB]](s32)
- ; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]]
- ; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]]
+ ; MIPS32: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32)
+ ; MIPS32: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
; MIPS32: $v0 = COPY [[ASHR]](s32)
; MIPS32: RetRA implicit $v0
%2:_(s32) = COPY $a0
@@ -270,13 +270,13 @@ body: |
; MIPS32: [[COPY2:%[0-9]+]]:_(s32) = COPY $a2
; MIPS32: [[COPY3:%[0-9]+]]:_(s32) = COPY $a3
; MIPS32: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 0)
+ ; MIPS32: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 8)
; MIPS32: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (load 4 from %fixed-stack.1, align 0)
+ ; MIPS32: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (load 4 from %fixed-stack.1)
; MIPS32: [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
- ; MIPS32: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p0) :: (load 4 from %fixed-stack.2, align 0)
+ ; MIPS32: [[LOAD2:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX2]](p0) :: (load 4 from %fixed-stack.2, align 8)
; MIPS32: [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
- ; MIPS32: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p0) :: (load 4 from %fixed-stack.3, align 0)
+ ; MIPS32: [[LOAD3:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX3]](p0) :: (load 4 from %fixed-stack.3)
; MIPS32: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[LOAD]], [[COPY]]
; MIPS32: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[LOAD]](s32), [[COPY]]
; MIPS32: [[TRUNC:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
@@ -323,13 +323,13 @@ body: |
%5:_(s32) = COPY $a3
%0:_(s128) = G_MERGE_VALUES %2(s32), %3(s32), %4(s32), %5(s32)
%10:_(p0) = G_FRAME_INDEX %fixed-stack.3
- %6:_(s32) = G_LOAD %10(p0) :: (load 4 from %fixed-stack.3, align 0)
+ %6:_(s32) = G_LOAD %10(p0) :: (load 4 from %fixed-stack.3, align 8)
%11:_(p0) = G_FRAME_INDEX %fixed-stack.2
- %7:_(s32) = G_LOAD %11(p0) :: (load 4 from %fixed-stack.2, align 0)
+ %7:_(s32) = G_LOAD %11(p0) :: (load 4 from %fixed-stack.2, align 4)
%12:_(p0) = G_FRAME_INDEX %fixed-stack.1
- %8:_(s32) = G_LOAD %12(p0) :: (load 4 from %fixed-stack.1, align 0)
+ %8:_(s32) = G_LOAD %12(p0) :: (load 4 from %fixed-stack.1, align 8)
%13:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %9:_(s32) = G_LOAD %13(p0) :: (load 4 from %fixed-stack.0, align 0)
+ %9:_(s32) = G_LOAD %13(p0) :: (load 4 from %fixed-stack.0, align 4)
%1:_(s128) = G_MERGE_VALUES %6(s32), %7(s32), %8(s32), %9(s32)
%14:_(s128) = G_SUB %1, %0
%15:_(s32), %16:_(s32), %17:_(s32), %18:_(s32) = G_UNMERGE_VALUES %14(s128)
Modified: llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/pointers.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/pointers.mir?rev=352686&r1=352685&r2=352686&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/pointers.mir (original)
+++ llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/pointers.mir Wed Jan 30 15:09:28 2019
@@ -46,7 +46,7 @@ body: |
; MIPS32: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
; MIPS32: [[COPY3:%[0-9]+]]:gprb(s32) = COPY $a3
; MIPS32: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
- ; MIPS32: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 0)
+ ; MIPS32: [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 8)
; MIPS32: [[LOAD1:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.p)
; MIPS32: $v0 = COPY [[LOAD1]](s32)
; MIPS32: RetRA implicit $v0
@@ -55,7 +55,7 @@ body: |
%2:_(s32) = COPY $a2
%3:_(s32) = COPY $a3
%5:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %4:_(p0) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 0)
+ %4:_(p0) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 8)
%6:_(s32) = G_LOAD %4(p0) :: (load 4 from %ir.p)
$v0 = COPY %6(s32)
RetRA implicit $v0
Modified: llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/stack_args.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/stack_args.mir?rev=352686&r1=352685&r2=352686&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/stack_args.mir (original)
+++ llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/stack_args.mir Wed Jan 30 15:09:28 2019
@@ -24,7 +24,7 @@ body: |
; MIPS32: [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
; MIPS32: [[COPY3:%[0-9]+]]:gprb(s32) = COPY $a3
; MIPS32: [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
- ; MIPS32: [[LOAD:%[0-9]+]]:gprb(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 0)
+ ; MIPS32: [[LOAD:%[0-9]+]]:gprb(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 8)
; MIPS32: ADJCALLSTACKDOWN 24, 0, implicit-def $sp, implicit $sp
; MIPS32: $a0 = COPY [[COPY]](s32)
; MIPS32: $a1 = COPY [[COPY1]](s32)
@@ -33,7 +33,7 @@ body: |
; MIPS32: [[COPY4:%[0-9]+]]:gprb(p0) = COPY $sp
; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 16
; MIPS32: [[GEP:%[0-9]+]]:gprb(p0) = G_GEP [[COPY4]], [[C]](s32)
- ; MIPS32: G_STORE [[LOAD]](s32), [[GEP]](p0) :: (store 4 into stack + 16, align 0)
+ ; MIPS32: G_STORE [[LOAD]](s32), [[GEP]](p0) :: (store 4 into stack + 16)
; MIPS32: JAL @f, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit $a2, implicit $a3, implicit-def $v0
; MIPS32: [[COPY5:%[0-9]+]]:gprb(s32) = COPY $v0
; MIPS32: ADJCALLSTACKUP 24, 0, implicit-def $sp, implicit $sp
@@ -44,7 +44,7 @@ body: |
%2:_(s32) = COPY $a2
%3:_(s32) = COPY $a3
%5:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %4:_(s32) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 0)
+ %4:_(s32) = G_LOAD %5(p0) :: (load 4 from %fixed-stack.0, align 8)
ADJCALLSTACKDOWN 24, 0, implicit-def $sp, implicit $sp
$a0 = COPY %0(s32)
$a1 = COPY %1(s32)
@@ -53,7 +53,7 @@ body: |
%7:_(p0) = COPY $sp
%8:_(s32) = G_CONSTANT i32 16
%9:_(p0) = G_GEP %7, %8(s32)
- G_STORE %4(s32), %9(p0) :: (store 4 into stack + 16, align 0)
+ G_STORE %4(s32), %9(p0) :: (store 4 into stack + 16, align 4)
JAL @f, csr_o32, implicit-def $ra, implicit-def $sp, implicit $a0, implicit $a1, implicit $a2, implicit $a3, implicit-def $v0
%6:_(s32) = COPY $v0
ADJCALLSTACKUP 24, 0, implicit-def $sp, implicit $sp
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir?rev=352686&r1=352685&r2=352686&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/select-memop-scalar-x32.mir Wed Jan 30 15:09:28 2019
@@ -57,12 +57,12 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_load_i8
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm [[MOV32rm]], 1, $noreg, 0, $noreg :: (load 1 from %ir.p1)
; ALL: $al = COPY [[MOV8rm]]
; ALL: RET 0, implicit $al
%1(p0) = G_FRAME_INDEX %fixed-stack.0
- %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2(s8) = G_LOAD %0(p0) :: (load 1 from %ir.p1)
$al = COPY %2(s8)
RET 0, implicit $al
@@ -82,12 +82,12 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_load_i16
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
; ALL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm [[MOV32rm]], 1, $noreg, 0, $noreg :: (load 2 from %ir.p1)
; ALL: $ax = COPY [[MOV16rm]]
; ALL: RET 0, implicit $ax
%1(p0) = G_FRAME_INDEX %fixed-stack.0
- %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2(s16) = G_LOAD %0(p0) :: (load 2 from %ir.p1)
$ax = COPY %2(s16)
RET 0, implicit $ax
@@ -107,12 +107,12 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_load_i32
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm [[MOV32rm]], 1, $noreg, 0, $noreg :: (load 4 from %ir.p1)
; ALL: $eax = COPY [[MOV32rm1]]
; ALL: RET 0, implicit $eax
%1(p0) = G_FRAME_INDEX %fixed-stack.0
- %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2(s32) = G_LOAD %0(p0) :: (load 4 from %ir.p1)
$eax = COPY %2(s32)
RET 0, implicit $eax
@@ -134,15 +134,15 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_store_i8
- ; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 16)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1)
; ALL: MOV8mr [[MOV32rm]], 1, $noreg, 0, $noreg, [[MOV8rm]] :: (store 1 into %ir.p1)
; ALL: $eax = COPY [[MOV32rm]]
; ALL: RET 0, implicit $eax
%2(p0) = G_FRAME_INDEX %fixed-stack.1
- %0(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 0)
+ %0(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 16)
%3(p0) = G_FRAME_INDEX %fixed-stack.0
- %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 4)
G_STORE %0(s8), %1(p0) :: (store 1 into %ir.p1)
$eax = COPY %1(p0)
RET 0, implicit $eax
@@ -164,15 +164,15 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_store_i16
- ; ALL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; ALL: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 16)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1)
; ALL: MOV16mr [[MOV32rm]], 1, $noreg, 0, $noreg, [[MOV16rm]] :: (store 2 into %ir.p1)
; ALL: $eax = COPY [[MOV32rm]]
; ALL: RET 0, implicit $eax
%2(p0) = G_FRAME_INDEX %fixed-stack.1
- %0(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 0)
+ %0(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 16)
%3(p0) = G_FRAME_INDEX %fixed-stack.0
- %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 4)
G_STORE %0(s16), %1(p0) :: (store 2 into %ir.p1)
$eax = COPY %1(p0)
RET 0, implicit $eax
@@ -194,15 +194,15 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_store_i32
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
+ ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1)
; ALL: MOV32mr [[MOV32rm1]], 1, $noreg, 0, $noreg, [[MOV32rm]] :: (store 4 into %ir.p1)
; ALL: $eax = COPY [[MOV32rm1]]
; ALL: RET 0, implicit $eax
%2(p0) = G_FRAME_INDEX %fixed-stack.1
- %0(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+ %0(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 16)
%3(p0) = G_FRAME_INDEX %fixed-stack.0
- %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 4)
G_STORE %0(s32), %1(p0) :: (store 4 into %ir.p1)
$eax = COPY %1(p0)
RET 0, implicit $eax
@@ -222,12 +222,12 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_load_ptr
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm [[MOV32rm]], 1, $noreg, 0, $noreg :: (load 4 from %ir.ptr1)
; ALL: $eax = COPY [[MOV32rm1]]
; ALL: RET 0, implicit $eax
%1(p0) = G_FRAME_INDEX %fixed-stack.0
- %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2(p0) = G_LOAD %0(p0) :: (load 4 from %ir.ptr1)
$eax = COPY %2(p0)
RET 0, implicit $eax
@@ -249,14 +249,14 @@ fixedStack:
body: |
bb.1 (%ir-block.0):
; ALL-LABEL: name: test_store_ptr
- ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
- ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
+ ; ALL: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1)
; ALL: MOV32mr [[MOV32rm]], 1, $noreg, 0, $noreg, [[MOV32rm1]] :: (store 4 into %ir.ptr1)
; ALL: RET 0
%2(p0) = G_FRAME_INDEX %fixed-stack.1
- %0(p0) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+ %0(p0) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 16)
%3(p0) = G_FRAME_INDEX %fixed-stack.0
- %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 4)
G_STORE %1(p0), %0(p0) :: (store 4 into %ir.ptr1)
RET 0
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/x86-legalize-inttoptr.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/x86-legalize-inttoptr.mir?rev=352686&r1=352685&r2=352686&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/x86-legalize-inttoptr.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/x86-legalize-inttoptr.mir Wed Jan 30 15:09:28 2019
@@ -26,12 +26,12 @@ body: |
bb.1.entry:
; CHECK-LABEL: name: inttoptr_p0_s32
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: [[INTTOPTR:%[0-9]+]]:_(p0) = G_INTTOPTR [[LOAD]](s32)
; CHECK: $eax = COPY [[INTTOPTR]](p0)
; CHECK: RET 0, implicit $eax
%1:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %0:_(s32) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0:_(s32) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2:_(p0) = G_INTTOPTR %0(s32)
$eax = COPY %2(p0)
RET 0, implicit $eax
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/x86-legalize-ptrtoint.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/x86-legalize-ptrtoint.mir?rev=352686&r1=352685&r2=352686&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/x86-legalize-ptrtoint.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/x86-legalize-ptrtoint.mir Wed Jan 30 15:09:28 2019
@@ -45,13 +45,13 @@ body: |
bb.1.entry:
; CHECK-LABEL: name: ptrtoint_s1_p0
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: [[PTRTOINT:%[0-9]+]]:_(s1) = G_PTRTOINT [[LOAD]](p0)
; CHECK: [[ANYEXT:%[0-9]+]]:_(s8) = G_ANYEXT [[PTRTOINT]](s1)
; CHECK: $al = COPY [[ANYEXT]](s8)
; CHECK: RET 0, implicit $al
%1:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %0:_(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0:_(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2:_(s1) = G_PTRTOINT %0(p0)
%3:_(s8) = G_ANYEXT %2(s1)
$al = COPY %3(s8)
@@ -74,12 +74,12 @@ body: |
bb.1.entry:
; CHECK-LABEL: name: ptrtoint_s8_p0
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: [[PTRTOINT:%[0-9]+]]:_(s8) = G_PTRTOINT [[LOAD]](p0)
; CHECK: $al = COPY [[PTRTOINT]](s8)
; CHECK: RET 0, implicit $al
%1:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %0:_(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0:_(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2:_(s8) = G_PTRTOINT %0(p0)
$al = COPY %2(s8)
RET 0, implicit $al
@@ -101,12 +101,12 @@ body: |
bb.1.entry:
; CHECK-LABEL: name: ptrtoint_s16_p0
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: [[PTRTOINT:%[0-9]+]]:_(s16) = G_PTRTOINT [[LOAD]](p0)
; CHECK: $ax = COPY [[PTRTOINT]](s16)
; CHECK: RET 0, implicit $ax
%1:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %0:_(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0:_(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2:_(s16) = G_PTRTOINT %0(p0)
$ax = COPY %2(s16)
RET 0, implicit $ax
@@ -128,12 +128,12 @@ body: |
bb.1.entry:
; CHECK-LABEL: name: ptrtoint_s32_p0
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; CHECK: [[LOAD:%[0-9]+]]:_(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: [[PTRTOINT:%[0-9]+]]:_(s32) = G_PTRTOINT [[LOAD]](p0)
; CHECK: $eax = COPY [[PTRTOINT]](s32)
; CHECK: RET 0, implicit $eax
%1:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %0:_(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0:_(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2:_(s32) = G_PTRTOINT %0(p0)
$eax = COPY %2(s32)
RET 0, implicit $eax
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/x86-legalize-srem.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/x86-legalize-srem.mir?rev=352686&r1=352685&r2=352686&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/x86-legalize-srem.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/x86-legalize-srem.mir Wed Jan 30 15:09:28 2019
@@ -70,16 +70,16 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_srem_i8
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; CHECK: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.0, align 0)
+ ; CHECK: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.0, align 16)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; CHECK: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.1, align 0)
+ ; CHECK: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.1, align 4)
; CHECK: [[SREM:%[0-9]+]]:_(s8) = G_SREM [[LOAD]], [[LOAD1]]
; CHECK: $al = COPY [[SREM]](s8)
; CHECK: RET 0, implicit $al
%2:_(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:_(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 0)
+ %0:_(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 16)
%3:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:_(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 0)
+ %1:_(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 4)
%4:_(s8) = G_SREM %0, %1
$al = COPY %4(s8)
RET 0, implicit $al
@@ -132,16 +132,16 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_srem_i16
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 2 from %fixed-stack.0, align 0)
+ ; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 2 from %fixed-stack.0, align 16)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; CHECK: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 2 from %fixed-stack.1, align 0)
+ ; CHECK: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 2 from %fixed-stack.1, align 4)
; CHECK: [[SREM:%[0-9]+]]:_(s16) = G_SREM [[LOAD]], [[LOAD1]]
; CHECK: $ax = COPY [[SREM]](s16)
; CHECK: RET 0, implicit $ax
%2:_(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:_(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 0)
+ %0:_(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 16)
%3:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:_(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 0)
+ %1:_(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 4)
%4:_(s16) = G_SREM %0, %1
$ax = COPY %4(s16)
RET 0, implicit $ax
@@ -194,16 +194,16 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_srem_i32
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.1)
; CHECK: [[SREM:%[0-9]+]]:_(s32) = G_SREM [[LOAD]], [[LOAD1]]
; CHECK: $eax = COPY [[SREM]](s32)
; CHECK: RET 0, implicit $eax
%2:_(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:_(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+ %0:_(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 16)
%3:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:_(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %1:_(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 4)
%4:_(s32) = G_SREM %0, %1
$eax = COPY %4(s32)
RET 0, implicit $eax
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/x86-legalize-urem.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/x86-legalize-urem.mir?rev=352686&r1=352685&r2=352686&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/x86-legalize-urem.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/x86-legalize-urem.mir Wed Jan 30 15:09:28 2019
@@ -70,16 +70,16 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_urem_i8
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; CHECK: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.0, align 0)
+ ; CHECK: [[LOAD:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 1 from %fixed-stack.0, align 16)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; CHECK: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.1, align 0)
+ ; CHECK: [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 1 from %fixed-stack.1, align 4)
; CHECK: [[UREM:%[0-9]+]]:_(s8) = G_UREM [[LOAD]], [[LOAD1]]
; CHECK: $al = COPY [[UREM]](s8)
; CHECK: RET 0, implicit $al
%2:_(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:_(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 0)
+ %0:_(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 16)
%3:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:_(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 0)
+ %1:_(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 4)
%4:_(s8) = G_UREM %0, %1
$al = COPY %4(s8)
RET 0, implicit $al
@@ -132,16 +132,16 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_urem_i16
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 2 from %fixed-stack.0, align 0)
+ ; CHECK: [[LOAD:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 2 from %fixed-stack.0, align 16)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; CHECK: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 2 from %fixed-stack.1, align 0)
+ ; CHECK: [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 2 from %fixed-stack.1, align 4)
; CHECK: [[UREM:%[0-9]+]]:_(s16) = G_UREM [[LOAD]], [[LOAD1]]
; CHECK: $ax = COPY [[UREM]](s16)
; CHECK: RET 0, implicit $ax
%2:_(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:_(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 0)
+ %0:_(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 16)
%3:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:_(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 0)
+ %1:_(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 4)
%4:_(s16) = G_UREM %0, %1
$ax = COPY %4(s16)
RET 0, implicit $ax
@@ -194,16 +194,16 @@ body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_urem_i32
; CHECK: [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
- ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX]](p0) :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
- ; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[FRAME_INDEX1]](p0) :: (invariant load 4 from %fixed-stack.1)
; CHECK: [[UREM:%[0-9]+]]:_(s32) = G_UREM [[LOAD]], [[LOAD1]]
; CHECK: $eax = COPY [[UREM]](s32)
; CHECK: RET 0, implicit $eax
%2:_(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:_(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+ %0:_(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 16)
%3:_(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:_(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %1:_(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 4)
%4:_(s32) = G_UREM %0, %1
$eax = COPY %4(s32)
RET 0, implicit $eax
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-inttoptr.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-inttoptr.mir?rev=352686&r1=352685&r2=352686&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-inttoptr.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-inttoptr.mir Wed Jan 30 15:09:28 2019
@@ -27,11 +27,11 @@ fixedStack:
body: |
bb.1.entry:
; CHECK-LABEL: name: inttoptr_p0_s32
- ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: $eax = COPY [[MOV32rm]]
; CHECK: RET 0, implicit $eax
%1:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %0:gpr(s32) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0:gpr(s32) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2:gpr(p0) = G_INTTOPTR %0(s32)
$eax = COPY %2(p0)
RET 0, implicit $eax
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-ptrtoint.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-ptrtoint.mir?rev=352686&r1=352685&r2=352686&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-ptrtoint.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-ptrtoint.mir Wed Jan 30 15:09:28 2019
@@ -46,12 +46,12 @@ fixedStack:
body: |
bb.1.entry:
; CHECK-LABEL: name: ptrtoint_s1_p0
- ; CHECK: [[MOV32rm:%[0-9]+]]:gr32_abcd = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; CHECK: [[MOV32rm:%[0-9]+]]:gr32_abcd = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: [[COPY:%[0-9]+]]:gr8 = COPY [[MOV32rm]].sub_8bit
; CHECK: $al = COPY [[COPY]]
; CHECK: RET 0, implicit $al
%1:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %0:gpr(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0:gpr(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2:gpr(s1) = G_PTRTOINT %0(p0)
%3:gpr(s8) = G_ANYEXT %2(s1)
$al = COPY %3(s8)
@@ -75,12 +75,12 @@ fixedStack:
body: |
bb.1.entry:
; CHECK-LABEL: name: ptrtoint_s8_p0
- ; CHECK: [[MOV32rm:%[0-9]+]]:gr32_abcd = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; CHECK: [[MOV32rm:%[0-9]+]]:gr32_abcd = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: [[COPY:%[0-9]+]]:gr8 = COPY [[MOV32rm]].sub_8bit
; CHECK: $al = COPY [[COPY]]
; CHECK: RET 0, implicit $al
%1:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %0:gpr(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0:gpr(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2:gpr(s8) = G_PTRTOINT %0(p0)
$al = COPY %2(s8)
RET 0, implicit $al
@@ -103,12 +103,12 @@ fixedStack:
body: |
bb.1.entry:
; CHECK-LABEL: name: ptrtoint_s16_p0
- ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: [[COPY:%[0-9]+]]:gr16 = COPY [[MOV32rm]].sub_16bit
; CHECK: $ax = COPY [[COPY]]
; CHECK: RET 0, implicit $ax
%1:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %0:gpr(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0:gpr(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2:gpr(s16) = G_PTRTOINT %0(p0)
$ax = COPY %2(s16)
RET 0, implicit $ax
@@ -131,11 +131,11 @@ fixedStack:
body: |
bb.1.entry:
; CHECK-LABEL: name: ptrtoint_s32_p0
- ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
+ ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
; CHECK: $eax = COPY [[MOV32rm]]
; CHECK: RET 0, implicit $eax
%1:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %0:gpr(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %0:gpr(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%2:gpr(s32) = G_PTRTOINT %0(p0)
$eax = COPY %2(s32)
RET 0, implicit $eax
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-srem.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-srem.mir?rev=352686&r1=352685&r2=352686&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-srem.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-srem.mir Wed Jan 30 15:09:28 2019
@@ -69,17 +69,17 @@ constants:
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_srem_i8
- ; CHECK: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 0)
- ; CHECK: [[MOV8rm1:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.1, align 0)
+ ; CHECK: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 16)
+ ; CHECK: [[MOV8rm1:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.1, align 4)
; CHECK: $ax = MOVSX16rr8 [[MOV8rm]]
; CHECK: IDIV8r [[MOV8rm1]], implicit-def $al, implicit-def $ah, implicit-def $eflags, implicit $ax
; CHECK: [[COPY:%[0-9]+]]:gr8 = COPY $ah
; CHECK: $al = COPY [[COPY]]
; CHECK: RET 0, implicit $al
%2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:gpr(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 0)
+ %0:gpr(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 16)
%3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:gpr(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 0)
+ %1:gpr(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 4)
%4:gpr(s8) = G_SREM %0, %1
$al = COPY %4(s8)
RET 0, implicit $al
@@ -131,8 +131,8 @@ constants:
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_srem_i16
- ; CHECK: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 0)
- ; CHECK: [[MOV16rm1:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.1, align 0)
+ ; CHECK: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 16)
+ ; CHECK: [[MOV16rm1:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.1, align 4)
; CHECK: $ax = COPY [[MOV16rm]]
; CHECK: CWD implicit-def $ax, implicit-def $dx, implicit $ax
; CHECK: IDIV16r [[MOV16rm1]], implicit-def $ax, implicit-def $dx, implicit-def $eflags, implicit $ax, implicit $dx
@@ -140,9 +140,9 @@ body: |
; CHECK: $ax = COPY [[COPY]]
; CHECK: RET 0, implicit $ax
%2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:gpr(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 0)
+ %0:gpr(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 16)
%3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:gpr(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 0)
+ %1:gpr(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 4)
%4:gpr(s16) = G_SREM %0, %1
$ax = COPY %4(s16)
RET 0, implicit $ax
@@ -194,8 +194,8 @@ constants:
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_srem_i32
- ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
- ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
+ ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1)
; CHECK: $eax = COPY [[MOV32rm]]
; CHECK: CDQ implicit-def $eax, implicit-def $edx, implicit $eax
; CHECK: IDIV32r [[MOV32rm1]], implicit-def $eax, implicit-def $edx, implicit-def $eflags, implicit $eax, implicit $edx
@@ -203,9 +203,9 @@ body: |
; CHECK: $eax = COPY [[COPY]]
; CHECK: RET 0, implicit $eax
%2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:gpr(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+ %0:gpr(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 16)
%3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:gpr(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %1:gpr(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 4)
%4:gpr(s32) = G_SREM %0, %1
$eax = COPY %4(s32)
RET 0, implicit $eax
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-udiv.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-udiv.mir?rev=352686&r1=352685&r2=352686&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-udiv.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-udiv.mir Wed Jan 30 15:09:28 2019
@@ -69,17 +69,17 @@ constants:
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_udiv_i8
- ; CHECK: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 0)
- ; CHECK: [[MOV8rm1:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.1, align 0)
+ ; CHECK: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 16)
+ ; CHECK: [[MOV8rm1:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.1, align 4)
; CHECK: $ax = MOVZX16rr8 [[MOV8rm]]
; CHECK: DIV8r [[MOV8rm1]], implicit-def $al, implicit-def $ah, implicit-def $eflags, implicit $ax
; CHECK: [[COPY:%[0-9]+]]:gr8 = COPY $al
; CHECK: $al = COPY [[COPY]]
; CHECK: RET 0, implicit $al
%2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:gpr(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 0)
+ %0:gpr(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 16)
%3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:gpr(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 0)
+ %1:gpr(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 4)
%4:gpr(s8) = G_UDIV %0, %1
$al = COPY %4(s8)
RET 0, implicit $al
@@ -131,8 +131,8 @@ constants:
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_udiv_i16
- ; CHECK: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 0)
- ; CHECK: [[MOV16rm1:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.1, align 0)
+ ; CHECK: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 16)
+ ; CHECK: [[MOV16rm1:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.1, align 4)
; CHECK: $ax = COPY [[MOV16rm]]
; CHECK: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
; CHECK: $dx = COPY [[MOV32r0_]].sub_16bit
@@ -141,9 +141,9 @@ body: |
; CHECK: $ax = COPY [[COPY]]
; CHECK: RET 0, implicit $ax
%2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:gpr(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 0)
+ %0:gpr(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 16)
%3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:gpr(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 0)
+ %1:gpr(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 4)
%4:gpr(s16) = G_UDIV %0, %1
$ax = COPY %4(s16)
RET 0, implicit $ax
@@ -195,8 +195,8 @@ constants:
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_udiv_i32
- ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
- ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0)
+ ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 16)
; CHECK: $eax = COPY [[MOV32rm]]
; CHECK: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
; CHECK: $edx = COPY [[MOV32r0_]]
@@ -205,9 +205,9 @@ body: |
; CHECK: $eax = COPY [[COPY]]
; CHECK: RET 0, implicit $eax
%2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:gpr(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+ %0:gpr(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 4)
%3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:gpr(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %1:gpr(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 16)
%4:gpr(s32) = G_UDIV %0, %1
$eax = COPY %4(s32)
RET 0, implicit $eax
Modified: llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-urem.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-urem.mir?rev=352686&r1=352685&r2=352686&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-urem.mir (original)
+++ llvm/trunk/test/CodeGen/X86/GlobalISel/x86-select-urem.mir Wed Jan 30 15:09:28 2019
@@ -69,17 +69,17 @@ constants:
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_urem_i8
- ; CHECK: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 0)
- ; CHECK: [[MOV8rm1:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.1, align 0)
+ ; CHECK: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.0, align 16)
+ ; CHECK: [[MOV8rm1:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 1 from %fixed-stack.1, align 4)
; CHECK: $ax = MOVZX16rr8 [[MOV8rm]]
; CHECK: DIV8r [[MOV8rm1]], implicit-def $al, implicit-def $ah, implicit-def $eflags, implicit $ax
; CHECK: [[COPY:%[0-9]+]]:gr8 = COPY $ah
; CHECK: $al = COPY [[COPY]]
; CHECK: RET 0, implicit $al
%2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:gpr(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 0)
+ %0:gpr(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 16)
%3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:gpr(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 0)
+ %1:gpr(s8) = G_LOAD %3(p0) :: (invariant load 1 from %fixed-stack.0, align 4)
%4:gpr(s8) = G_UREM %0, %1
$al = COPY %4(s8)
RET 0, implicit $al
@@ -131,8 +131,8 @@ constants:
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_urem_i16
- ; CHECK: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 0)
- ; CHECK: [[MOV16rm1:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.1, align 0)
+ ; CHECK: [[MOV16rm:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.0, align 16)
+ ; CHECK: [[MOV16rm1:%[0-9]+]]:gr16 = MOV16rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 2 from %fixed-stack.1, align 4)
; CHECK: $ax = COPY [[MOV16rm]]
; CHECK: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
; CHECK: $dx = COPY [[MOV32r0_]].sub_16bit
@@ -141,9 +141,9 @@ body: |
; CHECK: $ax = COPY [[COPY]]
; CHECK: RET 0, implicit $ax
%2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:gpr(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 0)
+ %0:gpr(s16) = G_LOAD %2(p0) :: (invariant load 2 from %fixed-stack.1, align 16)
%3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:gpr(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 0)
+ %1:gpr(s16) = G_LOAD %3(p0) :: (invariant load 2 from %fixed-stack.0, align 4)
%4:gpr(s16) = G_UREM %0, %1
$ax = COPY %4(s16)
RET 0, implicit $ax
@@ -195,8 +195,8 @@ constants:
body: |
bb.1 (%ir-block.0):
; CHECK-LABEL: name: test_urem_i32
- ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 0)
- ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1, align 0)
+ ; CHECK: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.0, align 16)
+ ; CHECK: [[MOV32rm1:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load 4 from %fixed-stack.1)
; CHECK: $eax = COPY [[MOV32rm]]
; CHECK: [[MOV32r0_:%[0-9]+]]:gr32 = MOV32r0 implicit-def $eflags
; CHECK: $edx = COPY [[MOV32r0_]]
@@ -205,9 +205,9 @@ body: |
; CHECK: $eax = COPY [[COPY]]
; CHECK: RET 0, implicit $eax
%2:gpr(p0) = G_FRAME_INDEX %fixed-stack.1
- %0:gpr(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 0)
+ %0:gpr(s32) = G_LOAD %2(p0) :: (invariant load 4 from %fixed-stack.1, align 16)
%3:gpr(p0) = G_FRAME_INDEX %fixed-stack.0
- %1:gpr(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
+ %1:gpr(s32) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 4)
%4:gpr(s32) = G_UREM %0, %1
$eax = COPY %4(s32)
RET 0, implicit $eax
More information about the llvm-commits
mailing list