[llvm-bugs] [Bug 33751] New: [fuzz] FrameSetup and FrameDestroy misordered, leading to machine verifier errors
via llvm-bugs
llvm-bugs at lists.llvm.org
Tue Jul 11 14:48:12 PDT 2017
https://bugs.llvm.org/show_bug.cgi?id=33751
Bug ID: 33751
Summary: [fuzz] FrameSetup and FrameDestroy misordered, leading
to machine verifier errors
Product: libraries
Version: trunk
Hardware: PC
OS: All
Status: NEW
Severity: enhancement
Priority: P
Component: Backend: AArch64
Assignee: unassignedbugs at nondot.org
Reporter: llvm-bugs at justinbogner.com
CC: llvm-bugs at lists.llvm.org
Created attachment 18779
--> https://bugs.llvm.org/attachment.cgi?id=18779&action=edit
Input bitcode
I was running my isel fuzzer against AArch64 at -O0 (without GlobalISel) and hit
"FrameSetup is after another FrameSetup" and "FrameDestroy is not after a
FrameSetup" machine verifier errors.
The input is attached as bitcode, and the problem can be reproduced like so:
llc -o - -mtriple arm64-apple-ios -O0 -verify-machineinstrs \
    ./crash-8a5e55b1cc14bfa7087bdc27cc7759cb3cd618ab.bc
Here's the input as IR:
-----
; ModuleID = 'crash-8a5e55b1cc14bfa7087bdc27cc7759cb3cd618ab.bc'
source_filename = "Fuzzer input"
define void @f() {
%A14 = alloca i8**
%A25 = alloca i32
%L32 = load i32, i32* %A25
%L30 = load i32, i32* %A25
%G33 = getelementptr i32, i32* %A25, i32 %L32
%L29 = load i32, i32* %G33
%L28 = load i32, i32* %A25
%L27 = load i32, i32* %A25
%C30 = icmp ult i32 %L28, %L29
%B18 = sub i32 %L28, %L29
%L25 = load i32, i32* %A25
%C6 = icmp uge i32 %L25, %L27
%B31 = mul i32 %L30, %B18
%L26 = load i32, i32* %A25
%L24 = load i32, i32* undef
%B27 = xor i32 %L24, %L26
%A12 = alloca i16
%B28 = urem i32 %L30, %B31
%A27 = alloca i8*
%B11 = xor i32 %L25, %L25
%A11 = alloca i1, i32 %B28
%L31 = load i1, i1* %A11
%L23 = load i1, i1* %A11
%G32 = getelementptr i16, i16* %A12, i32 %B18
%B12 = or i32 %B27, %B27
%L17 = load i1, i1* %A11
%L16 = load i1, i1* %A11
%A20 = alloca i1*
%A10 = alloca i8
%G27 = getelementptr i8, i8* %A10, i1 %L16
%B17 = or i32 %B12, %B27
%L22 = load i8, i8* %G27
%L20 = load i8, i8* %A10
%L15 = load i8, i8* %A10
%B24 = udiv i32 %B17, %B27
%A16 = alloca i16
%B32 = lshr i1 %C6, %L31
%L21 = load i16, i16* %A16
%B29 = xor i32 %B11, %L27
%L18 = load i16, i16* %A16
%C9 = icmp ne i32 %B11, %B28
%B26 = xor i1 %L17, %L23
%G10 = getelementptr i16, i16* %A16, i8 %L22
%A9 = alloca i1, i32 %B12
%A13 = alloca double*, i32 %B29
%L11 = load double*, double** %A13
%L8 = load i16, i16* undef
%C2 = icmp uge i1 %L17, %C30
%B19 = ashr i16 %L8, %L18
%L7 = load i8, i8* undef
%B22 = urem i16 %B19, %L21
%B16 = and i8 %L7, %L15
%G24 = getelementptr i16, i16* %A16, i16 %L18
%A6 = alloca i16*
%A8 = alloca double, i32 %B17
%G9 = getelementptr double, double* %A8, i8 %L7
%L9 = load double, double* %A8
%A5 = alloca double, i32 %B24
%G13 = getelementptr double, double* %L11, i16 %L8
%G29 = getelementptr i8*, i8** %A27, i16 %L8
%L6 = load double, double* %A5
%B10 = frem double %L6, %L9
%B30 = sdiv i32 %B18, %B31
%A7 = alloca i8**, i32 %B31
%A4 = alloca i8*, i32 %B30
%A3 = alloca i16*, i32 %B11
%G18 = getelementptr i16*, i16** %A3, i8 %B16
%L3 = load i16, i16* undef
%A1 = alloca i16, i32 %B18
%L12 = load i16, i16* %A1
%G20 = getelementptr i1*, i1** %A20, i1 %L16
%C3 = icmp ne i32 %L28, %B17
%L5 = load i16, i16* %A1
%C10 = icmp uge i32 %B11, %B17
%G6 = getelementptr i8*, i8** %G29, i16 %L5
%B25 = ashr i32 %L29, %L27
%B2 = udiv i8 %L20, %B16
%L4 = load i16, i16* %A1
%G23 = getelementptr double*, double** %A13, i16 %L5
%B33 = ashr i8 %B2, %L15
%C5 = icmp slt i16 %L3, %B22
%C4 = icmp eq i1 %B32, %C5
%G2 = getelementptr i16*, i16** %G18, i16 %L3
%L2 = load i16, i16* %A1
%B23 = srem i8 %L20, %L22
%A2 = alloca i8, i32 %B25
%L13 = load i8, i8* %A2
%A = alloca i8
%C7 = icmp sge i8 %B23, %B2
%G21 = getelementptr i16*, i16** %A3, i1 %C30
%L1 = load i8, i8* %A
%L = load i8, i8* %G27
%B3 = and i16 %L8, %L12
%G25 = getelementptr i16, i16* %A1, i16 %L4
%G30 = getelementptr i16*, i16** %G2, i1 %B26
%B = sub i8 %L, %L1
store i8 %B, i8* %A2
%G11 = getelementptr i16*, i16** %A6, i8 %B
%G28 = getelementptr double*, double** %A13, i8 %L1
%G = getelementptr i16, i16* %A1, i1 %C3
%L14 = load i16, i16* %G24
%L10 = load i16, i16* %G
%G16 = getelementptr i8*, i8** %A4, i8 %L7
store i1 %C6, i1* undef
store i16* %G, i16** %G2
%B1 = urem i16 %L2, %L2
%G7 = getelementptr double, double* %G13, i1 %C7
%G19 = getelementptr i1, i1* %A9, i16 %L5
%G4 = getelementptr i16, i16* %A1, i16 %L4
%L19 = load i16, i16* %G4
%B20 = shl i16 %L12, %L19
%G3 = getelementptr i8, i8* %A, i16 %B20
%G5 = getelementptr i8*, i8** %G16, i1 %C10
%B21 = ashr i8 %B16, %B33
%B35 = lshr i16 %L19, %L4
%C11 = icmp slt i16 %L3, %L4
%G1 = getelementptr i8, i8* %A, i16 %B35
store i8* %G1, i8** %A4
store i8* %G3, i8** undef
%B4 = srem i16 %L10, %L14
store i16* %G10, i16** undef
%C1 = fcmp uno double %L6, %L6
%G8 = getelementptr i16, i16* %G4, i16 %B4
%G17 = getelementptr double, double* %A8, i16 %B3
%B14 = and i8 %B21, %L13
store i8** %G6, i8*** %A7
%G14 = getelementptr i16*, i16** %A3, i16 %L10
%G15 = getelementptr double*, double** %A13, i16 %L8
store double %B10, double* %G9
store i16* %G32, i16** %G11
store double* %G7, double** %G23
store i16* %G8, i16** %G14
store double* %G17, double** %G15
store i8** %G16, i8*** undef
%B6 = add i16 %L3, %L12
%B7 = xor i8 %B21, %L7
%G26 = getelementptr i8, i8* %G3, i16 %B3
store i8 %B14, i8* undef
%G22 = getelementptr double, double* %A8, i1 %C9
%B15 = urem i16 %B1, %L14
store i1 %C2, i1* %A9
store i16 %B15, i16* %G25
%B5 = ashr i16 %B6, %L10
%B9 = udiv i16 %L3, %B22
%G12 = getelementptr i16*, i16** %G30, i1 %C1
store i16 %B5, i16* undef
store i16** %G12, i16*** undef
store i8 %B2, i8* %A2
store i1* %G19, i1** %G20
%G31 = getelementptr i8*, i8** %G16, i8 %L22
store i1 %C4, i1* %A11
store i16** %G21, i16*** undef
%C = icmp sgt i1 %C5, %B26
store double* %G22, double** undef
store i8* %G26, i8** %G31
store i8 %B7, i8* %G26
%B8 = ashr i8 %B21, %L
store i8 %B8, i8* undef
store i1 %C, i1* undef
store i16 %B9, i16* %A12
store i1 %C11, i1* %A9
store double** %G28, double*** undef
store i8** %G5, i8*** %A14
ret void
}
------
And here are the errors we get from the verifier:
------
# After Instruction Selection
# Machine code for function f: IsSSA, TracksLiveness
Frame Objects:
fi#0: size=8, align=8, at location [SP]
fi#1: size=4, align=4, at location [SP]
fi#2: size=2, align=2, at location [SP]
fi#3: size=8, align=8, at location [SP]
fi#4: variable sized, align=1, at location [SP]
fi#5: size=8, align=8, at location [SP]
fi#6: size=1, align=1, at location [SP]
fi#7: size=2, align=2, at location [SP]
fi#8: variable sized, align=1, at location [SP]
fi#9: variable sized, align=1, at location [SP]
fi#10: size=8, align=8, at location [SP]
fi#11: variable sized, align=1, at location [SP]
fi#12: variable sized, align=1, at location [SP]
fi#13: variable sized, align=1, at location [SP]
fi#14: variable sized, align=1, at location [SP]
fi#15: variable sized, align=1, at location [SP]
fi#16: variable sized, align=1, at location [SP]
fi#17: variable sized, align=1, at location [SP]
fi#18: size=1, align=1, at location [SP]
BB#0: derived from LLVM BB %0
%vreg2<def> = IMPLICIT_DEF; GPR64sp:%vreg2
%vreg8<def> = IMPLICIT_DEF; GPR64sp:%vreg8
%vreg11<def> = IMPLICIT_DEF; GPR64sp:%vreg11
%vreg22<def> = IMPLICIT_DEF; GPR64sp:%vreg22
%vreg29<def> = IMPLICIT_DEF; GPR64sp:%vreg29
%vreg46<def> = IMPLICIT_DEF; GPR64sp:%vreg46
%vreg49<def> = LDRSWui <fi#1>, 0; mem:LD4[%A25](dereferenceable)
GPR64:%vreg49
%vreg50<def> = COPY %vreg49:sub_32; GPR32:%vreg50 GPR64:%vreg49
%vreg51<def> = ADDXri <fi#1>, 0, 0; GPR64sp:%vreg51
%vreg52<def> = LDRWroX %vreg51<kill>, %vreg49, 0, 1; mem:LD4[%G33]
GPR32:%vreg52 GPR64sp:%vreg51 GPR64:%vreg49
%vreg53<def> = SUBSWrr %vreg50, %vreg52, %NZCV<imp-def>;
GPR32:%vreg53,%vreg50,%vreg52
%vreg54<def> = CSINCWr %WZR, %WZR, 3, %NZCV<imp-use>; GPR32:%vreg54
%vreg55<def> = MADDWrrr %vreg50, %vreg53, %WZR;
GPR32:%vreg55,%vreg50,%vreg53
%vreg57<def> = IMPLICIT_DEF; GPR64sp:%vreg57
%vreg56<def> = LDRWui %vreg57, 0; mem:LD4[undef] GPR32common:%vreg56
GPR64sp:%vreg57
%vreg58<def> = EORWrr %vreg56, %vreg50; GPR32common:%vreg58,%vreg56
GPR32:%vreg50
%vreg59<def> = UDIVWr %vreg50, %vreg55; GPR32:%vreg59,%vreg50,%vreg55
%vreg60<def> = MADDWrrr %vreg59<kill>, %vreg55, %WZR;
GPR32:%vreg60,%vreg59,%vreg55
%vreg61<def> = COPY %XZR; GPR64:%vreg61
%vreg62<def> = MOVi64imm -8; GPR64:%vreg62
%vreg63<def> = CSELXr %vreg62, %vreg61, 3, %NZCV<imp-use>;
GPR64:%vreg63,%vreg62,%vreg61
%vreg64<def> = SUBSWrr %vreg50, %vreg60<kill>, %NZCV<imp-def,dead>;
GPR32common:%vreg64 GPR32:%vreg50,%vreg60
%vreg65<def> = SUBREG_TO_REG 0, %vreg64, sub_32; GPR64sp:%vreg65
GPR32common:%vreg64
%vreg66<def> = ADDXri %vreg65<kill>, 15, 0; GPR64common:%vreg66
GPR64sp:%vreg65
%vreg67<def> = ANDXri %vreg66<kill>, 7964; GPR64common:%vreg67,%vreg66
ADJCALLSTACKDOWN 0, 0, %SP<imp-def,dead>, %SP<imp-use>
%vreg68<def> = COPY %SP; GPR64:%vreg68
%vreg69<def> = SUBSXrr %vreg68, %vreg67<kill>, %NZCV<imp-def,dead>;
GPR64common:%vreg69,%vreg67 GPR64:%vreg68
%SP<def> = COPY %vreg69; GPR64common:%vreg69
ADJCALLSTACKUP 0, 0, %SP<imp-def,dead>, %SP<imp-use>
%vreg31<def> = COPY %vreg69; GPR64sp:%vreg31 GPR64common:%vreg69
%vreg70<def> = LDRBBui %vreg69, 0; mem:LD1[%A11](dereferenceable)
GPR32:%vreg70 GPR64common:%vreg69
%vreg71<def> = SUBREG_TO_REG 0, %vreg70<kill>, sub_32; GPR64:%vreg71
GPR32:%vreg70
%vreg72<def> = COPY %vreg71:sub_32; GPR32:%vreg72 GPR64:%vreg71
%vreg73<def> = ADDXri <fi#2>, 0, 0; GPR64sp:%vreg73
%vreg74<def> = ADDXrx %vreg73<kill>, %vreg53, 49; GPR64common:%vreg74
GPR64sp:%vreg73 GPR32:%vreg53
%vreg75<def> = SBFMXri %vreg71, 0, 0; GPR64:%vreg75,%vreg71
%vreg76<def> = ADDXri <fi#6>, 0, 0; GPR64common:%vreg76
%vreg77<def> = SUBSXrr %vreg76<kill>, %vreg71, %NZCV<imp-def,dead>;
GPR64common:%vreg77,%vreg76 GPR64:%vreg71
%vreg78<def> = LDRSBXui %vreg77, 0; mem:LD1[%G27] GPR64:%vreg78
GPR64common:%vreg77
%vreg79<def> = COPY %vreg78:sub_32; GPR32:%vreg79 GPR64:%vreg78
%vreg80<def> = ANDWri %vreg79, 7; GPR32sp:%vreg80 GPR32:%vreg79
%vreg34<def> = COPY %vreg80; GPR32all:%vreg34 GPR32sp:%vreg80
%vreg81<def> = LDRBBui <fi#6>, 0; mem:LD1[%A10](dereferenceable)
GPR32:%vreg81
%vreg82<def> = SUBREG_TO_REG 0, %vreg81<kill>, sub_32; GPR64:%vreg82
GPR32:%vreg81
%vreg83<def> = COPY %vreg82:sub_32; GPR32:%vreg83 GPR64:%vreg82
%vreg84<def> = SBFMWri %vreg83, 0, 7; GPR32:%vreg84,%vreg83
%vreg85<def> = UDIVWr %vreg58, %vreg58; GPR32:%vreg85
GPR32common:%vreg58
%vreg86<def> = LDRSHXui <fi#7>, 0; mem:LD2[%A16](dereferenceable)
GPR64:%vreg86
%vreg87<def> = COPY %vreg86:sub_32; GPR32:%vreg87 GPR64:%vreg86
%vreg88<def> = ANDWri %vreg87<kill>, 15; GPR32common:%vreg88
GPR32:%vreg87
%vreg89<def> = COPY %WZR; GPR32:%vreg89
%vreg25<def> = COPY %vreg89; GPR32:%vreg25,%vreg89
%vreg90<def> = ADDXri <fi#7>, 0, 0; GPR64common:%vreg90
%vreg91<def> = ADDXrs %vreg90, %vreg78, 1; GPR64:%vreg91,%vreg78
GPR64common:%vreg90
%vreg92<def> = SUBREG_TO_REG 0, %vreg58, sub_32; GPR64common:%vreg92
GPR32common:%vreg58
%vreg93<def> = ADDXri %vreg92, 15, 0; GPR64common:%vreg93,%vreg92
%vreg94<def> = ANDXri %vreg93<kill>, 7964; GPR64common:%vreg94,%vreg93
ADJCALLSTACKDOWN 0, 0, %SP<imp-def,dead>, %SP<imp-use>
%vreg95<def> = COPY %SP; GPR64:%vreg95
%vreg96<def> = SUBSXrr %vreg95, %vreg94<kill>, %NZCV<imp-def,dead>;
GPR64common:%vreg96,%vreg94 GPR64:%vreg95
%SP<def> = COPY %vreg96; GPR64common:%vreg96
ADJCALLSTACKUP 0, 0, %SP<imp-def,dead>, %SP<imp-use>
%vreg4<def> = COPY %vreg96; GPR64sp:%vreg4 GPR64common:%vreg96
ADJCALLSTACKDOWN 0, 0, %SP<imp-def,dead>, %SP<imp-use>
%vreg97<def> = ANDXri %vreg49, 4127; GPR64common:%vreg97 GPR64:%vreg49
%vreg98<def> = UBFMXri %vreg97, 61, 60; GPR64common:%vreg98,%vreg97
%vreg99<def> = ADDXri %vreg98<kill>, 15, 0; GPR64common:%vreg99,%vreg98
%vreg100<def> = ANDXri %vreg99<kill>, 7967;
GPR64common:%vreg100,%vreg99
%vreg101<def> = COPY %SP; GPR64:%vreg101
%vreg102<def> = SUBSXrr %vreg101, %vreg100<kill>, %NZCV<imp-def,dead>;
GPR64common:%vreg102,%vreg100 GPR64:%vreg101
%SP<def> = COPY %vreg102; GPR64common:%vreg102
ADJCALLSTACKUP 0, 0, %SP<imp-def,dead>, %SP<imp-use>
%vreg103<def> = LDRXui %vreg102, 0; mem:LD8[%A13](dereferenceable)
GPR64:%vreg103 GPR64common:%vreg102
%vreg105<def> = IMPLICIT_DEF; GPR64sp:%vreg105
%vreg104<def> = LDRSHXui %vreg105, 0; mem:LD2[undef] GPR64:%vreg104
GPR64sp:%vreg105
%vreg106<def> = COPY %vreg104:sub_32; GPR32:%vreg106 GPR64:%vreg104
%vreg107<def> = ORRWrr %vreg72<kill>, %vreg54<kill>;
GPR32:%vreg107,%vreg72,%vreg54
%vreg108<def> = ANDXri %vreg86, 4111; GPR64sp:%vreg108 GPR64:%vreg86
%vreg109<def> = COPY %vreg108:sub_32; GPR32:%vreg109 GPR64sp:%vreg108
%vreg110<def> = ASRVWr %vreg106, %vreg109<kill>;
GPR32:%vreg110,%vreg106,%vreg109
%vreg111<def> = ANDWri %vreg110<kill>, 15; GPR32common:%vreg111
GPR32:%vreg110
%vreg113<def> = IMPLICIT_DEF; GPR64sp:%vreg113
%vreg112<def> = LDRSBXui %vreg113, 0; mem:LD1[undef] GPR64:%vreg112
GPR64sp:%vreg113
%vreg114<def> = ANDXrr %vreg112, %vreg82;
GPR64:%vreg114,%vreg112,%vreg82
%vreg115<def> = COPY %vreg114:sub_32; GPR32:%vreg115 GPR64:%vreg114
%vreg116<def> = COPY %vreg112:sub_32; GPR32:%vreg116 GPR64:%vreg112
%vreg117<def> = UDIVWr %vreg111, %vreg88; GPR32:%vreg117
GPR32common:%vreg111,%vreg88
%vreg118<def> = MADDWrrr %vreg117<kill>, %vreg88, %WZR;
GPR32:%vreg118,%vreg117 GPR32common:%vreg88
%vreg119<def> = SBFMWri %vreg115, 0, 7; GPR32:%vreg119,%vreg115
%vreg120<def> = UBFMXri %vreg92, 61, 60; GPR64common:%vreg120,%vreg92
%vreg121<def> = ADDXri %vreg120<kill>, 15, 0;
GPR64common:%vreg121,%vreg120
%vreg122<def> = ANDXri %vreg121<kill>, 7967;
GPR64common:%vreg122,%vreg121
ADJCALLSTACKDOWN 0, 0, %SP<imp-def,dead>, %SP<imp-use>
%vreg123<def> = COPY %SP; GPR64:%vreg123
%vreg124<def> = SUBSWrr %vreg111, %vreg118<kill>, %NZCV<imp-def,dead>;
GPR32:%vreg124,%vreg118 GPR32common:%vreg111
%vreg125<def> = SUBSXrr %vreg123, %vreg122<kill>, %NZCV<imp-def,dead>;
GPR64common:%vreg125,%vreg122 GPR64:%vreg123
%SP<def> = COPY %vreg125; GPR64common:%vreg125
ADJCALLSTACKUP 0, 0, %SP<imp-def,dead>, %SP<imp-use>
%vreg126<def> = UBFMXri %vreg112, 61, 60; GPR64:%vreg126,%vreg112
%vreg127<def> = LDRDui %vreg125, 0; mem:LD8[%A8](dereferenceable)
FPR64:%vreg127 GPR64common:%vreg125
%vreg128<def> = SUBREG_TO_REG 0, %vreg85<kill>, sub_32; GPR64:%vreg128
GPR32:%vreg85
%vreg129<def> = UBFMXri %vreg128<kill>, 61, 60; GPR64common:%vreg129
GPR64:%vreg128
%vreg130<def> = ADDXri %vreg129<kill>, 15, 0;
GPR64common:%vreg130,%vreg129
%vreg131<def> = ANDXri %vreg130<kill>, 7967;
GPR64common:%vreg131,%vreg130
ADJCALLSTACKDOWN 0, 0, %SP<imp-def,dead>, %SP<imp-use>
%vreg132<def> = COPY %SP; GPR64:%vreg132
%vreg133<def> = SUBSXrr %vreg132, %vreg131<kill>, %NZCV<imp-def,dead>;
GPR64common:%vreg133,%vreg131 GPR64:%vreg132
%SP<def> = COPY %vreg133; GPR64common:%vreg133
ADJCALLSTACKUP 0, 0, %SP<imp-def,dead>, %SP<imp-use>
%vreg134<def> = UBFMXri %vreg104, 61, 60; GPR64:%vreg134,%vreg104
%vreg135<def> = ADDXrr %vreg103<kill>, %vreg134;
GPR64:%vreg135,%vreg103,%vreg134
%vreg136<def> = ADDXri <fi#3>, 0, 0; GPR64common:%vreg136
%vreg137<def> = ADDXrr %vreg136<kill>, %vreg134;
GPR64:%vreg137,%vreg134 GPR64common:%vreg136
%vreg138<def> = LDRDui %vreg133, 0; mem:LD8[%A5](dereferenceable)
FPR64:%vreg138 GPR64common:%vreg133
ADJCALLSTACKDOWN 0, 0, %SP<imp-def,dead>, %SP<imp-use>
%vreg139<def> = SDIVWr %vreg53, %vreg55; GPR32:%vreg139,%vreg53,%vreg55
%vreg140<def> = SUBREG_TO_REG 0, %vreg55, sub_32; GPR64:%vreg140
GPR32:%vreg55
%vreg141<def> = UBFMXri %vreg140<kill>, 61, 60; GPR64common:%vreg141
GPR64:%vreg140
%vreg142<def> = ADDXri %vreg141<kill>, 15, 0;
GPR64common:%vreg142,%vreg141
%vreg143<def> = ANDXri %vreg142<kill>, 7967;
GPR64common:%vreg143,%vreg142
ADJCALLSTACKDOWN 0, 0, %SP<imp-def,dead>, %SP<imp-use>
%vreg144<def> = COPY %SP; GPR64:%vreg144
%D0<def> = COPY %vreg138; FPR64:%vreg138
%D1<def> = COPY %vreg127; FPR64:%vreg127
BL <es:fmod>, <regmask %FP %LR %B8 %B9 %B10 %B11 %B12 %B13 %B14 %B15
%D8 %D9 %D10 %D11 %D12 %D13 %D14 %D15 %H8 %H9 %H10 %H11 %H12 %H13 %H14 %H15 %S8
%S9 %S10 %S11 %S12 %S13 %S14 %S15 %W19 %W20 %W21 %W22 %W23 %W24 %W25 %W26 %W27
%W28 %W29 %W30 %X19 %X20 %X21 %X22 %X23 %X24 %X25 %X26 %X27 %X28 %D8_D9 %D9_D10
%D10_D11 %D11_D12 %D12_D13 %D13_D14 %D14_D15 %D8_D9_D10_D11 %D9_D10_D11_D12
%D10_D11_D12_D13 %D11_D12_D13_D14 %D12_D13_D14_D15 %D8_D9_D10 %D9_D10_D11
%D10_D11_D12 %D11_D12_D13 %D12_D13_D14 %D13_D14_D15 %W19_W20 %W20_W21 %W21_W22
%W22_W23 %W23_W24 %W24_W25 %W25_W26 %W26_W27 %W27_W28 %W28_W29 %W29_W30 %FP_LR
%X28_FP %X19_X20 %X20_X21 %X21_X22 %X22_X23 %X23_X24 %X24_X25 %X25_X26 %X26_X27
%X27_X28>, %LR<imp-def,dead>, %SP<imp-use>, %D0<imp-use>, %D1<imp-use>,
%SP<imp-def>, %D0<imp-def>
ADJCALLSTACKUP 0, 0, %SP<imp-def,dead>, %SP<imp-use>
%vreg145<def> = COPY %D0; FPR64:%vreg145
%vreg146<def> = SUBSXrr %vreg144, %vreg143<kill>, %NZCV<imp-def,dead>;
GPR64common:%vreg146,%vreg143 GPR64:%vreg144
%SP<def> = COPY %vreg146; GPR64common:%vreg146
ADJCALLSTACKUP 0, 0, %SP<imp-def,dead>, %SP<imp-use>
%vreg147<def> = SUBREG_TO_REG 0, %vreg139<kill>, sub_32; GPR64:%vreg147
GPR32:%vreg139
%vreg148<def> = UBFMXri %vreg147<kill>, 61, 60; GPR64common:%vreg148
GPR64:%vreg147
%vreg149<def> = ADDXri %vreg148<kill>, 15, 0;
GPR64common:%vreg149,%vreg148
%vreg150<def> = ANDXri %vreg149<kill>, 7967;
GPR64common:%vreg150,%vreg149
ADJCALLSTACKDOWN 0, 0, %SP<imp-def,dead>, %SP<imp-use>
%vreg151<def> = COPY %SP; GPR64:%vreg151
%vreg152<def> = SUBSXrr %vreg151, %vreg150<kill>, %NZCV<imp-def,dead>;
GPR64common:%vreg152,%vreg150 GPR64:%vreg151
%SP<def> = COPY %vreg152; GPR64common:%vreg152
ADJCALLSTACKUP 0, 0, %SP<imp-def,dead>, %SP<imp-use>
ADJCALLSTACKDOWN 0, 0, %SP<imp-def,dead>, %SP<imp-use>
%vreg153<def> = COPY %SP; GPR64common:%vreg153
%SP<def> = COPY %vreg153; GPR64common:%vreg153
ADJCALLSTACKUP 0, 0, %SP<imp-def,dead>, %SP<imp-use>
%vreg154<def> = ADDXrx %vreg153, %vreg115, 35; GPR64sp:%vreg154
GPR64common:%vreg153 GPR32:%vreg115
%vreg156<def> = IMPLICIT_DEF; GPR64sp:%vreg156
%vreg155<def> = LDRHHui %vreg156, 0; mem:LD2[undef] GPR32:%vreg155
GPR64sp:%vreg156
%vreg157<def> = SBFMWri %vreg155, 0, 15; GPR32common:%vreg157
GPR32:%vreg155
%vreg158<def> = SUBREG_TO_REG 0, %vreg53, sub_32; GPR64:%vreg158
GPR32:%vreg53
%vreg159<def> = UBFMXri %vreg158<kill>, 63, 62; GPR64common:%vreg159
GPR64:%vreg158
%vreg160<def> = ADDXri %vreg159<kill>, 15, 0;
GPR64common:%vreg160,%vreg159
%vreg161<def> = ANDXri %vreg160<kill>, 7965;
GPR64common:%vreg161,%vreg160
ADJCALLSTACKDOWN 0, 0, %SP<imp-def,dead>, %SP<imp-use>
%vreg162<def> = COPY %SP; GPR64:%vreg162
%vreg163<def> = SUBSXrr %vreg162, %vreg161<kill>, %NZCV<imp-def,dead>;
GPR64common:%vreg163,%vreg161 GPR64:%vreg162
%SP<def> = COPY %vreg163; GPR64common:%vreg163
ADJCALLSTACKUP 0, 0, %SP<imp-def,dead>, %SP<imp-use>
%vreg164<def> = LDRSHWui %vreg163, 0; mem:LD2[%A1](dereferenceable)
GPR32:%vreg164 GPR64common:%vreg163
%vreg165<def> = ANDWri %vreg164, 15; GPR32common:%vreg165
GPR32:%vreg164
%vreg166<def> = ADDXri <fi#5>, 0, 0; GPR64common:%vreg166
%vreg167<def> = ADDXrs %vreg166<kill>, %vreg75<kill>, 3;
GPR64:%vreg167,%vreg75 GPR64common:%vreg166
%vreg41<def> = COPY %vreg167; GPR64sp:%vreg41 GPR64:%vreg167
%vreg169<def> = IMPLICIT_DEF; GPR64all:%vreg169
%vreg168<def,tied1> = INSERT_SUBREG %vreg169<tied0>, %vreg164, sub_32;
GPR64:%vreg168 GPR64all:%vreg169 GPR32:%vreg164
%vreg170<def> = SBFMXri %vreg168, 61, 15; GPR64:%vreg170,%vreg168
%vreg171<def> = ADDXrr %vreg137<kill>, %vreg170;
GPR64:%vreg171,%vreg137,%vreg170
%vreg172<def> = SBFMXri %vreg168, 0, 15; GPR64:%vreg172,%vreg168
%vreg173<def> = COPY %vreg168:sub_32; GPR32:%vreg173 GPR64:%vreg168
%vreg174<def> = COPY %vreg97:sub_32; GPR32:%vreg174 GPR64common:%vreg97
%vreg175<def> = ASRVWr %vreg52, %vreg174<kill>;
GPR32:%vreg175,%vreg52,%vreg174
%vreg176<def> = UDIVWr %vreg83, %vreg115;
GPR32:%vreg176,%vreg83,%vreg115
%vreg42<def> = SBFMWri %vreg176<kill>, 0, 7; GPR32:%vreg42,%vreg176
%vreg177<def> = ASRVWr %vreg42, %vreg83; GPR32:%vreg177,%vreg42,%vreg83
%vreg178<def> = SUBSWrx %vreg157, %vreg124, 40, %NZCV<imp-def>;
GPR32:%vreg178,%vreg124 GPR32common:%vreg157
%vreg23<def> = CSINVWr %WZR, %WZR, 10, %NZCV<imp-use>; GPR32:%vreg23
%vreg30<def> = CSINCWr %WZR, %WZR, 10, %NZCV<imp-use>; GPR32:%vreg30
%vreg180<def> = IMPLICIT_DEF; GPR64all:%vreg180
%vreg179<def,tied1> = INSERT_SUBREG %vreg180<tied0>, %vreg155, sub_32;
GPR64all:%vreg179,%vreg180 GPR32:%vreg155
%vreg181<def> = COPY %vreg179:sub_32; GPR32:%vreg181 GPR64all:%vreg179
%vreg182<def> = ADDXrx %vreg154<kill>, %vreg181<kill>, 43;
GPR64common:%vreg182 GPR64sp:%vreg154 GPR32:%vreg181
%vreg183<def> = SDIVWr %vreg84, %vreg79; GPR32:%vreg183,%vreg84,%vreg79
%vreg184<def> = MADDWrrr %vreg183<kill>, %vreg79, %WZR;
GPR32:%vreg184,%vreg183,%vreg79
%vreg185<def> = SUBSWrr %vreg84, %vreg184<kill>, %NZCV<imp-def,dead>;
GPR32:%vreg185,%vreg84,%vreg184
%vreg186<def> = SBFMWri %vreg185<kill>, 0, 7; GPR32:%vreg186,%vreg185
%vreg187<def> = SUBREG_TO_REG 0, %vreg175<kill>, sub_32;
GPR64sp:%vreg187 GPR32:%vreg175
%vreg188<def> = ADDXri %vreg187<kill>, 15, 0; GPR64common:%vreg188
GPR64sp:%vreg187
%vreg189<def> = ANDXri %vreg188<kill>, 7964;
GPR64common:%vreg189,%vreg188
ADJCALLSTACKDOWN 0, 0, %SP<imp-def,dead>, %SP<imp-use>
%vreg190<def> = COPY %SP; GPR64:%vreg190
%vreg191<def> = SUBSXrr %vreg190, %vreg189<kill>, %NZCV<imp-def,dead>;
GPR64common:%vreg191,%vreg189 GPR64:%vreg190
%SP<def> = COPY %vreg191; GPR64common:%vreg191
ADJCALLSTACKUP 0, 0, %SP<imp-def,dead>, %SP<imp-use>
%vreg43<def> = COPY %vreg191; GPR64sp:%vreg43 GPR64common:%vreg191
%vreg192<def> = LDRBBui %vreg191, 0; mem:LD1[%A2](dereferenceable)
GPR32:%vreg192 GPR64common:%vreg191
%vreg28<def> = ADDXrr %vreg153, %vreg63<kill>; GPR64:%vreg28,%vreg63
GPR64common:%vreg153
%vreg193<def> = LDRSBXui <fi#18>, 0; mem:LD1[%A](dereferenceable)
GPR64:%vreg193
%vreg194<def> = COPY %vreg193:sub_32; GPR32:%vreg194 GPR64:%vreg193
%vreg13<def> = LDRBBui %vreg77, 0; mem:LD1[%G27] GPR32:%vreg13
GPR64common:%vreg77
%vreg195<def> = ANDWrr %vreg106, %vreg164;
GPR32:%vreg195,%vreg106,%vreg164
%vreg196<def> = ADDXrx %vreg163, %vreg173<kill>, 41; GPR64sp:%vreg196
GPR64common:%vreg163 GPR32:%vreg173
%vreg47<def> = COPY %vreg182; GPR64all:%vreg47 GPR64common:%vreg182
%vreg197<def> = SUBSWrr %vreg13, %vreg194<kill>, %NZCV<imp-def,dead>;
GPR32:%vreg197,%vreg13,%vreg194
STRBBui %vreg197, %vreg191, 0; mem:ST1[%A2] GPR32:%vreg197
GPR64common:%vreg191
%vreg199<def> = IMPLICIT_DEF; GPR64all:%vreg199
%vreg198<def,tied1> = INSERT_SUBREG %vreg199<tied0>, %vreg197, sub_32;
GPR64:%vreg198 GPR64all:%vreg199 GPR32:%vreg197
%vreg200<def> = SBFMXri %vreg198<kill>, 0, 7; GPR64:%vreg200,%vreg198
%vreg1<def> = ADDXrs %vreg102, %vreg193, 3; GPR64:%vreg1,%vreg193
GPR64common:%vreg102
%vreg201<def> = SUBSWri %vreg56, 0, 0, %NZCV<imp-def>; GPR32:%vreg201
GPR32common:%vreg56
%vreg202<def> = MOVi64imm -2; GPR64:%vreg202
%vreg203<def> = CSELXr %vreg202<kill>, %vreg61, 1, %NZCV<imp-use>;
GPR64:%vreg203,%vreg202,%vreg61
%vreg204<def> = ADDXrr %vreg163, %vreg203<kill>;
GPR64common:%vreg204,%vreg163 GPR64:%vreg203
%vreg205<def> = LDRSHWroX %vreg90, %vreg86, 0, 1; mem:LD2[%G24]
GPR32:%vreg205 GPR64common:%vreg90 GPR64:%vreg86
%vreg206<def> = ANDWri %vreg205, 15; GPR32common:%vreg206
GPR32:%vreg205
%vreg207<def> = LDRHHui %vreg204, 0; mem:LD2[%G] GPR32:%vreg207
GPR64common:%vreg204
%vreg208<def> = SUBREG_TO_REG 0, %vreg207<kill>, sub_32; GPR64:%vreg208
GPR32:%vreg207
%vreg209<def> = COPY %vreg208:sub_32; GPR32:%vreg209 GPR64:%vreg208
%vreg210<def> = SBFMWri %vreg209, 0, 15; GPR32:%vreg210,%vreg209
%vreg33<def> = ADDXrr %vreg152, %vreg126; GPR64:%vreg33,%vreg126
GPR64common:%vreg152
%vreg211<def> = MOVi32imm 1; GPR32:%vreg211
%vreg212<def> = IMPLICIT_DEF; GPR64sp:%vreg212
STRBBui %vreg211<kill>, %vreg212, 0; mem:ST1[undef] GPR32:%vreg211
GPR64sp:%vreg212
STRXui %vreg204, %vreg182, 0; mem:ST8[%G2]
GPR64common:%vreg204,%vreg182
%vreg213<def> = UDIVWr %vreg165, %vreg165; GPR32:%vreg213
GPR32common:%vreg165
%vreg214<def> = MADDWrrr %vreg213<kill>, %vreg165, %WZR;
GPR32:%vreg214,%vreg213 GPR32common:%vreg165
%vreg215<def> = SUBSWrr %vreg165, %vreg214<kill>, %NZCV<imp-def,dead>;
GPR32:%vreg215,%vreg214 GPR32common:%vreg165
%vreg216<def> = SUBSWrr %vreg186<kill>, %vreg42, %NZCV<imp-def>;
GPR32:%vreg216,%vreg186,%vreg42
%vreg217<def> = CSELXr %vreg62, %vreg61, 10, %NZCV<imp-use>;
GPR64:%vreg217,%vreg62,%vreg61
%vreg218<def> = ADDXrr %vreg135<kill>, %vreg217<kill>;
GPR64:%vreg218,%vreg135,%vreg217
%vreg40<def> = ADDXrr %vreg96, %vreg172<kill>; GPR64:%vreg40,%vreg172
GPR64common:%vreg96
%vreg219<def> = LDRHHui %vreg196, 0; mem:LD2[%G4] GPR32:%vreg219
GPR64sp:%vreg196
%vreg220<def> = SUBREG_TO_REG 0, %vreg219<kill>, sub_32;
GPR64all:%vreg220 GPR32:%vreg219
%vreg221<def> = COPY %vreg220:sub_32; GPR32:%vreg221 GPR64all:%vreg220
%vreg222<def> = LSLVWr %vreg164, %vreg221;
GPR32:%vreg222,%vreg164,%vreg221
%vreg224<def> = IMPLICIT_DEF; GPR64all:%vreg224
%vreg223<def,tied1> = INSERT_SUBREG %vreg224<tied0>, %vreg222<kill>,
sub_32; GPR64all:%vreg223,%vreg224 GPR32:%vreg222
%vreg225<def> = COPY %vreg223:sub_32; GPR32:%vreg225 GPR64all:%vreg223
%vreg226<def> = ADDXri <fi#18>, 0, 0; GPR64sp:%vreg226
%vreg227<def> = ADDXrx %vreg226, %vreg225<kill>, 40;
GPR64common:%vreg227 GPR64sp:%vreg226 GPR32:%vreg225
%vreg228<def> = SUBSWri %vreg58, 0, 0, %NZCV<imp-def>; GPR32:%vreg228
GPR32common:%vreg58
%vreg229<def> = CSELXr %vreg62, %vreg61, 0, %NZCV<imp-use>;
GPR64:%vreg229,%vreg62,%vreg61
%vreg0<def> = ADDXrr %vreg33, %vreg229<kill>;
GPR64:%vreg0,%vreg33,%vreg229
%vreg231<def> = IMPLICIT_DEF; GPR64all:%vreg231
%vreg230<def,tied1> = INSERT_SUBREG %vreg231<tied0>, %vreg177<kill>,
sub_32; GPR64:%vreg230 GPR64all:%vreg231 GPR32:%vreg177
%vreg232<def> = ANDXri %vreg230<kill>, 4103; GPR64sp:%vreg232
GPR64:%vreg230
%vreg233<def> = COPY %vreg232:sub_32; GPR32:%vreg233 GPR64sp:%vreg232
%vreg12<def> = ASRVWr %vreg119<kill>, %vreg233<kill>;
GPR32:%vreg12,%vreg119,%vreg233
%vreg234<def> = ANDXri %vreg168, 4111; GPR64sp:%vreg234 GPR64:%vreg168
%vreg235<def> = COPY %vreg234:sub_32; GPR32:%vreg235 GPR64sp:%vreg234
%vreg236<def> = LSRVWr %vreg221, %vreg235<kill>;
GPR32:%vreg236,%vreg221,%vreg235
%vreg237<def> = SUBSWrr %vreg157, %vreg164, %NZCV<imp-def>;
GPR32:%vreg237,%vreg164 GPR32common:%vreg157
%vreg3<def> = CSINCWr %WZR, %WZR, 10, %NZCV<imp-use>; GPR32:%vreg3
%vreg239<def> = IMPLICIT_DEF; GPR64all:%vreg239
%vreg238<def,tied1> = INSERT_SUBREG %vreg239<tied0>, %vreg236<kill>,
sub_32; GPR64all:%vreg238,%vreg239 GPR32:%vreg236
%vreg240<def> = COPY %vreg238:sub_32; GPR32:%vreg240 GPR64all:%vreg238
%vreg241<def> = ADDXrx %vreg226, %vreg240<kill>, 40;
GPR64common:%vreg241 GPR64sp:%vreg226 GPR32:%vreg240
STRXui %vreg241<kill>, %vreg152, 0; mem:ST8[%A4]
GPR64common:%vreg241,%vreg152
%vreg242<def> = IMPLICIT_DEF; GPR64sp:%vreg242
STRXui %vreg227, %vreg242, 0; mem:ST8[undef] GPR64common:%vreg227
GPR64sp:%vreg242
%vreg243<def> = SDIVWr %vreg210, %vreg205;
GPR32:%vreg243,%vreg210,%vreg205
%vreg244<def> = MADDWrrr %vreg243<kill>, %vreg205, %WZR;
GPR32:%vreg244,%vreg243,%vreg205
%vreg245<def> = SUBSWrr %vreg210, %vreg244<kill>, %NZCV<imp-def,dead>;
GPR32:%vreg245,%vreg210,%vreg244
%vreg246<def> = IMPLICIT_DEF; GPR64sp:%vreg246
STRXui %vreg91<kill>, %vreg246, 0; mem:ST8[undef] GPR64:%vreg91
GPR64sp:%vreg246
%vreg247<def> = SUBSWri %vreg64, 0, 0, %NZCV<imp-def>; GPR32:%vreg247
GPR32common:%vreg64
%vreg248<def> = CSELXr %vreg62, %vreg61, 1, %NZCV<imp-use>;
GPR64:%vreg248,%vreg62,%vreg61
%vreg249<def> = UDIVWr %vreg215, %vreg206; GPR32:%vreg249,%vreg215
GPR32common:%vreg206
%vreg250<def> = MADDWrrr %vreg249<kill>, %vreg206, %WZR;
GPR32:%vreg250,%vreg249 GPR32common:%vreg206
%vreg251<def> = SUBSWrr %vreg215, %vreg250<kill>, %NZCV<imp-def,dead>;
GPR32:%vreg251,%vreg215,%vreg250
FCMPDrr %vreg138, %vreg138, %NZCV<imp-def>; FPR64:%vreg138
%vreg252<def> = CSINCWr %WZR, %WZR, 7, %NZCV<imp-use>; GPR32:%vreg252
%vreg48<def> = COPY %vreg252; GPR32all:%vreg48 GPR32:%vreg252
%vreg254<def> = IMPLICIT_DEF; GPR64all:%vreg254
%vreg253<def,tied1> = INSERT_SUBREG %vreg254<tied0>, %vreg245<kill>,
sub_32; GPR64all:%vreg253,%vreg254 GPR32:%vreg245
%vreg255<def> = COPY %vreg253:sub_32; GPR32:%vreg255 GPR64all:%vreg253
%vreg256<def> = ADDXrx %vreg196, %vreg255<kill>, 41;
GPR64common:%vreg256 GPR64sp:%vreg196 GPR32:%vreg255
%vreg258<def> = IMPLICIT_DEF; GPR64all:%vreg258
%vreg257<def,tied1> = INSERT_SUBREG %vreg258<tied0>, %vreg195<kill>,
sub_32; GPR64:%vreg257 GPR64all:%vreg258 GPR32:%vreg195
%vreg259<def> = COPY %vreg257:sub_32; GPR32:%vreg259 GPR64:%vreg257
%vreg260<def> = ADDXrx %vreg125, %vreg259<kill>, 43;
GPR64common:%vreg260,%vreg125 GPR32:%vreg259
%vreg261<def> = SBFMXri %vreg257, 0, 15; GPR64:%vreg261,%vreg257
%vreg262<def> = ANDWrr %vreg12, %vreg192<kill>;
GPR32:%vreg262,%vreg12,%vreg192
STRXui %vreg171<kill>, %vreg146, 0; mem:ST8[%A7] GPR64:%vreg171
GPR64common:%vreg146
%vreg263<def> = SBFMXri %vreg208, 0, 15; GPR64:%vreg263,%vreg208
STRDroX %vreg145, %vreg125, %vreg126, 0, 0; mem:ST8[%G9] FPR64:%vreg145
GPR64common:%vreg125 GPR64:%vreg126
%vreg264<def> = ADDXri <fi#10>, 0, 0; GPR64sp:%vreg264
STRXroX %vreg74<kill>, %vreg264<kill>, %vreg200<kill>, 0, 1;
mem:ST8[%G11] GPR64common:%vreg74 GPR64sp:%vreg264 GPR64:%vreg200
STRXroX %vreg218<kill>, %vreg102, %vreg170, 0, 0; mem:ST8[%G23]
GPR64:%vreg218,%vreg170 GPR64common:%vreg102
STRXroX %vreg256<kill>, %vreg153, %vreg263<kill>, 0, 1; mem:ST8[%G14]
GPR64common:%vreg256,%vreg153 GPR64:%vreg263
STRXroX %vreg260<kill>, %vreg102, %vreg134, 0, 0; mem:ST8[%G15]
GPR64common:%vreg260,%vreg102 GPR64:%vreg134
%vreg265<def> = IMPLICIT_DEF; GPR64sp:%vreg265
STRXui %vreg33, %vreg265, 0; mem:ST8[undef] GPR64:%vreg33
GPR64sp:%vreg265
%vreg266<def> = ADDWrr %vreg155, %vreg164;
GPR32:%vreg266,%vreg155,%vreg164
%vreg267<def> = SBFMWri %vreg266<kill>, 0, 15; GPR32:%vreg267,%vreg266
%vreg18<def> = EORWrr %vreg12, %vreg116<kill>;
GPR32:%vreg18,%vreg12,%vreg116
%vreg268<def> = ADDXrr %vreg227, %vreg261<kill>;
GPR64:%vreg268,%vreg261 GPR64common:%vreg227
%vreg19<def> = COPY %vreg268; GPR64common:%vreg19 GPR64:%vreg268
%vreg269<def> = IMPLICIT_DEF; GPR64sp:%vreg269
STRBBui %vreg262<kill>, %vreg269, 0; mem:ST1[undef] GPR32:%vreg262
GPR64sp:%vreg269
%vreg21<def> = ADDXrr %vreg125, %vreg248<kill>; GPR64:%vreg21,%vreg248
GPR64common:%vreg125
STRBBui %vreg107<kill>, %vreg96, 0; mem:ST1[%A9] GPR32:%vreg107
GPR64common:%vreg96
STRHHui %vreg251<kill>, %vreg196, 0; mem:ST2[%G25] GPR32:%vreg251
GPR64sp:%vreg196
%vreg45<def> = ASRVWr %vreg267<kill>, %vreg209;
GPR32:%vreg45,%vreg267,%vreg209
%vreg6<def> = UDIVWr %vreg155, %vreg124; GPR32:%vreg6,%vreg155,%vreg124
%vreg270<def> = CSELXr %vreg62, %vreg61, 6, %NZCV<imp-use>;
GPR64:%vreg270,%vreg62,%vreg61
%vreg44<def> = ADDXrr %vreg182, %vreg270<kill>; GPR64:%vreg44,%vreg270
GPR64common:%vreg182
STRHHui %vreg45, %vreg46, 0; mem:ST2[undef] GPR32:%vreg45
GPR64sp:%vreg46
STRXui %vreg44, %vreg29, 0; mem:ST8[undef] GPR64:%vreg44
GPR64sp:%vreg29
STRBBui %vreg42, %vreg43, 0; mem:ST1[%A2] GPR32:%vreg42 GPR64sp:%vreg43
STRXui %vreg40, %vreg41, 0; mem:ST8[%G20] GPR64:%vreg40 GPR64sp:%vreg41
%vreg35<def> = SUBREG_TO_REG 0, %vreg34, sub_32; GPR64:%vreg35
GPR32all:%vreg34
%vreg36<def> = SBFMXri %vreg35, 0, 7; GPR64:%vreg36,%vreg35
%vreg37<def> = MOVi64imm 8; GPR64:%vreg37
%vreg38<def> = MADDXrrr %vreg36<kill>, %vreg37<kill>, %XZR<kill>;
GPR64:%vreg38,%vreg36,%vreg37
%vreg39<def> = ADDXrr %vreg33, %vreg38<kill>; GPR64common:%vreg39
GPR64:%vreg33,%vreg38
%vreg32<def> = ANDWri %vreg30, 0; GPR32common:%vreg32 GPR32:%vreg30
STRBBui %vreg32, %vreg31, 0; mem:ST1[%A11] GPR32common:%vreg32
GPR64sp:%vreg31
STRXui %vreg28, %vreg29, 0; mem:ST8[undef] GPR64:%vreg28
GPR64sp:%vreg29
%vreg24<def> = SBFMWri %vreg23, 0, 0; GPR32:%vreg24,%vreg23
%vreg26<def> = SBFMWri %vreg25, 0, 0; GPR32:%vreg26,%vreg25
%WZR<def> = SUBSWrr %vreg24, %vreg26, %NZCV<imp-def>;
GPR32:%vreg24,%vreg26
%vreg27<def> = CSINCWr %WZR<kill>, %WZR<kill>, 13, %NZCV<imp-use>;
GPR32:%vreg27
STRXui %vreg21, %vreg22, 0; mem:ST8[undef] GPR64:%vreg21
GPR64sp:%vreg22
STRXui %vreg19, %vreg39, 0; mem:ST8[%G31] GPR64common:%vreg19,%vreg39
STRBBui %vreg18, %vreg19, 0; mem:ST1[%G26] GPR32:%vreg18
GPR64common:%vreg19
%vreg14<def> = SBFMWri %vreg12, 0, 7; GPR32:%vreg14,%vreg12
%vreg15<def> = ANDWri %vreg13, 7; GPR32common:%vreg15 GPR32:%vreg13
%vreg16<def> = ASRVWr %vreg14<kill>, %vreg15<kill>;
GPR32:%vreg16,%vreg14 GPR32common:%vreg15
%vreg17<def> = ANDWri %vreg16<kill>, 7; GPR32common:%vreg17
GPR32:%vreg16
STRBBui %vreg17, %vreg11, 0; mem:ST1[undef] GPR32common:%vreg17
GPR64sp:%vreg11
%vreg9<def> = ANDWri %vreg27, 0; GPR32common:%vreg9 GPR32:%vreg27
STRBBui %vreg9, %vreg8, 0; mem:ST1[undef] GPR32common:%vreg9
GPR64sp:%vreg8
STRHHui %vreg6, <fi#2>, 0; mem:ST2[FixedStack2] GPR32:%vreg6
%vreg5<def> = ANDWri %vreg3, 0; GPR32common:%vreg5 GPR32:%vreg3
STRBBui %vreg5, %vreg4, 0; mem:ST1[%A9] GPR32common:%vreg5
GPR64sp:%vreg4
STRXui %vreg1, %vreg2, 0; mem:ST8[undef] GPR64:%vreg1 GPR64sp:%vreg2
STRXui %vreg0, <fi#0>, 0; mem:ST8[FixedStack0] GPR64:%vreg0
RET_ReallyLR
# End machine code for function f.
*** Bad machine code: FrameSetup is after another FrameSetup ***
- function: f
- basic block: BB#0 (0x62100000f550)
- instruction: ADJCALLSTACKDOWN
*** Bad machine code: FrameDestroy is not after a FrameSetup ***
- function: f
- basic block: BB#0 (0x62100000f550)
- instruction: ADJCALLSTACKUP
LLVM ERROR: Found 2 machine code errors.
-----
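For what it's worth, in the dump above each variable-sized alloca gets its own
ADJCALLSTACKDOWN/ADJCALLSTACKUP pair, and the pair for one of those allocas
appears to end up enclosing the pair emitted for the fmod libcall (from the
frem), so the verifier sees a second FrameSetup before the first FrameDestroy.
A hypothetical, untested reduction along those lines would just need a dynamic
alloca next to an frem; the function below is made up for illustration and is
not the attached reproducer:
-----
define void @g(i32 %n) {
  ; variable-sized alloca: at -O0 this is lowered with its own
  ; ADJCALLSTACKDOWN/ADJCALLSTACKUP pair around the SP adjustment
  %p = alloca double, i32 %n
  %x = load double, double* %p
  ; frem has no AArch64 instruction and becomes a call to fmod,
  ; which emits another ADJCALLSTACKDOWN/ADJCALLSTACKUP pair
  %r = frem double %x, %x
  store double %r, double* %p
  ret void
}
-----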