[llvm] r230794 - [opaque pointer type] Add textual IR support for explicit type parameter to load instruction
David Blaikie
dblaikie at gmail.com
Fri Feb 27 13:18:04 PST 2015
Author: dblaikie
Date: Fri Feb 27 15:17:42 2015
New Revision: 230794
URL: http://llvm.org/viewvc/llvm-project?rev=230794&view=rev
Log:
[opaque pointer type] Add textual IR support for explicit type parameter to load instruction
Essentially the same as the GEP change in r230786.
A similar migration script can be used to update test cases, though a few more
test case improvements/changes were required this time around (r229269-r229278):
import sys
import re

# Rewrite each textual load so the pointee type appears as an explicit
# first operand: "load <ty>* <ptr>" becomes "load <ty>, <ty>* <ptr>".
pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

for line in sys.stdin:
    sys.stdout.write(re.sub(pat, r"\1, \2\3*\4", line))
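For illustration, a minimal sanity check (a sketch using the pattern above;
the sample load line is hypothetical) shows the explicit type parameter
being inserted:

import re

pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

old = "  %v = load i32* %p, align 4"
print(re.sub(pat, r"\1, \2\3*\4", old))
# Prints "  %v = load i32, i32* %p, align 4": the pointee type is now
# spelled out before the pointer operand, per the new load syntax.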
Reviewers: rafael, dexonsmith, grosser
Differential Revision: http://reviews.llvm.org/D7649
Added:
llvm/trunk/test/Assembler/invalid-load-mismatched-explicit-type.ll
llvm/trunk/test/Assembler/invalid-load-missing-explicit-type.ll
Modified:
llvm/trunk/lib/AsmParser/LLParser.cpp
llvm/trunk/lib/IR/AsmWriter.cpp
llvm/trunk/test/Analysis/BasicAA/2003-02-26-AccessSizeTest.ll
llvm/trunk/test/Analysis/BasicAA/2003-04-22-GEPProblem.ll
llvm/trunk/test/Analysis/BasicAA/2003-05-21-GEP-Problem.ll
llvm/trunk/test/Analysis/BasicAA/2003-06-01-AliasCrash.ll
llvm/trunk/test/Analysis/BasicAA/2003-09-19-LocalArgument.ll
llvm/trunk/test/Analysis/BasicAA/2006-03-03-BadArraySubscript.ll
llvm/trunk/test/Analysis/BasicAA/2007-01-13-BasePointerBadNoAlias.ll
llvm/trunk/test/Analysis/BasicAA/2007-08-05-GetOverloadedModRef.ll
llvm/trunk/test/Analysis/BasicAA/2007-10-24-ArgumentsGlobals.ll
llvm/trunk/test/Analysis/BasicAA/2007-11-05-SizeCrash.ll
llvm/trunk/test/Analysis/BasicAA/2007-12-08-OutOfBoundsCrash.ll
llvm/trunk/test/Analysis/BasicAA/2008-06-02-GEPTailCrash.ll
llvm/trunk/test/Analysis/BasicAA/2008-11-23-NoaliasRet.ll
llvm/trunk/test/Analysis/BasicAA/2009-03-04-GEPNoalias.ll
llvm/trunk/test/Analysis/BasicAA/2009-10-13-AtomicModRef.ll
llvm/trunk/test/Analysis/BasicAA/2009-10-13-GEP-BaseNoAlias.ll
llvm/trunk/test/Analysis/BasicAA/2010-09-15-GEP-SignedArithmetic.ll
llvm/trunk/test/Analysis/BasicAA/2014-03-18-Maxlookup-reached.ll
llvm/trunk/test/Analysis/BasicAA/aligned-overread.ll
llvm/trunk/test/Analysis/BasicAA/args-rets-allocas-loads.ll
llvm/trunk/test/Analysis/BasicAA/byval.ll
llvm/trunk/test/Analysis/BasicAA/cas.ll
llvm/trunk/test/Analysis/BasicAA/dag.ll
llvm/trunk/test/Analysis/BasicAA/featuretest.ll
llvm/trunk/test/Analysis/BasicAA/full-store-partial-alias.ll
llvm/trunk/test/Analysis/BasicAA/gcsetest.ll
llvm/trunk/test/Analysis/BasicAA/gep-alias.ll
llvm/trunk/test/Analysis/BasicAA/global-size.ll
llvm/trunk/test/Analysis/BasicAA/invariant_load.ll
llvm/trunk/test/Analysis/BasicAA/memset_pattern.ll
llvm/trunk/test/Analysis/BasicAA/modref.ll
llvm/trunk/test/Analysis/BasicAA/must-and-partial.ll
llvm/trunk/test/Analysis/BasicAA/no-escape-call.ll
llvm/trunk/test/Analysis/BasicAA/noalias-bugs.ll
llvm/trunk/test/Analysis/BasicAA/noalias-param.ll
llvm/trunk/test/Analysis/BasicAA/nocapture.ll
llvm/trunk/test/Analysis/BasicAA/phi-aa.ll
llvm/trunk/test/Analysis/BasicAA/phi-spec-order.ll
llvm/trunk/test/Analysis/BasicAA/phi-speculation.ll
llvm/trunk/test/Analysis/BasicAA/pr18573.ll
llvm/trunk/test/Analysis/BasicAA/store-promote.ll
llvm/trunk/test/Analysis/BasicAA/tailcall-modref.ll
llvm/trunk/test/Analysis/BasicAA/underlying-value.ll
llvm/trunk/test/Analysis/BasicAA/zext.ll
llvm/trunk/test/Analysis/BlockFrequencyInfo/basic.ll
llvm/trunk/test/Analysis/BranchProbabilityInfo/basic.ll
llvm/trunk/test/Analysis/BranchProbabilityInfo/loop.ll
llvm/trunk/test/Analysis/BranchProbabilityInfo/pr18705.ll
llvm/trunk/test/Analysis/CFLAliasAnalysis/full-store-partial-alias.ll
llvm/trunk/test/Analysis/CFLAliasAnalysis/gep-signed-arithmetic.ll
llvm/trunk/test/Analysis/CFLAliasAnalysis/multilevel-combine.ll
llvm/trunk/test/Analysis/CFLAliasAnalysis/multilevel.ll
llvm/trunk/test/Analysis/CFLAliasAnalysis/must-and-partial.ll
llvm/trunk/test/Analysis/CostModel/AArch64/store.ll
llvm/trunk/test/Analysis/CostModel/ARM/insertelement.ll
llvm/trunk/test/Analysis/CostModel/PowerPC/load_store.ll
llvm/trunk/test/Analysis/CostModel/X86/intrinsic-cost.ll
llvm/trunk/test/Analysis/CostModel/X86/load_store.ll
llvm/trunk/test/Analysis/CostModel/X86/loop_v2.ll
llvm/trunk/test/Analysis/CostModel/X86/vectorized-loop.ll
llvm/trunk/test/Analysis/Delinearization/gcd_multiply_expr.ll
llvm/trunk/test/Analysis/Delinearization/himeno_1.ll
llvm/trunk/test/Analysis/Delinearization/himeno_2.ll
llvm/trunk/test/Analysis/Delinearization/multidim_only_ivs_2d.ll
llvm/trunk/test/Analysis/Delinearization/undef.ll
llvm/trunk/test/Analysis/DependenceAnalysis/Banerjee.ll
llvm/trunk/test/Analysis/DependenceAnalysis/Coupled.ll
llvm/trunk/test/Analysis/DependenceAnalysis/ExactRDIV.ll
llvm/trunk/test/Analysis/DependenceAnalysis/ExactSIV.ll
llvm/trunk/test/Analysis/DependenceAnalysis/GCD.ll
llvm/trunk/test/Analysis/DependenceAnalysis/Invariant.ll
llvm/trunk/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll
llvm/trunk/test/Analysis/DependenceAnalysis/Preliminary.ll
llvm/trunk/test/Analysis/DependenceAnalysis/Propagating.ll
llvm/trunk/test/Analysis/DependenceAnalysis/Separability.ll
llvm/trunk/test/Analysis/DependenceAnalysis/StrongSIV.ll
llvm/trunk/test/Analysis/DependenceAnalysis/SymbolicRDIV.ll
llvm/trunk/test/Analysis/DependenceAnalysis/SymbolicSIV.ll
llvm/trunk/test/Analysis/DependenceAnalysis/WeakCrossingSIV.ll
llvm/trunk/test/Analysis/DependenceAnalysis/WeakZeroDstSIV.ll
llvm/trunk/test/Analysis/DependenceAnalysis/WeakZeroSrcSIV.ll
llvm/trunk/test/Analysis/DependenceAnalysis/ZIV.ll
llvm/trunk/test/Analysis/GlobalsModRef/2008-09-03-ReadGlobals.ll
llvm/trunk/test/Analysis/GlobalsModRef/aliastest.ll
llvm/trunk/test/Analysis/GlobalsModRef/chaining-analysis.ll
llvm/trunk/test/Analysis/GlobalsModRef/indirect-global.ll
llvm/trunk/test/Analysis/GlobalsModRef/modreftest.ll
llvm/trunk/test/Analysis/GlobalsModRef/pr12351.ll
llvm/trunk/test/Analysis/GlobalsModRef/volatile-instrs.ll
llvm/trunk/test/Analysis/LazyCallGraph/basic.ll
llvm/trunk/test/Analysis/LoopAccessAnalysis/backward-dep-different-types.ll
llvm/trunk/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks-no-dbg.ll
llvm/trunk/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks.ll
llvm/trunk/test/Analysis/MemoryDependenceAnalysis/memdep_requires_dominator_tree.ll
llvm/trunk/test/Analysis/ScalarEvolution/2008-07-12-UnneededSelect1.ll
llvm/trunk/test/Analysis/ScalarEvolution/2008-12-08-FiniteSGE.ll
llvm/trunk/test/Analysis/ScalarEvolution/2009-01-02-SignedNegativeStride.ll
llvm/trunk/test/Analysis/ScalarEvolution/2009-07-04-GroupConstantsWidthMismatch.ll
llvm/trunk/test/Analysis/ScalarEvolution/2012-03-26-LoadConstant.ll
llvm/trunk/test/Analysis/ScalarEvolution/avoid-infinite-recursion-0.ll
llvm/trunk/test/Analysis/ScalarEvolution/avoid-smax-0.ll
llvm/trunk/test/Analysis/ScalarEvolution/avoid-smax-1.ll
llvm/trunk/test/Analysis/ScalarEvolution/infer-prestart-no-wrap.ll
llvm/trunk/test/Analysis/ScalarEvolution/load-with-range-metadata.ll
llvm/trunk/test/Analysis/ScalarEvolution/load.ll
llvm/trunk/test/Analysis/ScalarEvolution/max-trip-count.ll
llvm/trunk/test/Analysis/ScalarEvolution/min-max-exprs.ll
llvm/trunk/test/Analysis/ScalarEvolution/nsw-offset-assume.ll
llvm/trunk/test/Analysis/ScalarEvolution/nsw-offset.ll
llvm/trunk/test/Analysis/ScalarEvolution/nsw.ll
llvm/trunk/test/Analysis/ScalarEvolution/pr22179.ll
llvm/trunk/test/Analysis/ScalarEvolution/pr22674.ll
llvm/trunk/test/Analysis/ScalarEvolution/scev-aa.ll
llvm/trunk/test/Analysis/ScalarEvolution/scev-prestart-nowrap.ll
llvm/trunk/test/Analysis/ScalarEvolution/sext-iv-0.ll
llvm/trunk/test/Analysis/ScalarEvolution/sext-iv-1.ll
llvm/trunk/test/Analysis/ScalarEvolution/sext-iv-2.ll
llvm/trunk/test/Analysis/ScalarEvolution/sle.ll
llvm/trunk/test/Analysis/ScalarEvolution/trip-count11.ll
llvm/trunk/test/Analysis/ScalarEvolution/trip-count12.ll
llvm/trunk/test/Analysis/ScalarEvolution/trip-count4.ll
llvm/trunk/test/Analysis/ScalarEvolution/trip-count5.ll
llvm/trunk/test/Analysis/ScalarEvolution/trip-count6.ll
llvm/trunk/test/Analysis/ScalarEvolution/trip-count7.ll
llvm/trunk/test/Analysis/ScalarEvolution/zext-signed-addrec.ll
llvm/trunk/test/Analysis/ScopedNoAliasAA/basic-domains.ll
llvm/trunk/test/Analysis/ScopedNoAliasAA/basic.ll
llvm/trunk/test/Analysis/ScopedNoAliasAA/basic2.ll
llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll
llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/aliastest.ll
llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/argument-promotion.ll
llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/dse.ll
llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/dynamic-indices.ll
llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/gvn-nonlocal-type-mismatch.ll
llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/licm.ll
llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/placement-tbaa.ll
llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/precedence.ll
llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/sink.ll
llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll
llvm/trunk/test/Analysis/ValueTracking/memory-dereferenceable.ll
llvm/trunk/test/Assembler/2002-04-29-NameBinding.ll
llvm/trunk/test/Assembler/2002-08-19-BytecodeReader.ll
llvm/trunk/test/Assembler/2003-08-20-ConstantExprGEP-Fold.ll
llvm/trunk/test/Assembler/2004-06-07-VerifierBug.ll
llvm/trunk/test/Assembler/2007-01-05-Cmp-ConstExpr.ll
llvm/trunk/test/Assembler/2007-04-20-AlignedLoad.ll
llvm/trunk/test/Assembler/2007-12-11-AddressSpaces.ll
llvm/trunk/test/Assembler/2010-02-05-FunctionLocalMetadataBecomesNull.ll
llvm/trunk/test/Assembler/align-inst-load.ll
llvm/trunk/test/Assembler/align-inst.ll
llvm/trunk/test/Assembler/atomic.ll
llvm/trunk/test/Assembler/fast-math-flags.ll
llvm/trunk/test/Assembler/half-constprop.ll
llvm/trunk/test/Assembler/half-conv.ll
llvm/trunk/test/Assembler/insertextractvalue.ll
llvm/trunk/test/Assembler/numbered-values.ll
llvm/trunk/test/Assembler/unnamed.ll
llvm/trunk/test/Assembler/upgrade-loop-metadata.ll
llvm/trunk/test/Assembler/uselistorder.ll
llvm/trunk/test/Bitcode/arm32_neon_vcnt_upgrade.ll
llvm/trunk/test/Bitcode/case-ranges-3.3.ll
llvm/trunk/test/Bitcode/function-encoding-rel-operands.ll
llvm/trunk/test/Bitcode/memInstructions.3.2.ll
llvm/trunk/test/Bitcode/metadata-2.ll
llvm/trunk/test/Bitcode/upgrade-loop-metadata.ll
llvm/trunk/test/Bitcode/use-list-order.ll
llvm/trunk/test/CodeGen/AArch64/128bit_load_store.ll
llvm/trunk/test/CodeGen/AArch64/PBQP-chain.ll
llvm/trunk/test/CodeGen/AArch64/PBQP-coalesce-benefit.ll
llvm/trunk/test/CodeGen/AArch64/PBQP-csr.ll
llvm/trunk/test/CodeGen/AArch64/Redundantstore.ll
llvm/trunk/test/CodeGen/AArch64/a57-csel.ll
llvm/trunk/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll
llvm/trunk/test/CodeGen/AArch64/aarch64-2014-12-02-combine-soften.ll
llvm/trunk/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll
llvm/trunk/test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll
llvm/trunk/test/CodeGen/AArch64/aarch64-address-type-promotion.ll
llvm/trunk/test/CodeGen/AArch64/aarch64-be-bv.ll
llvm/trunk/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll
llvm/trunk/test/CodeGen/AArch64/aarch64-gep-opt.ll
llvm/trunk/test/CodeGen/AArch64/aarch64-smull.ll
llvm/trunk/test/CodeGen/AArch64/addsub-shifted.ll
llvm/trunk/test/CodeGen/AArch64/addsub.ll
llvm/trunk/test/CodeGen/AArch64/addsub_ext.ll
llvm/trunk/test/CodeGen/AArch64/alloca.ll
llvm/trunk/test/CodeGen/AArch64/and-mask-removal.ll
llvm/trunk/test/CodeGen/AArch64/arm64-2011-04-21-CPSRBug.ll
llvm/trunk/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll
llvm/trunk/test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll
llvm/trunk/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll
llvm/trunk/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll
llvm/trunk/test/CodeGen/AArch64/arm64-abi-varargs.ll
llvm/trunk/test/CodeGen/AArch64/arm64-abi.ll
llvm/trunk/test/CodeGen/AArch64/arm64-abi_align.ll
llvm/trunk/test/CodeGen/AArch64/arm64-addr-mode-folding.ll
llvm/trunk/test/CodeGen/AArch64/arm64-addr-type-promotion.ll
llvm/trunk/test/CodeGen/AArch64/arm64-addrmode.ll
llvm/trunk/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll
llvm/trunk/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll
llvm/trunk/test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll
llvm/trunk/test/CodeGen/AArch64/arm64-atomic-128.ll
llvm/trunk/test/CodeGen/AArch64/arm64-atomic.ll
llvm/trunk/test/CodeGen/AArch64/arm64-basic-pic.ll
llvm/trunk/test/CodeGen/AArch64/arm64-bcc.ll
llvm/trunk/test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll
llvm/trunk/test/CodeGen/AArch64/arm64-big-endian-varargs.ll
llvm/trunk/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll
llvm/trunk/test/CodeGen/AArch64/arm64-bitfield-extract.ll
llvm/trunk/test/CodeGen/AArch64/arm64-blockaddress.ll
llvm/trunk/test/CodeGen/AArch64/arm64-call-tailcalls.ll
llvm/trunk/test/CodeGen/AArch64/arm64-cast-opt.ll
llvm/trunk/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll
llvm/trunk/test/CodeGen/AArch64/arm64-ccmp.ll
llvm/trunk/test/CodeGen/AArch64/arm64-code-model-large-abs.ll
llvm/trunk/test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll
llvm/trunk/test/CodeGen/AArch64/arm64-collect-loh.ll
llvm/trunk/test/CodeGen/AArch64/arm64-complex-copy-noneon.ll
llvm/trunk/test/CodeGen/AArch64/arm64-const-addr.ll
llvm/trunk/test/CodeGen/AArch64/arm64-convert-v4f64.ll
llvm/trunk/test/CodeGen/AArch64/arm64-cse.ll
llvm/trunk/test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll
llvm/trunk/test/CodeGen/AArch64/arm64-dagcombiner-load-slicing.ll
llvm/trunk/test/CodeGen/AArch64/arm64-dup.ll
llvm/trunk/test/CodeGen/AArch64/arm64-early-ifcvt.ll
llvm/trunk/test/CodeGen/AArch64/arm64-elf-globals.ll
llvm/trunk/test/CodeGen/AArch64/arm64-ext.ll
llvm/trunk/test/CodeGen/AArch64/arm64-extend.ll
llvm/trunk/test/CodeGen/AArch64/arm64-extload-knownzero.ll
llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll
llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-br.ll
llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-call.ll
llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-conversion.ll
llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-gv.ll
llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-indirectbr.ll
llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-ret.ll
llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel.ll
llvm/trunk/test/CodeGen/AArch64/arm64-fastisel-gep-promote-before-add.ll
llvm/trunk/test/CodeGen/AArch64/arm64-fmuladd.ll
llvm/trunk/test/CodeGen/AArch64/arm64-fold-address.ll
llvm/trunk/test/CodeGen/AArch64/arm64-fold-lsl.ll
llvm/trunk/test/CodeGen/AArch64/arm64-fp128-folding.ll
llvm/trunk/test/CodeGen/AArch64/arm64-fp128.ll
llvm/trunk/test/CodeGen/AArch64/arm64-i16-subreg-extract.ll
llvm/trunk/test/CodeGen/AArch64/arm64-indexed-memory.ll
llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll
llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll
llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm.ll
llvm/trunk/test/CodeGen/AArch64/arm64-ld1.ll
llvm/trunk/test/CodeGen/AArch64/arm64-ldp.ll
llvm/trunk/test/CodeGen/AArch64/arm64-ldur.ll
llvm/trunk/test/CodeGen/AArch64/arm64-misched-basic-A53.ll
llvm/trunk/test/CodeGen/AArch64/arm64-misched-basic-A57.ll
llvm/trunk/test/CodeGen/AArch64/arm64-neon-simd-ldst-one.ll
llvm/trunk/test/CodeGen/AArch64/arm64-patchpoint-scratch-regs.ll
llvm/trunk/test/CodeGen/AArch64/arm64-patchpoint.ll
llvm/trunk/test/CodeGen/AArch64/arm64-pic-local-symbol.ll
llvm/trunk/test/CodeGen/AArch64/arm64-platform-reg.ll
llvm/trunk/test/CodeGen/AArch64/arm64-prefetch.ll
llvm/trunk/test/CodeGen/AArch64/arm64-redzone.ll
llvm/trunk/test/CodeGen/AArch64/arm64-register-offset-addressing.ll
llvm/trunk/test/CodeGen/AArch64/arm64-regress-interphase-shift.ll
llvm/trunk/test/CodeGen/AArch64/arm64-return-vector.ll
llvm/trunk/test/CodeGen/AArch64/arm64-rev.ll
llvm/trunk/test/CodeGen/AArch64/arm64-scaled_iv.ll
llvm/trunk/test/CodeGen/AArch64/arm64-scvt.ll
llvm/trunk/test/CodeGen/AArch64/arm64-sitofp-combine-chains.ll
llvm/trunk/test/CodeGen/AArch64/arm64-spill-lr.ll
llvm/trunk/test/CodeGen/AArch64/arm64-spill.ll
llvm/trunk/test/CodeGen/AArch64/arm64-stack-no-frame.ll
llvm/trunk/test/CodeGen/AArch64/arm64-strict-align.ll
llvm/trunk/test/CodeGen/AArch64/arm64-tls-darwin.ll
llvm/trunk/test/CodeGen/AArch64/arm64-tls-dynamic-together.ll
llvm/trunk/test/CodeGen/AArch64/arm64-tls-dynamics.ll
llvm/trunk/test/CodeGen/AArch64/arm64-tls-execs.ll
llvm/trunk/test/CodeGen/AArch64/arm64-triv-disjoint-mem-access.ll
llvm/trunk/test/CodeGen/AArch64/arm64-trn.ll
llvm/trunk/test/CodeGen/AArch64/arm64-trunc-store.ll
llvm/trunk/test/CodeGen/AArch64/arm64-unaligned_ldst.ll
llvm/trunk/test/CodeGen/AArch64/arm64-uzp.ll
llvm/trunk/test/CodeGen/AArch64/arm64-vabs.ll
llvm/trunk/test/CodeGen/AArch64/arm64-vadd.ll
llvm/trunk/test/CodeGen/AArch64/arm64-vbitwise.ll
llvm/trunk/test/CodeGen/AArch64/arm64-vcmp.ll
llvm/trunk/test/CodeGen/AArch64/arm64-vcnt.ll
llvm/trunk/test/CodeGen/AArch64/arm64-vcvt.ll
llvm/trunk/test/CodeGen/AArch64/arm64-vector-imm.ll
llvm/trunk/test/CodeGen/AArch64/arm64-vector-ldst.ll
llvm/trunk/test/CodeGen/AArch64/arm64-vext.ll
llvm/trunk/test/CodeGen/AArch64/arm64-vhadd.ll
llvm/trunk/test/CodeGen/AArch64/arm64-vhsub.ll
llvm/trunk/test/CodeGen/AArch64/arm64-vmax.ll
llvm/trunk/test/CodeGen/AArch64/arm64-vmul.ll
llvm/trunk/test/CodeGen/AArch64/arm64-volatile.ll
llvm/trunk/test/CodeGen/AArch64/arm64-vqadd.ll
llvm/trunk/test/CodeGen/AArch64/arm64-vqsub.ll
llvm/trunk/test/CodeGen/AArch64/arm64-vshift.ll
llvm/trunk/test/CodeGen/AArch64/arm64-vshr.ll
llvm/trunk/test/CodeGen/AArch64/arm64-vsqrt.ll
llvm/trunk/test/CodeGen/AArch64/arm64-vsra.ll
llvm/trunk/test/CodeGen/AArch64/arm64-vsub.ll
llvm/trunk/test/CodeGen/AArch64/arm64-weak-reference.ll
llvm/trunk/test/CodeGen/AArch64/arm64-zextload-unscaled.ll
llvm/trunk/test/CodeGen/AArch64/arm64-zip.ll
llvm/trunk/test/CodeGen/AArch64/assertion-rc-mismatch.ll
llvm/trunk/test/CodeGen/AArch64/atomic-ops-not-barriers.ll
llvm/trunk/test/CodeGen/AArch64/atomic-ops.ll
llvm/trunk/test/CodeGen/AArch64/basic-pic.ll
llvm/trunk/test/CodeGen/AArch64/bitfield-insert-0.ll
llvm/trunk/test/CodeGen/AArch64/bitfield-insert.ll
llvm/trunk/test/CodeGen/AArch64/bitfield.ll
llvm/trunk/test/CodeGen/AArch64/blockaddress.ll
llvm/trunk/test/CodeGen/AArch64/bool-loads.ll
llvm/trunk/test/CodeGen/AArch64/breg.ll
llvm/trunk/test/CodeGen/AArch64/callee-save.ll
llvm/trunk/test/CodeGen/AArch64/cmpwithshort.ll
llvm/trunk/test/CodeGen/AArch64/code-model-large-abs.ll
llvm/trunk/test/CodeGen/AArch64/combine-comparisons-by-cse.ll
llvm/trunk/test/CodeGen/AArch64/compare-branch.ll
llvm/trunk/test/CodeGen/AArch64/complex-copy-noneon.ll
llvm/trunk/test/CodeGen/AArch64/complex-int-to-fp.ll
llvm/trunk/test/CodeGen/AArch64/dag-combine-invaraints.ll
llvm/trunk/test/CodeGen/AArch64/dp-3source.ll
llvm/trunk/test/CodeGen/AArch64/dp1.ll
llvm/trunk/test/CodeGen/AArch64/dp2.ll
llvm/trunk/test/CodeGen/AArch64/eliminate-trunc.ll
llvm/trunk/test/CodeGen/AArch64/f16-convert.ll
llvm/trunk/test/CodeGen/AArch64/fast-isel-addressing-modes.ll
llvm/trunk/test/CodeGen/AArch64/fast-isel-int-ext.ll
llvm/trunk/test/CodeGen/AArch64/fast-isel-int-ext2.ll
llvm/trunk/test/CodeGen/AArch64/fast-isel-int-ext3.ll
llvm/trunk/test/CodeGen/AArch64/floatdp_1source.ll
llvm/trunk/test/CodeGen/AArch64/floatdp_2source.ll
llvm/trunk/test/CodeGen/AArch64/fp128-folding.ll
llvm/trunk/test/CodeGen/AArch64/fp16-instructions.ll
llvm/trunk/test/CodeGen/AArch64/fp16-v4-instructions.ll
llvm/trunk/test/CodeGen/AArch64/fp16-v8-instructions.ll
llvm/trunk/test/CodeGen/AArch64/fp16-vector-load-store.ll
llvm/trunk/test/CodeGen/AArch64/fpimm.ll
llvm/trunk/test/CodeGen/AArch64/free-zext.ll
llvm/trunk/test/CodeGen/AArch64/func-argpassing.ll
llvm/trunk/test/CodeGen/AArch64/func-calls.ll
llvm/trunk/test/CodeGen/AArch64/funcptr_cast.ll
llvm/trunk/test/CodeGen/AArch64/ghc-cc.ll
llvm/trunk/test/CodeGen/AArch64/global-alignment.ll
llvm/trunk/test/CodeGen/AArch64/global-merge-4.ll
llvm/trunk/test/CodeGen/AArch64/half.ll
llvm/trunk/test/CodeGen/AArch64/i1-contents.ll
llvm/trunk/test/CodeGen/AArch64/ldst-opt.ll
llvm/trunk/test/CodeGen/AArch64/ldst-regoffset.ll
llvm/trunk/test/CodeGen/AArch64/ldst-unscaledimm.ll
llvm/trunk/test/CodeGen/AArch64/ldst-unsignedimm.ll
llvm/trunk/test/CodeGen/AArch64/literal_pools_float.ll
llvm/trunk/test/CodeGen/AArch64/local_vars.ll
llvm/trunk/test/CodeGen/AArch64/logical_shifted_reg.ll
llvm/trunk/test/CodeGen/AArch64/machine_cse.ll
llvm/trunk/test/CodeGen/AArch64/neon-fpround_f128.ll
llvm/trunk/test/CodeGen/AArch64/neon-truncStore-extLoad.ll
llvm/trunk/test/CodeGen/AArch64/nzcv-save.ll
llvm/trunk/test/CodeGen/AArch64/paired-load.ll
llvm/trunk/test/CodeGen/AArch64/pic-eh-stubs.ll
llvm/trunk/test/CodeGen/AArch64/ragreedy-csr.ll
llvm/trunk/test/CodeGen/AArch64/regress-tail-livereg.ll
llvm/trunk/test/CodeGen/AArch64/regress-tblgen-chains.ll
llvm/trunk/test/CodeGen/AArch64/regress-w29-reserved-with-fp.ll
llvm/trunk/test/CodeGen/AArch64/rm_redundant_cmp.ll
llvm/trunk/test/CodeGen/AArch64/sibling-call.ll
llvm/trunk/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll
llvm/trunk/test/CodeGen/AArch64/tbz-tbnz.ll
llvm/trunk/test/CodeGen/AArch64/tst-br.ll
llvm/trunk/test/CodeGen/ARM/2006-11-10-CycleInDAG.ll
llvm/trunk/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll
llvm/trunk/test/CodeGen/ARM/2007-03-07-CombinerCrash.ll
llvm/trunk/test/CodeGen/ARM/2007-03-13-InstrSched.ll
llvm/trunk/test/CodeGen/ARM/2007-03-21-JoinIntervalsCrash.ll
llvm/trunk/test/CodeGen/ARM/2007-03-27-RegScavengerAssert.ll
llvm/trunk/test/CodeGen/ARM/2007-04-02-RegScavengerAssert.ll
llvm/trunk/test/CodeGen/ARM/2007-04-03-UndefinedSymbol.ll
llvm/trunk/test/CodeGen/ARM/2007-04-30-CombinerCrash.ll
llvm/trunk/test/CodeGen/ARM/2007-05-03-BadPostIndexedLd.ll
llvm/trunk/test/CodeGen/ARM/2007-05-07-tailmerge-1.ll
llvm/trunk/test/CodeGen/ARM/2007-05-09-tailmerge-2.ll
llvm/trunk/test/CodeGen/ARM/2007-05-22-tailmerge-3.ll
llvm/trunk/test/CodeGen/ARM/2007-08-15-ReuseBug.ll
llvm/trunk/test/CodeGen/ARM/2008-02-04-LocalRegAllocBug.ll
llvm/trunk/test/CodeGen/ARM/2008-03-05-SxtInRegBug.ll
llvm/trunk/test/CodeGen/ARM/2008-03-07-RegScavengerAssert.ll
llvm/trunk/test/CodeGen/ARM/2008-04-10-ScavengerAssert.ll
llvm/trunk/test/CodeGen/ARM/2008-05-19-LiveIntervalsBug.ll
llvm/trunk/test/CodeGen/ARM/2008-07-24-CodeGenPrepCrash.ll
llvm/trunk/test/CodeGen/ARM/2008-08-07-AsmPrintBug.ll
llvm/trunk/test/CodeGen/ARM/2009-02-16-SpillerBug.ll
llvm/trunk/test/CodeGen/ARM/2009-02-22-SoftenFloatVaArg.ll
llvm/trunk/test/CodeGen/ARM/2009-02-27-SpillerBug.ll
llvm/trunk/test/CodeGen/ARM/2009-03-07-SpillerBug.ll
llvm/trunk/test/CodeGen/ARM/2009-04-06-AsmModifier.ll
llvm/trunk/test/CodeGen/ARM/2009-04-08-AggregateAddr.ll
llvm/trunk/test/CodeGen/ARM/2009-04-08-FloatUndef.ll
llvm/trunk/test/CodeGen/ARM/2009-04-09-RegScavengerAsm.ll
llvm/trunk/test/CodeGen/ARM/2009-05-11-CodePlacementCrash.ll
llvm/trunk/test/CodeGen/ARM/2009-06-04-MissingLiveIn.ll
llvm/trunk/test/CodeGen/ARM/2009-06-22-CoalescerBug.ll
llvm/trunk/test/CodeGen/ARM/2009-06-30-RegScavengerAssert.ll
llvm/trunk/test/CodeGen/ARM/2009-06-30-RegScavengerAssert2.ll
llvm/trunk/test/CodeGen/ARM/2009-06-30-RegScavengerAssert3.ll
llvm/trunk/test/CodeGen/ARM/2009-06-30-RegScavengerAssert4.ll
llvm/trunk/test/CodeGen/ARM/2009-06-30-RegScavengerAssert5.ll
llvm/trunk/test/CodeGen/ARM/2009-07-01-CommuteBug.ll
llvm/trunk/test/CodeGen/ARM/2009-07-18-RewriterBug.ll
llvm/trunk/test/CodeGen/ARM/2009-07-22-ScavengerAssert.ll
llvm/trunk/test/CodeGen/ARM/2009-07-22-SchedulerAssert.ll
llvm/trunk/test/CodeGen/ARM/2009-07-29-VFP3Registers.ll
llvm/trunk/test/CodeGen/ARM/2009-08-02-RegScavengerAssert-Neon.ll
llvm/trunk/test/CodeGen/ARM/2009-08-15-RegScavenger-EarlyClobber.ll
llvm/trunk/test/CodeGen/ARM/2009-08-21-PostRAKill.ll
llvm/trunk/test/CodeGen/ARM/2009-08-31-LSDA-Name.ll
llvm/trunk/test/CodeGen/ARM/2009-08-31-TwoRegShuffle.ll
llvm/trunk/test/CodeGen/ARM/2009-09-09-fpcmp-ole.ll
llvm/trunk/test/CodeGen/ARM/2009-09-13-InvalidSubreg.ll
llvm/trunk/test/CodeGen/ARM/2009-09-13-InvalidSuperReg.ll
llvm/trunk/test/CodeGen/ARM/2009-09-23-LiveVariablesBug.ll
llvm/trunk/test/CodeGen/ARM/2009-09-24-spill-align.ll
llvm/trunk/test/CodeGen/ARM/2009-10-02-NEONSubregsBug.ll
llvm/trunk/test/CodeGen/ARM/2009-11-02-NegativeLane.ll
llvm/trunk/test/CodeGen/ARM/2009-11-07-SubRegAsmPrinting.ll
llvm/trunk/test/CodeGen/ARM/2009-11-13-ScavengerAssert.ll
llvm/trunk/test/CodeGen/ARM/2009-11-13-ScavengerAssert2.ll
llvm/trunk/test/CodeGen/ARM/2009-11-13-VRRewriterCrash.ll
llvm/trunk/test/CodeGen/ARM/2010-03-04-eabi-fp-spill.ll
llvm/trunk/test/CodeGen/ARM/2010-03-04-stm-undef-addr.ll
llvm/trunk/test/CodeGen/ARM/2010-05-17-FastAllocCrash.ll
llvm/trunk/test/CodeGen/ARM/2010-05-18-LocalAllocCrash.ll
llvm/trunk/test/CodeGen/ARM/2010-05-18-PostIndexBug.ll
llvm/trunk/test/CodeGen/ARM/2010-05-19-Shuffles.ll
llvm/trunk/test/CodeGen/ARM/2010-05-21-BuildVector.ll
llvm/trunk/test/CodeGen/ARM/2010-06-11-vmovdrr-bitcast.ll
llvm/trunk/test/CodeGen/ARM/2010-06-21-LdStMultipleBug.ll
llvm/trunk/test/CodeGen/ARM/2010-06-21-nondarwin-tc.ll
llvm/trunk/test/CodeGen/ARM/2010-06-25-Thumb2ITInvalidIterator.ll
llvm/trunk/test/CodeGen/ARM/2010-06-29-SubregImpDefs.ll
llvm/trunk/test/CodeGen/ARM/2010-07-26-GlobalMerge.ll
llvm/trunk/test/CodeGen/ARM/2010-08-04-EHCrash.ll
llvm/trunk/test/CodeGen/ARM/2010-08-04-StackVariable.ll
llvm/trunk/test/CodeGen/ARM/2010-11-15-SpillEarlyClobber.ll
llvm/trunk/test/CodeGen/ARM/2010-12-08-tpsoft.ll
llvm/trunk/test/CodeGen/ARM/2010-12-15-elf-lcomm.ll
llvm/trunk/test/CodeGen/ARM/2011-01-19-MergedGlobalDbg.ll
llvm/trunk/test/CodeGen/ARM/2011-02-07-AntidepClobber.ll
llvm/trunk/test/CodeGen/ARM/2011-03-10-DAGCombineCrash.ll
llvm/trunk/test/CodeGen/ARM/2011-03-15-LdStMultipleBug.ll
llvm/trunk/test/CodeGen/ARM/2011-04-07-schediv.ll
llvm/trunk/test/CodeGen/ARM/2011-04-11-MachineLICMBug.ll
llvm/trunk/test/CodeGen/ARM/2011-04-12-FastRegAlloc.ll
llvm/trunk/test/CodeGen/ARM/2011-04-26-SchedTweak.ll
llvm/trunk/test/CodeGen/ARM/2011-08-02-MergedGlobalDbg.ll
llvm/trunk/test/CodeGen/ARM/2011-08-29-SchedCycle.ll
llvm/trunk/test/CodeGen/ARM/2011-08-29-ldr_pre_imm.ll
llvm/trunk/test/CodeGen/ARM/2011-09-09-OddVectorDivision.ll
llvm/trunk/test/CodeGen/ARM/2011-09-28-CMovCombineBug.ll
llvm/trunk/test/CodeGen/ARM/2011-10-26-ExpandUnalignedLoadCrash.ll
llvm/trunk/test/CodeGen/ARM/2011-11-07-PromoteVectorLoadStore.ll
llvm/trunk/test/CodeGen/ARM/2011-11-09-BitcastVectorDouble.ll
llvm/trunk/test/CodeGen/ARM/2011-11-09-IllegalVectorFPIntConvert.ll
llvm/trunk/test/CodeGen/ARM/2011-11-14-EarlyClobber.ll
llvm/trunk/test/CodeGen/ARM/2011-11-28-DAGCombineBug.ll
llvm/trunk/test/CodeGen/ARM/2011-11-29-128bitArithmetics.ll
llvm/trunk/test/CodeGen/ARM/2011-11-30-MergeAlignment.ll
llvm/trunk/test/CodeGen/ARM/2011-12-14-machine-sink.ll
llvm/trunk/test/CodeGen/ARM/2011-12-19-sjlj-clobber.ll
llvm/trunk/test/CodeGen/ARM/2012-01-23-PostRA-LICM.ll
llvm/trunk/test/CodeGen/ARM/2012-01-24-RegSequenceLiveRange.ll
llvm/trunk/test/CodeGen/ARM/2012-01-26-CopyPropKills.ll
llvm/trunk/test/CodeGen/ARM/2012-02-01-CoalescerBug.ll
llvm/trunk/test/CodeGen/ARM/2012-03-13-DAGCombineBug.ll
llvm/trunk/test/CodeGen/ARM/2012-06-12-SchedMemLatency.ll
llvm/trunk/test/CodeGen/ARM/2012-08-04-DtripleSpillReload.ll
llvm/trunk/test/CodeGen/ARM/2012-08-08-legalize-unaligned.ll
llvm/trunk/test/CodeGen/ARM/2012-08-09-neon-extload.ll
llvm/trunk/test/CodeGen/ARM/2012-08-23-legalize-vmull.ll
llvm/trunk/test/CodeGen/ARM/2012-10-04-AAPCS-byval-align8.ll
llvm/trunk/test/CodeGen/ARM/2012-10-04-FixedFrame-vs-byval.ll
llvm/trunk/test/CodeGen/ARM/2013-01-21-PR14992.ll
llvm/trunk/test/CodeGen/ARM/2013-04-18-load-overlap-PR14824.ll
llvm/trunk/test/CodeGen/ARM/2013-05-07-ByteLoadSameAddress.ll
llvm/trunk/test/CodeGen/ARM/2013-05-31-char-shift-crash.ll
llvm/trunk/test/CodeGen/ARM/2013-07-29-vector-or-combine.ll
llvm/trunk/test/CodeGen/ARM/2014-01-09-pseudo_expand_implicit_reg.ll
llvm/trunk/test/CodeGen/ARM/2015-01-21-thumbv4t-ldstr-opt.ll
llvm/trunk/test/CodeGen/ARM/MergeConsecutiveStores.ll
llvm/trunk/test/CodeGen/ARM/Windows/chkstk-movw-movt-isel.ll
llvm/trunk/test/CodeGen/ARM/Windows/dllimport.ll
llvm/trunk/test/CodeGen/ARM/Windows/frame-register.ll
llvm/trunk/test/CodeGen/ARM/Windows/movw-movt-relocations.ll
llvm/trunk/test/CodeGen/ARM/Windows/pic.ll
llvm/trunk/test/CodeGen/ARM/Windows/stack-probe-non-default.ll
llvm/trunk/test/CodeGen/ARM/Windows/vla.ll
llvm/trunk/test/CodeGen/ARM/a15-partial-update.ll
llvm/trunk/test/CodeGen/ARM/addrmode.ll
llvm/trunk/test/CodeGen/ARM/aliases.ll
llvm/trunk/test/CodeGen/ARM/alloc-no-stack-realign.ll
llvm/trunk/test/CodeGen/ARM/arm-and-tst-peephole.ll
llvm/trunk/test/CodeGen/ARM/arm-modifier.ll
llvm/trunk/test/CodeGen/ARM/atomic-64bit.ll
llvm/trunk/test/CodeGen/ARM/atomic-load-store.ll
llvm/trunk/test/CodeGen/ARM/atomic-op.ll
llvm/trunk/test/CodeGen/ARM/atomic-ops-v8.ll
llvm/trunk/test/CodeGen/ARM/available_externally.ll
llvm/trunk/test/CodeGen/ARM/avoid-cpsr-rmw.ll
llvm/trunk/test/CodeGen/ARM/bfi.ll
llvm/trunk/test/CodeGen/ARM/bfx.ll
llvm/trunk/test/CodeGen/ARM/big-endian-neon-bitconv.ll
llvm/trunk/test/CodeGen/ARM/big-endian-neon-extend.ll
llvm/trunk/test/CodeGen/ARM/big-endian-neon-trunc-store.ll
llvm/trunk/test/CodeGen/ARM/big-endian-ret-f64.ll
llvm/trunk/test/CodeGen/ARM/big-endian-vector-caller.ll
llvm/trunk/test/CodeGen/ARM/bswap16.ll
llvm/trunk/test/CodeGen/ARM/call-tc.ll
llvm/trunk/test/CodeGen/ARM/call.ll
llvm/trunk/test/CodeGen/ARM/call_nolink.ll
llvm/trunk/test/CodeGen/ARM/coalesce-dbgvalue.ll
llvm/trunk/test/CodeGen/ARM/coalesce-subregs.ll
llvm/trunk/test/CodeGen/ARM/code-placement.ll
llvm/trunk/test/CodeGen/ARM/commute-movcc.ll
llvm/trunk/test/CodeGen/ARM/compare-call.ll
llvm/trunk/test/CodeGen/ARM/copy-paired-reg.ll
llvm/trunk/test/CodeGen/ARM/crash-greedy-v6.ll
llvm/trunk/test/CodeGen/ARM/crash.ll
llvm/trunk/test/CodeGen/ARM/cse-ldrlit.ll
llvm/trunk/test/CodeGen/ARM/cse-libcalls.ll
llvm/trunk/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll
llvm/trunk/test/CodeGen/ARM/debug-frame-large-stack.ll
llvm/trunk/test/CodeGen/ARM/debug-frame-vararg.ll
llvm/trunk/test/CodeGen/ARM/debug-info-blocks.ll
llvm/trunk/test/CodeGen/ARM/divmod.ll
llvm/trunk/test/CodeGen/ARM/dwarf-eh.ll
llvm/trunk/test/CodeGen/ARM/dyn-stackalloc.ll
llvm/trunk/test/CodeGen/ARM/emit-big-cst.ll
llvm/trunk/test/CodeGen/ARM/extload-knownzero.ll
llvm/trunk/test/CodeGen/ARM/extloadi1.ll
llvm/trunk/test/CodeGen/ARM/fast-isel-GEP-coalesce.ll
llvm/trunk/test/CodeGen/ARM/fast-isel-align.ll
llvm/trunk/test/CodeGen/ARM/fast-isel-call.ll
llvm/trunk/test/CodeGen/ARM/fast-isel-fold.ll
llvm/trunk/test/CodeGen/ARM/fast-isel-ldr-str-arm.ll
llvm/trunk/test/CodeGen/ARM/fast-isel-ldr-str-thumb-neg-index.ll
llvm/trunk/test/CodeGen/ARM/fast-isel-ldrh-strh-arm.ll
llvm/trunk/test/CodeGen/ARM/fast-isel-load-store-verify.ll
llvm/trunk/test/CodeGen/ARM/fast-isel-pic.ll
llvm/trunk/test/CodeGen/ARM/fast-isel-pred.ll
llvm/trunk/test/CodeGen/ARM/fast-isel-redefinition.ll
llvm/trunk/test/CodeGen/ARM/fast-isel-static.ll
llvm/trunk/test/CodeGen/ARM/fast-isel-vararg.ll
llvm/trunk/test/CodeGen/ARM/fast-isel.ll
llvm/trunk/test/CodeGen/ARM/fastisel-gep-promote-before-add.ll
llvm/trunk/test/CodeGen/ARM/flag-crash.ll
llvm/trunk/test/CodeGen/ARM/fnegs.ll
llvm/trunk/test/CodeGen/ARM/fold-stack-adjust.ll
llvm/trunk/test/CodeGen/ARM/fp.ll
llvm/trunk/test/CodeGen/ARM/fp16.ll
llvm/trunk/test/CodeGen/ARM/fpcmp-opt.ll
llvm/trunk/test/CodeGen/ARM/fpmem.ll
llvm/trunk/test/CodeGen/ARM/fptoint.ll
llvm/trunk/test/CodeGen/ARM/frame-register.ll
llvm/trunk/test/CodeGen/ARM/fusedMAC.ll
llvm/trunk/test/CodeGen/ARM/ghc-tcreturn-lowered.ll
llvm/trunk/test/CodeGen/ARM/global-merge-1.ll
llvm/trunk/test/CodeGen/ARM/globals.ll
llvm/trunk/test/CodeGen/ARM/gv-stubs-crash.ll
llvm/trunk/test/CodeGen/ARM/half.ll
llvm/trunk/test/CodeGen/ARM/hidden-vis-2.ll
llvm/trunk/test/CodeGen/ARM/hidden-vis-3.ll
llvm/trunk/test/CodeGen/ARM/ifconv-kills.ll
llvm/trunk/test/CodeGen/ARM/ifconv-regmask.ll
llvm/trunk/test/CodeGen/ARM/ifcvt-branch-weight.ll
llvm/trunk/test/CodeGen/ARM/ifcvt11.ll
llvm/trunk/test/CodeGen/ARM/ifcvt5.ll
llvm/trunk/test/CodeGen/ARM/ifcvt7.ll
llvm/trunk/test/CodeGen/ARM/illegal-vector-bitcast.ll
llvm/trunk/test/CodeGen/ARM/indirectbr-2.ll
llvm/trunk/test/CodeGen/ARM/indirectbr.ll
llvm/trunk/test/CodeGen/ARM/inline-diagnostics.ll
llvm/trunk/test/CodeGen/ARM/interrupt-attr.ll
llvm/trunk/test/CodeGen/ARM/intrinsics-crypto.ll
llvm/trunk/test/CodeGen/ARM/invoke-donothing-assert.ll
llvm/trunk/test/CodeGen/ARM/isel-v8i32-crash.ll
llvm/trunk/test/CodeGen/ARM/krait-cpu-div-attribute.ll
llvm/trunk/test/CodeGen/ARM/large-stack.ll
llvm/trunk/test/CodeGen/ARM/ldm.ll
llvm/trunk/test/CodeGen/ARM/ldr.ll
llvm/trunk/test/CodeGen/ARM/ldr_ext.ll
llvm/trunk/test/CodeGen/ARM/ldr_frame.ll
llvm/trunk/test/CodeGen/ARM/ldr_post.ll
llvm/trunk/test/CodeGen/ARM/ldr_pre.ll
llvm/trunk/test/CodeGen/ARM/ldrd-memoper.ll
llvm/trunk/test/CodeGen/ARM/ldrd.ll
llvm/trunk/test/CodeGen/ARM/ldst-f32-2-i32.ll
llvm/trunk/test/CodeGen/ARM/ldstrex-m.ll
llvm/trunk/test/CodeGen/ARM/ldstrex.ll
llvm/trunk/test/CodeGen/ARM/load-global.ll
llvm/trunk/test/CodeGen/ARM/load.ll
llvm/trunk/test/CodeGen/ARM/load_i1_select.ll
llvm/trunk/test/CodeGen/ARM/long.ll
llvm/trunk/test/CodeGen/ARM/lsr-code-insertion.ll
llvm/trunk/test/CodeGen/ARM/lsr-icmp-imm.ll
llvm/trunk/test/CodeGen/ARM/lsr-unfolded-offset.ll
llvm/trunk/test/CodeGen/ARM/machine-cse-cmp.ll
llvm/trunk/test/CodeGen/ARM/machine-licm.ll
llvm/trunk/test/CodeGen/ARM/minsize-litpools.ll
llvm/trunk/test/CodeGen/ARM/misched-copy-arm.ll
llvm/trunk/test/CodeGen/ARM/mult-alt-generic-arm.ll
llvm/trunk/test/CodeGen/ARM/negative-offset.ll
llvm/trunk/test/CodeGen/ARM/neon_cmp.ll
llvm/trunk/test/CodeGen/ARM/neon_div.ll
llvm/trunk/test/CodeGen/ARM/neon_fpconv.ll
llvm/trunk/test/CodeGen/ARM/neon_ld1.ll
llvm/trunk/test/CodeGen/ARM/neon_ld2.ll
llvm/trunk/test/CodeGen/ARM/neon_spill.ll
llvm/trunk/test/CodeGen/ARM/no-fpu.ll
llvm/trunk/test/CodeGen/ARM/no-tail-call.ll
llvm/trunk/test/CodeGen/ARM/none-macho.ll
llvm/trunk/test/CodeGen/ARM/nop_concat_vectors.ll
llvm/trunk/test/CodeGen/ARM/optselect-regclass.ll
llvm/trunk/test/CodeGen/ARM/phi.ll
llvm/trunk/test/CodeGen/ARM/popcnt.ll
llvm/trunk/test/CodeGen/ARM/pr13249.ll
llvm/trunk/test/CodeGen/ARM/pr18364-movw.ll
llvm/trunk/test/CodeGen/ARM/pr3502.ll
llvm/trunk/test/CodeGen/ARM/private.ll
llvm/trunk/test/CodeGen/ARM/reg_sequence.ll
llvm/trunk/test/CodeGen/ARM/saxpy10-a9.ll
llvm/trunk/test/CodeGen/ARM/segmented-stacks.ll
llvm/trunk/test/CodeGen/ARM/select_xform.ll
llvm/trunk/test/CodeGen/ARM/shifter_operand.ll
llvm/trunk/test/CodeGen/ARM/smul.ll
llvm/trunk/test/CodeGen/ARM/space-directive.ll
llvm/trunk/test/CodeGen/ARM/spill-q.ll
llvm/trunk/test/CodeGen/ARM/ssp-data-layout.ll
llvm/trunk/test/CodeGen/ARM/stack-alignment.ll
llvm/trunk/test/CodeGen/ARM/str_post.ll
llvm/trunk/test/CodeGen/ARM/str_pre-2.ll
llvm/trunk/test/CodeGen/ARM/str_pre.ll
llvm/trunk/test/CodeGen/ARM/struct-byval-frame-index.ll
llvm/trunk/test/CodeGen/ARM/sub-cmp-peephole.ll
llvm/trunk/test/CodeGen/ARM/swift-atomics.ll
llvm/trunk/test/CodeGen/ARM/swift-vldm.ll
llvm/trunk/test/CodeGen/ARM/tail-dup.ll
llvm/trunk/test/CodeGen/ARM/test-sharedidx.ll
llvm/trunk/test/CodeGen/ARM/thumb1-varalloc.ll
llvm/trunk/test/CodeGen/ARM/thumb1_return_sequence.ll
llvm/trunk/test/CodeGen/ARM/thumb_indirect_calls.ll
llvm/trunk/test/CodeGen/ARM/tls1.ll
llvm/trunk/test/CodeGen/ARM/tls2.ll
llvm/trunk/test/CodeGen/ARM/tls3.ll
llvm/trunk/test/CodeGen/ARM/trunc_ldr.ll
llvm/trunk/test/CodeGen/ARM/truncstore-dag-combine.ll
llvm/trunk/test/CodeGen/ARM/twoaddrinstr.ll
llvm/trunk/test/CodeGen/ARM/uint64tof64.ll
llvm/trunk/test/CodeGen/ARM/umulo-32.ll
llvm/trunk/test/CodeGen/ARM/unaligned_load_store.ll
llvm/trunk/test/CodeGen/ARM/unaligned_load_store_vector.ll
llvm/trunk/test/CodeGen/ARM/undef-sext.ll
llvm/trunk/test/CodeGen/ARM/vaba.ll
llvm/trunk/test/CodeGen/ARM/vabd.ll
llvm/trunk/test/CodeGen/ARM/vabs.ll
llvm/trunk/test/CodeGen/ARM/vadd.ll
llvm/trunk/test/CodeGen/ARM/vargs_align.ll
llvm/trunk/test/CodeGen/ARM/vbits.ll
llvm/trunk/test/CodeGen/ARM/vbsl-constant.ll
llvm/trunk/test/CodeGen/ARM/vbsl.ll
llvm/trunk/test/CodeGen/ARM/vceq.ll
llvm/trunk/test/CodeGen/ARM/vcge.ll
llvm/trunk/test/CodeGen/ARM/vcgt.ll
llvm/trunk/test/CodeGen/ARM/vcnt.ll
llvm/trunk/test/CodeGen/ARM/vcombine.ll
llvm/trunk/test/CodeGen/ARM/vcvt-cost.ll
llvm/trunk/test/CodeGen/ARM/vcvt-v8.ll
llvm/trunk/test/CodeGen/ARM/vcvt.ll
llvm/trunk/test/CodeGen/ARM/vcvt_combine.ll
llvm/trunk/test/CodeGen/ARM/vdiv_combine.ll
llvm/trunk/test/CodeGen/ARM/vdup.ll
llvm/trunk/test/CodeGen/ARM/vector-DAGCombine.ll
llvm/trunk/test/CodeGen/ARM/vector-extend-narrow.ll
llvm/trunk/test/CodeGen/ARM/vector-load.ll
llvm/trunk/test/CodeGen/ARM/vector-promotion.ll
llvm/trunk/test/CodeGen/ARM/vector-spilling.ll
llvm/trunk/test/CodeGen/ARM/vector-store.ll
llvm/trunk/test/CodeGen/ARM/vext.ll
llvm/trunk/test/CodeGen/ARM/vfcmp.ll
llvm/trunk/test/CodeGen/ARM/vfp.ll
llvm/trunk/test/CodeGen/ARM/vget_lane.ll
llvm/trunk/test/CodeGen/ARM/vhadd.ll
llvm/trunk/test/CodeGen/ARM/vhsub.ll
llvm/trunk/test/CodeGen/ARM/vicmp.ll
llvm/trunk/test/CodeGen/ARM/vld1.ll
llvm/trunk/test/CodeGen/ARM/vld2.ll
llvm/trunk/test/CodeGen/ARM/vld3.ll
llvm/trunk/test/CodeGen/ARM/vld4.ll
llvm/trunk/test/CodeGen/ARM/vlddup.ll
llvm/trunk/test/CodeGen/ARM/vldlane.ll
llvm/trunk/test/CodeGen/ARM/vldm-liveness.ll
llvm/trunk/test/CodeGen/ARM/vldm-sched-a9.ll
llvm/trunk/test/CodeGen/ARM/vminmax.ll
llvm/trunk/test/CodeGen/ARM/vminmaxnm.ll
llvm/trunk/test/CodeGen/ARM/vmla.ll
llvm/trunk/test/CodeGen/ARM/vmls.ll
llvm/trunk/test/CodeGen/ARM/vmov.ll
llvm/trunk/test/CodeGen/ARM/vmul.ll
llvm/trunk/test/CodeGen/ARM/vneg.ll
llvm/trunk/test/CodeGen/ARM/vpadal.ll
llvm/trunk/test/CodeGen/ARM/vpadd.ll
llvm/trunk/test/CodeGen/ARM/vpminmax.ll
llvm/trunk/test/CodeGen/ARM/vqadd.ll
llvm/trunk/test/CodeGen/ARM/vqdmul.ll
llvm/trunk/test/CodeGen/ARM/vqshl.ll
llvm/trunk/test/CodeGen/ARM/vqshrn.ll
llvm/trunk/test/CodeGen/ARM/vqsub.ll
llvm/trunk/test/CodeGen/ARM/vrec.ll
llvm/trunk/test/CodeGen/ARM/vrev.ll
llvm/trunk/test/CodeGen/ARM/vselect_imax.ll
llvm/trunk/test/CodeGen/ARM/vshift.ll
llvm/trunk/test/CodeGen/ARM/vshiftins.ll
llvm/trunk/test/CodeGen/ARM/vshl.ll
llvm/trunk/test/CodeGen/ARM/vshll.ll
llvm/trunk/test/CodeGen/ARM/vshrn.ll
llvm/trunk/test/CodeGen/ARM/vsra.ll
llvm/trunk/test/CodeGen/ARM/vst1.ll
llvm/trunk/test/CodeGen/ARM/vst2.ll
llvm/trunk/test/CodeGen/ARM/vst3.ll
llvm/trunk/test/CodeGen/ARM/vst4.ll
llvm/trunk/test/CodeGen/ARM/vstlane.ll
llvm/trunk/test/CodeGen/ARM/vsub.ll
llvm/trunk/test/CodeGen/ARM/vtbl.ll
llvm/trunk/test/CodeGen/ARM/vtrn.ll
llvm/trunk/test/CodeGen/ARM/vuzp.ll
llvm/trunk/test/CodeGen/ARM/vzip.ll
llvm/trunk/test/CodeGen/ARM/zextload_demandedbits.ll
llvm/trunk/test/CodeGen/BPF/basictest.ll
llvm/trunk/test/CodeGen/BPF/ex1.ll
llvm/trunk/test/CodeGen/BPF/intrinsics.ll
llvm/trunk/test/CodeGen/BPF/load.ll
llvm/trunk/test/CodeGen/BPF/loops.ll
llvm/trunk/test/CodeGen/BPF/struct_ret1.ll
llvm/trunk/test/CodeGen/CPP/2009-05-01-Long-Double.ll
llvm/trunk/test/CodeGen/CPP/2009-05-04-CondBr.ll
llvm/trunk/test/CodeGen/Generic/2003-05-28-ManyArgs.ll
llvm/trunk/test/CodeGen/Generic/2003-05-30-BadFoldGEP.ll
llvm/trunk/test/CodeGen/Generic/2003-07-29-BadConstSbyte.ll
llvm/trunk/test/CodeGen/Generic/2004-05-09-LiveVarPartialRegister.ll
llvm/trunk/test/CodeGen/Generic/2006-02-12-InsertLibcall.ll
llvm/trunk/test/CodeGen/Generic/2006-03-01-dagcombineinfloop.ll
llvm/trunk/test/CodeGen/Generic/2006-04-26-SetCCAnd.ll
llvm/trunk/test/CodeGen/Generic/2006-06-13-ComputeMaskedBitsCrash.ll
llvm/trunk/test/CodeGen/Generic/2006-06-28-SimplifySetCCCrash.ll
llvm/trunk/test/CodeGen/Generic/2006-09-02-LocalAllocCrash.ll
llvm/trunk/test/CodeGen/Generic/2006-11-20-DAGCombineCrash.ll
llvm/trunk/test/CodeGen/Generic/2007-01-15-LoadSelectCycle.ll
llvm/trunk/test/CodeGen/Generic/2008-01-25-dag-combine-mul.ll
llvm/trunk/test/CodeGen/Generic/2008-01-30-LoadCrash.ll
llvm/trunk/test/CodeGen/Generic/2008-02-25-NegateZero.ll
llvm/trunk/test/CodeGen/Generic/2009-03-29-SoftFloatVectorExtract.ll
llvm/trunk/test/CodeGen/Generic/2009-04-28-i128-cmp-crash.ll
llvm/trunk/test/CodeGen/Generic/2011-07-07-ScheduleDAGCrash.ll
llvm/trunk/test/CodeGen/Generic/2012-06-08-APIntCrash.ll
llvm/trunk/test/CodeGen/Generic/2014-02-05-OpaqueConstants.ll
llvm/trunk/test/CodeGen/Generic/APIntLoadStore.ll
llvm/trunk/test/CodeGen/Generic/badFoldGEP.ll
llvm/trunk/test/CodeGen/Generic/builtin-expect.ll
llvm/trunk/test/CodeGen/Generic/cast-fp.ll
llvm/trunk/test/CodeGen/Generic/constindices.ll
llvm/trunk/test/CodeGen/Generic/crash.ll
llvm/trunk/test/CodeGen/Generic/dag-combine-crash.ll
llvm/trunk/test/CodeGen/Generic/empty-load-store.ll
llvm/trunk/test/CodeGen/Generic/empty-phi.ll
llvm/trunk/test/CodeGen/Generic/fp-to-int-invalid.ll
llvm/trunk/test/CodeGen/Generic/fwdtwice.ll
llvm/trunk/test/CodeGen/Generic/global-ret0.ll
llvm/trunk/test/CodeGen/Generic/inline-asm-mem-clobber.ll
llvm/trunk/test/CodeGen/Generic/pr2625.ll
llvm/trunk/test/CodeGen/Generic/print-arith-fp.ll
llvm/trunk/test/CodeGen/Generic/print-arith-int.ll
llvm/trunk/test/CodeGen/Generic/print-mul-exp.ll
llvm/trunk/test/CodeGen/Generic/print-mul.ll
llvm/trunk/test/CodeGen/Generic/print-shift.ll
llvm/trunk/test/CodeGen/Generic/select.ll
llvm/trunk/test/CodeGen/Generic/undef-phi.ll
llvm/trunk/test/CodeGen/Generic/v-split.ll
llvm/trunk/test/CodeGen/Generic/vector-casts.ll
llvm/trunk/test/CodeGen/Generic/vector-identity-shuffle.ll
llvm/trunk/test/CodeGen/Generic/vector.ll
llvm/trunk/test/CodeGen/Hexagon/BranchPredict.ll
llvm/trunk/test/CodeGen/Hexagon/absaddr-store.ll
llvm/trunk/test/CodeGen/Hexagon/absimm.ll
llvm/trunk/test/CodeGen/Hexagon/always-ext.ll
llvm/trunk/test/CodeGen/Hexagon/block-addr.ll
llvm/trunk/test/CodeGen/Hexagon/cext-check.ll
llvm/trunk/test/CodeGen/Hexagon/cext-valid-packet2.ll
llvm/trunk/test/CodeGen/Hexagon/cmp_pred2.ll
llvm/trunk/test/CodeGen/Hexagon/cmpb_pred.ll
llvm/trunk/test/CodeGen/Hexagon/combine.ll
llvm/trunk/test/CodeGen/Hexagon/combine_ir.ll
llvm/trunk/test/CodeGen/Hexagon/convertdptoint.ll
llvm/trunk/test/CodeGen/Hexagon/convertdptoll.ll
llvm/trunk/test/CodeGen/Hexagon/convertsptoint.ll
llvm/trunk/test/CodeGen/Hexagon/convertsptoll.ll
llvm/trunk/test/CodeGen/Hexagon/dadd.ll
llvm/trunk/test/CodeGen/Hexagon/dmul.ll
llvm/trunk/test/CodeGen/Hexagon/double.ll
llvm/trunk/test/CodeGen/Hexagon/doubleconvert-ieee-rnd-near.ll
llvm/trunk/test/CodeGen/Hexagon/dsub.ll
llvm/trunk/test/CodeGen/Hexagon/extload-combine.ll
llvm/trunk/test/CodeGen/Hexagon/fadd.ll
llvm/trunk/test/CodeGen/Hexagon/fcmp.ll
llvm/trunk/test/CodeGen/Hexagon/float.ll
llvm/trunk/test/CodeGen/Hexagon/floatconvert-ieee-rnd-near.ll
llvm/trunk/test/CodeGen/Hexagon/fmul.ll
llvm/trunk/test/CodeGen/Hexagon/frame.ll
llvm/trunk/test/CodeGen/Hexagon/fsub.ll
llvm/trunk/test/CodeGen/Hexagon/fusedandshift.ll
llvm/trunk/test/CodeGen/Hexagon/gp-plus-offset-load.ll
llvm/trunk/test/CodeGen/Hexagon/gp-rel.ll
llvm/trunk/test/CodeGen/Hexagon/hwloop-cleanup.ll
llvm/trunk/test/CodeGen/Hexagon/hwloop-dbg.ll
llvm/trunk/test/CodeGen/Hexagon/hwloop-le.ll
llvm/trunk/test/CodeGen/Hexagon/hwloop-lt.ll
llvm/trunk/test/CodeGen/Hexagon/hwloop-ne.ll
llvm/trunk/test/CodeGen/Hexagon/i16_VarArg.ll
llvm/trunk/test/CodeGen/Hexagon/i1_VarArg.ll
llvm/trunk/test/CodeGen/Hexagon/i8_VarArg.ll
llvm/trunk/test/CodeGen/Hexagon/idxload-with-zero-offset.ll
llvm/trunk/test/CodeGen/Hexagon/macint.ll
llvm/trunk/test/CodeGen/Hexagon/memops.ll
llvm/trunk/test/CodeGen/Hexagon/memops1.ll
llvm/trunk/test/CodeGen/Hexagon/memops2.ll
llvm/trunk/test/CodeGen/Hexagon/memops3.ll
llvm/trunk/test/CodeGen/Hexagon/misaligned-access.ll
llvm/trunk/test/CodeGen/Hexagon/mpy.ll
llvm/trunk/test/CodeGen/Hexagon/newvaluejump.ll
llvm/trunk/test/CodeGen/Hexagon/newvaluejump2.ll
llvm/trunk/test/CodeGen/Hexagon/newvaluestore.ll
llvm/trunk/test/CodeGen/Hexagon/opt-fabs.ll
llvm/trunk/test/CodeGen/Hexagon/opt-fneg.ll
llvm/trunk/test/CodeGen/Hexagon/postinc-load.ll
llvm/trunk/test/CodeGen/Hexagon/postinc-store.ll
llvm/trunk/test/CodeGen/Hexagon/pred-gp.ll
llvm/trunk/test/CodeGen/Hexagon/pred-instrs.ll
llvm/trunk/test/CodeGen/Hexagon/remove_lsr.ll
llvm/trunk/test/CodeGen/Hexagon/static.ll
llvm/trunk/test/CodeGen/Hexagon/struct_args.ll
llvm/trunk/test/CodeGen/Hexagon/tfr-to-combine.ll
llvm/trunk/test/CodeGen/Hexagon/union-1.ll
llvm/trunk/test/CodeGen/Hexagon/vaddh.ll
llvm/trunk/test/CodeGen/Hexagon/validate-offset.ll
llvm/trunk/test/CodeGen/Hexagon/zextloadi1.ll
llvm/trunk/test/CodeGen/MSP430/2009-05-10-CyclicDAG.ll
llvm/trunk/test/CodeGen/MSP430/2009-05-17-Rot.ll
llvm/trunk/test/CodeGen/MSP430/2009-05-17-Shift.ll
llvm/trunk/test/CodeGen/MSP430/2009-08-25-DynamicStackAlloc.ll
llvm/trunk/test/CodeGen/MSP430/2009-09-18-AbsoluteAddr.ll
llvm/trunk/test/CodeGen/MSP430/2009-10-10-OrImpDef.ll
llvm/trunk/test/CodeGen/MSP430/2009-11-08-InvalidResNo.ll
llvm/trunk/test/CodeGen/MSP430/2010-05-01-CombinerAnd.ll
llvm/trunk/test/CodeGen/MSP430/AddrMode-bis-rx.ll
llvm/trunk/test/CodeGen/MSP430/AddrMode-bis-xr.ll
llvm/trunk/test/CodeGen/MSP430/AddrMode-mov-rx.ll
llvm/trunk/test/CodeGen/MSP430/Inst16mi.ll
llvm/trunk/test/CodeGen/MSP430/Inst16mm.ll
llvm/trunk/test/CodeGen/MSP430/Inst16mr.ll
llvm/trunk/test/CodeGen/MSP430/Inst16rm.ll
llvm/trunk/test/CodeGen/MSP430/Inst8mi.ll
llvm/trunk/test/CodeGen/MSP430/Inst8mm.ll
llvm/trunk/test/CodeGen/MSP430/Inst8mr.ll
llvm/trunk/test/CodeGen/MSP430/Inst8rm.ll
llvm/trunk/test/CodeGen/MSP430/bit.ll
llvm/trunk/test/CodeGen/MSP430/byval.ll
llvm/trunk/test/CodeGen/MSP430/indirectbr.ll
llvm/trunk/test/CodeGen/MSP430/indirectbr2.ll
llvm/trunk/test/CodeGen/MSP430/inline-asm.ll
llvm/trunk/test/CodeGen/MSP430/jumptable.ll
llvm/trunk/test/CodeGen/MSP430/memset.ll
llvm/trunk/test/CodeGen/MSP430/misched-msp430.ll
llvm/trunk/test/CodeGen/MSP430/mult-alt-generic-msp430.ll
llvm/trunk/test/CodeGen/MSP430/postinc.ll
llvm/trunk/test/CodeGen/Mips/2008-07-15-SmallSection.ll
llvm/trunk/test/CodeGen/Mips/2008-08-01-AsmInline.ll
llvm/trunk/test/CodeGen/Mips/2008-08-03-ReturnDouble.ll
llvm/trunk/test/CodeGen/Mips/2008-10-13-LegalizerBug.ll
llvm/trunk/test/CodeGen/Mips/2008-11-10-xint_to_fp.ll
llvm/trunk/test/CodeGen/Mips/2010-07-20-Switch.ll
llvm/trunk/test/CodeGen/Mips/Fast-ISel/br1.ll
llvm/trunk/test/CodeGen/Mips/Fast-ISel/callabi.ll
llvm/trunk/test/CodeGen/Mips/Fast-ISel/fpcmpa.ll
llvm/trunk/test/CodeGen/Mips/Fast-ISel/fpext.ll
llvm/trunk/test/CodeGen/Mips/Fast-ISel/fpintconv.ll
llvm/trunk/test/CodeGen/Mips/Fast-ISel/fptrunc.ll
llvm/trunk/test/CodeGen/Mips/Fast-ISel/icmpa.ll
llvm/trunk/test/CodeGen/Mips/Fast-ISel/loadstore2.ll
llvm/trunk/test/CodeGen/Mips/Fast-ISel/loadstoreconv.ll
llvm/trunk/test/CodeGen/Mips/Fast-ISel/overflt.ll
llvm/trunk/test/CodeGen/Mips/Fast-ISel/retabi.ll
llvm/trunk/test/CodeGen/Mips/Fast-ISel/shift.ll
llvm/trunk/test/CodeGen/Mips/addi.ll
llvm/trunk/test/CodeGen/Mips/addressing-mode.ll
llvm/trunk/test/CodeGen/Mips/align16.ll
llvm/trunk/test/CodeGen/Mips/alloca.ll
llvm/trunk/test/CodeGen/Mips/alloca16.ll
llvm/trunk/test/CodeGen/Mips/and1.ll
llvm/trunk/test/CodeGen/Mips/atomic.ll
llvm/trunk/test/CodeGen/Mips/atomicops.ll
llvm/trunk/test/CodeGen/Mips/beqzc.ll
llvm/trunk/test/CodeGen/Mips/beqzc1.ll
llvm/trunk/test/CodeGen/Mips/biggot.ll
llvm/trunk/test/CodeGen/Mips/brconeq.ll
llvm/trunk/test/CodeGen/Mips/brconeqk.ll
llvm/trunk/test/CodeGen/Mips/brconeqz.ll
llvm/trunk/test/CodeGen/Mips/brconge.ll
llvm/trunk/test/CodeGen/Mips/brcongt.ll
llvm/trunk/test/CodeGen/Mips/brconle.ll
llvm/trunk/test/CodeGen/Mips/brconlt.ll
llvm/trunk/test/CodeGen/Mips/brconne.ll
llvm/trunk/test/CodeGen/Mips/brconnek.ll
llvm/trunk/test/CodeGen/Mips/brconnez.ll
llvm/trunk/test/CodeGen/Mips/brdelayslot.ll
llvm/trunk/test/CodeGen/Mips/brind.ll
llvm/trunk/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-byte.ll
llvm/trunk/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-combinations.ll
llvm/trunk/test/CodeGen/Mips/cconv/arguments-varargs-small-structs-multiple-args.ll
llvm/trunk/test/CodeGen/Mips/cconv/return-float.ll
llvm/trunk/test/CodeGen/Mips/cconv/return-hard-float.ll
llvm/trunk/test/CodeGen/Mips/cconv/return-hard-fp128.ll
llvm/trunk/test/CodeGen/Mips/cconv/return-hard-struct-f128.ll
llvm/trunk/test/CodeGen/Mips/cconv/return-struct.ll
llvm/trunk/test/CodeGen/Mips/cconv/return.ll
llvm/trunk/test/CodeGen/Mips/cfi_offset.ll
llvm/trunk/test/CodeGen/Mips/ci2.ll
llvm/trunk/test/CodeGen/Mips/cmov.ll
llvm/trunk/test/CodeGen/Mips/cmplarge.ll
llvm/trunk/test/CodeGen/Mips/const4a.ll
llvm/trunk/test/CodeGen/Mips/ctlz.ll
llvm/trunk/test/CodeGen/Mips/disable-tail-merge.ll
llvm/trunk/test/CodeGen/Mips/div.ll
llvm/trunk/test/CodeGen/Mips/div_rem.ll
llvm/trunk/test/CodeGen/Mips/divrem.ll
llvm/trunk/test/CodeGen/Mips/divu.ll
llvm/trunk/test/CodeGen/Mips/divu_remu.ll
llvm/trunk/test/CodeGen/Mips/dsp-patterns.ll
llvm/trunk/test/CodeGen/Mips/dsp-vec-load-store.ll
llvm/trunk/test/CodeGen/Mips/eh.ll
llvm/trunk/test/CodeGen/Mips/emit-big-cst.ll
llvm/trunk/test/CodeGen/Mips/ex2.ll
llvm/trunk/test/CodeGen/Mips/extins.ll
llvm/trunk/test/CodeGen/Mips/f16abs.ll
llvm/trunk/test/CodeGen/Mips/fastcc.ll
llvm/trunk/test/CodeGen/Mips/fixdfsf.ll
llvm/trunk/test/CodeGen/Mips/fp-indexed-ls.ll
llvm/trunk/test/CodeGen/Mips/fp-spill-reload.ll
llvm/trunk/test/CodeGen/Mips/fp16instrinsmc.ll
llvm/trunk/test/CodeGen/Mips/fp16static.ll
llvm/trunk/test/CodeGen/Mips/fpneeded.ll
llvm/trunk/test/CodeGen/Mips/fpnotneeded.ll
llvm/trunk/test/CodeGen/Mips/global-address.ll
llvm/trunk/test/CodeGen/Mips/gpreg-lazy-binding.ll
llvm/trunk/test/CodeGen/Mips/gprestore.ll
llvm/trunk/test/CodeGen/Mips/hf16_1.ll
llvm/trunk/test/CodeGen/Mips/hf16call32.ll
llvm/trunk/test/CodeGen/Mips/hf16call32_body.ll
llvm/trunk/test/CodeGen/Mips/hf1_body.ll
llvm/trunk/test/CodeGen/Mips/hfptrcall.ll
llvm/trunk/test/CodeGen/Mips/inlineasm-assembler-directives.ll
llvm/trunk/test/CodeGen/Mips/inlineasm-operand-code.ll
llvm/trunk/test/CodeGen/Mips/inlineasm64.ll
llvm/trunk/test/CodeGen/Mips/internalfunc.ll
llvm/trunk/test/CodeGen/Mips/jtstat.ll
llvm/trunk/test/CodeGen/Mips/l3mc.ll
llvm/trunk/test/CodeGen/Mips/lb1.ll
llvm/trunk/test/CodeGen/Mips/lbu1.ll
llvm/trunk/test/CodeGen/Mips/lcb2.ll
llvm/trunk/test/CodeGen/Mips/lcb3c.ll
llvm/trunk/test/CodeGen/Mips/lcb4a.ll
llvm/trunk/test/CodeGen/Mips/lcb5.ll
llvm/trunk/test/CodeGen/Mips/lh1.ll
llvm/trunk/test/CodeGen/Mips/lhu1.ll
llvm/trunk/test/CodeGen/Mips/llcarry.ll
llvm/trunk/test/CodeGen/Mips/load-store-left-right.ll
llvm/trunk/test/CodeGen/Mips/machineverifier.ll
llvm/trunk/test/CodeGen/Mips/mbrsize4a.ll
llvm/trunk/test/CodeGen/Mips/micromips-addiu.ll
llvm/trunk/test/CodeGen/Mips/micromips-and16.ll
llvm/trunk/test/CodeGen/Mips/micromips-andi.ll
llvm/trunk/test/CodeGen/Mips/micromips-compact-branches.ll
llvm/trunk/test/CodeGen/Mips/micromips-delay-slot-jr.ll
llvm/trunk/test/CodeGen/Mips/micromips-delay-slot.ll
llvm/trunk/test/CodeGen/Mips/micromips-gp-rc.ll
llvm/trunk/test/CodeGen/Mips/micromips-jal.ll
llvm/trunk/test/CodeGen/Mips/micromips-load-effective-address.ll
llvm/trunk/test/CodeGen/Mips/micromips-or16.ll
llvm/trunk/test/CodeGen/Mips/micromips-rdhwr-directives.ll
llvm/trunk/test/CodeGen/Mips/micromips-shift.ll
llvm/trunk/test/CodeGen/Mips/micromips-sw-lw-16.ll
llvm/trunk/test/CodeGen/Mips/micromips-xor16.ll
llvm/trunk/test/CodeGen/Mips/mips16_32_8.ll
llvm/trunk/test/CodeGen/Mips/mips16_fpret.ll
llvm/trunk/test/CodeGen/Mips/mips16ex.ll
llvm/trunk/test/CodeGen/Mips/mips16fpe.ll
llvm/trunk/test/CodeGen/Mips/mips64-f128-call.ll
llvm/trunk/test/CodeGen/Mips/mips64-f128.ll
llvm/trunk/test/CodeGen/Mips/mips64directive.ll
llvm/trunk/test/CodeGen/Mips/mips64fpldst.ll
llvm/trunk/test/CodeGen/Mips/mips64instrs.ll
llvm/trunk/test/CodeGen/Mips/mips64intldst.ll
llvm/trunk/test/CodeGen/Mips/mips64sinttofpsf.ll
llvm/trunk/test/CodeGen/Mips/mipslopat.ll
llvm/trunk/test/CodeGen/Mips/misha.ll
llvm/trunk/test/CodeGen/Mips/mno-ldc1-sdc1.ll
llvm/trunk/test/CodeGen/Mips/msa/2r.ll
llvm/trunk/test/CodeGen/Mips/msa/2r_vector_scalar.ll
llvm/trunk/test/CodeGen/Mips/msa/2rf.ll
llvm/trunk/test/CodeGen/Mips/msa/2rf_exup.ll
llvm/trunk/test/CodeGen/Mips/msa/2rf_float_int.ll
llvm/trunk/test/CodeGen/Mips/msa/2rf_fq.ll
llvm/trunk/test/CodeGen/Mips/msa/2rf_int_float.ll
llvm/trunk/test/CodeGen/Mips/msa/2rf_tq.ll
llvm/trunk/test/CodeGen/Mips/msa/3r-a.ll
llvm/trunk/test/CodeGen/Mips/msa/3r-b.ll
llvm/trunk/test/CodeGen/Mips/msa/3r-c.ll
llvm/trunk/test/CodeGen/Mips/msa/3r-d.ll
llvm/trunk/test/CodeGen/Mips/msa/3r-i.ll
llvm/trunk/test/CodeGen/Mips/msa/3r-m.ll
llvm/trunk/test/CodeGen/Mips/msa/3r-p.ll
llvm/trunk/test/CodeGen/Mips/msa/3r-s.ll
llvm/trunk/test/CodeGen/Mips/msa/3r-v.ll
llvm/trunk/test/CodeGen/Mips/msa/3r_4r.ll
llvm/trunk/test/CodeGen/Mips/msa/3r_4r_widen.ll
llvm/trunk/test/CodeGen/Mips/msa/3r_splat.ll
llvm/trunk/test/CodeGen/Mips/msa/3rf.ll
llvm/trunk/test/CodeGen/Mips/msa/3rf_4rf.ll
llvm/trunk/test/CodeGen/Mips/msa/3rf_4rf_q.ll
llvm/trunk/test/CodeGen/Mips/msa/3rf_exdo.ll
llvm/trunk/test/CodeGen/Mips/msa/3rf_float_int.ll
llvm/trunk/test/CodeGen/Mips/msa/3rf_int_float.ll
llvm/trunk/test/CodeGen/Mips/msa/3rf_q.ll
llvm/trunk/test/CodeGen/Mips/msa/arithmetic.ll
llvm/trunk/test/CodeGen/Mips/msa/arithmetic_float.ll
llvm/trunk/test/CodeGen/Mips/msa/basic_operations.ll
llvm/trunk/test/CodeGen/Mips/msa/basic_operations_float.ll
llvm/trunk/test/CodeGen/Mips/msa/bit.ll
llvm/trunk/test/CodeGen/Mips/msa/bitcast.ll
llvm/trunk/test/CodeGen/Mips/msa/bitwise.ll
llvm/trunk/test/CodeGen/Mips/msa/compare.ll
llvm/trunk/test/CodeGen/Mips/msa/compare_float.ll
llvm/trunk/test/CodeGen/Mips/msa/elm_copy.ll
llvm/trunk/test/CodeGen/Mips/msa/elm_insv.ll
llvm/trunk/test/CodeGen/Mips/msa/elm_move.ll
llvm/trunk/test/CodeGen/Mips/msa/elm_shift_slide.ll
llvm/trunk/test/CodeGen/Mips/msa/frameindex.ll
llvm/trunk/test/CodeGen/Mips/msa/i10.ll
llvm/trunk/test/CodeGen/Mips/msa/i5-a.ll
llvm/trunk/test/CodeGen/Mips/msa/i5-b.ll
llvm/trunk/test/CodeGen/Mips/msa/i5-c.ll
llvm/trunk/test/CodeGen/Mips/msa/i5-m.ll
llvm/trunk/test/CodeGen/Mips/msa/i5-s.ll
llvm/trunk/test/CodeGen/Mips/msa/i5_ld_st.ll
llvm/trunk/test/CodeGen/Mips/msa/i8.ll
llvm/trunk/test/CodeGen/Mips/msa/inline-asm.ll
llvm/trunk/test/CodeGen/Mips/msa/llvm-stress-s1704963983.ll
llvm/trunk/test/CodeGen/Mips/msa/llvm-stress-s1935737938.ll
llvm/trunk/test/CodeGen/Mips/msa/llvm-stress-s2704903805.ll
llvm/trunk/test/CodeGen/Mips/msa/llvm-stress-s3861334421.ll
llvm/trunk/test/CodeGen/Mips/msa/llvm-stress-s3926023935.ll
llvm/trunk/test/CodeGen/Mips/msa/llvm-stress-s3997499501.ll
llvm/trunk/test/CodeGen/Mips/msa/llvm-stress-s525530439.ll
llvm/trunk/test/CodeGen/Mips/msa/llvm-stress-s997348632.ll
llvm/trunk/test/CodeGen/Mips/msa/shuffle.ll
llvm/trunk/test/CodeGen/Mips/msa/spill.ll
llvm/trunk/test/CodeGen/Mips/msa/vec.ll
llvm/trunk/test/CodeGen/Mips/msa/vecs10.ll
llvm/trunk/test/CodeGen/Mips/mul.ll
llvm/trunk/test/CodeGen/Mips/mulll.ll
llvm/trunk/test/CodeGen/Mips/mulull.ll
llvm/trunk/test/CodeGen/Mips/nacl-align.ll
llvm/trunk/test/CodeGen/Mips/nacl-branch-delay.ll
llvm/trunk/test/CodeGen/Mips/nacl-reserved-regs.ll
llvm/trunk/test/CodeGen/Mips/neg1.ll
llvm/trunk/test/CodeGen/Mips/no-odd-spreg-msa.ll
llvm/trunk/test/CodeGen/Mips/nomips16.ll
llvm/trunk/test/CodeGen/Mips/not1.ll
llvm/trunk/test/CodeGen/Mips/o32_cc_byval.ll
llvm/trunk/test/CodeGen/Mips/o32_cc_vararg.ll
llvm/trunk/test/CodeGen/Mips/optimize-pic-o0.ll
llvm/trunk/test/CodeGen/Mips/or1.ll
llvm/trunk/test/CodeGen/Mips/prevent-hoisting.ll
llvm/trunk/test/CodeGen/Mips/private.ll
llvm/trunk/test/CodeGen/Mips/ra-allocatable.ll
llvm/trunk/test/CodeGen/Mips/rdhwr-directives.ll
llvm/trunk/test/CodeGen/Mips/rem.ll
llvm/trunk/test/CodeGen/Mips/remu.ll
llvm/trunk/test/CodeGen/Mips/s2rem.ll
llvm/trunk/test/CodeGen/Mips/sb1.ll
llvm/trunk/test/CodeGen/Mips/sel1c.ll
llvm/trunk/test/CodeGen/Mips/sel2c.ll
llvm/trunk/test/CodeGen/Mips/selTBteqzCmpi.ll
llvm/trunk/test/CodeGen/Mips/selTBtnezCmpi.ll
llvm/trunk/test/CodeGen/Mips/selTBtnezSlti.ll
llvm/trunk/test/CodeGen/Mips/select.ll
llvm/trunk/test/CodeGen/Mips/seleq.ll
llvm/trunk/test/CodeGen/Mips/seleqk.ll
llvm/trunk/test/CodeGen/Mips/selgek.ll
llvm/trunk/test/CodeGen/Mips/selgt.ll
llvm/trunk/test/CodeGen/Mips/selle.ll
llvm/trunk/test/CodeGen/Mips/selltk.ll
llvm/trunk/test/CodeGen/Mips/selne.ll
llvm/trunk/test/CodeGen/Mips/selnek.ll
llvm/trunk/test/CodeGen/Mips/selpat.ll
llvm/trunk/test/CodeGen/Mips/seteq.ll
llvm/trunk/test/CodeGen/Mips/seteqz.ll
llvm/trunk/test/CodeGen/Mips/setge.ll
llvm/trunk/test/CodeGen/Mips/setgek.ll
llvm/trunk/test/CodeGen/Mips/setle.ll
llvm/trunk/test/CodeGen/Mips/setlt.ll
llvm/trunk/test/CodeGen/Mips/setltk.ll
llvm/trunk/test/CodeGen/Mips/setne.ll
llvm/trunk/test/CodeGen/Mips/setuge.ll
llvm/trunk/test/CodeGen/Mips/setugt.ll
llvm/trunk/test/CodeGen/Mips/setule.ll
llvm/trunk/test/CodeGen/Mips/setult.ll
llvm/trunk/test/CodeGen/Mips/setultk.ll
llvm/trunk/test/CodeGen/Mips/sh1.ll
llvm/trunk/test/CodeGen/Mips/simplebr.ll
llvm/trunk/test/CodeGen/Mips/sitofp-selectcc-opt.ll
llvm/trunk/test/CodeGen/Mips/sll1.ll
llvm/trunk/test/CodeGen/Mips/sll2.ll
llvm/trunk/test/CodeGen/Mips/small-section-reserve-gp.ll
llvm/trunk/test/CodeGen/Mips/spill-copy-acreg.ll
llvm/trunk/test/CodeGen/Mips/sra1.ll
llvm/trunk/test/CodeGen/Mips/sra2.ll
llvm/trunk/test/CodeGen/Mips/srl1.ll
llvm/trunk/test/CodeGen/Mips/srl2.ll
llvm/trunk/test/CodeGen/Mips/stackcoloring.ll
llvm/trunk/test/CodeGen/Mips/stchar.ll
llvm/trunk/test/CodeGen/Mips/stldst.ll
llvm/trunk/test/CodeGen/Mips/sub1.ll
llvm/trunk/test/CodeGen/Mips/sub2.ll
llvm/trunk/test/CodeGen/Mips/tailcall.ll
llvm/trunk/test/CodeGen/Mips/tls.ll
llvm/trunk/test/CodeGen/Mips/tls16.ll
llvm/trunk/test/CodeGen/Mips/tls16_2.ll
llvm/trunk/test/CodeGen/Mips/uitofp.ll
llvm/trunk/test/CodeGen/Mips/vector-load-store.ll
llvm/trunk/test/CodeGen/Mips/vector-setcc.ll
llvm/trunk/test/CodeGen/Mips/xor1.ll
llvm/trunk/test/CodeGen/Mips/zeroreg.ll
llvm/trunk/test/CodeGen/NVPTX/access-non-generic.ll
llvm/trunk/test/CodeGen/NVPTX/addrspacecast.ll
llvm/trunk/test/CodeGen/NVPTX/bug21465.ll
llvm/trunk/test/CodeGen/NVPTX/bug22322.ll
llvm/trunk/test/CodeGen/NVPTX/call-with-alloca-buffer.ll
llvm/trunk/test/CodeGen/NVPTX/fp16.ll
llvm/trunk/test/CodeGen/NVPTX/generic-to-nvvm.ll
llvm/trunk/test/CodeGen/NVPTX/half.ll
llvm/trunk/test/CodeGen/NVPTX/i1-global.ll
llvm/trunk/test/CodeGen/NVPTX/i8-param.ll
llvm/trunk/test/CodeGen/NVPTX/ld-addrspace.ll
llvm/trunk/test/CodeGen/NVPTX/ld-generic.ll
llvm/trunk/test/CodeGen/NVPTX/load-sext-i1.ll
llvm/trunk/test/CodeGen/NVPTX/machine-sink.ll
llvm/trunk/test/CodeGen/NVPTX/misaligned-vector-ldst.ll
llvm/trunk/test/CodeGen/NVPTX/noduplicate-syncthreads.ll
llvm/trunk/test/CodeGen/NVPTX/nounroll.ll
llvm/trunk/test/CodeGen/NVPTX/pr13291-i1-store.ll
llvm/trunk/test/CodeGen/NVPTX/pr16278.ll
llvm/trunk/test/CodeGen/NVPTX/refl1.ll
llvm/trunk/test/CodeGen/NVPTX/sched1.ll
llvm/trunk/test/CodeGen/NVPTX/sched2.ll
llvm/trunk/test/CodeGen/NVPTX/shift-parts.ll
llvm/trunk/test/CodeGen/NVPTX/simple-call.ll
llvm/trunk/test/CodeGen/NVPTX/vector-compare.ll
llvm/trunk/test/CodeGen/NVPTX/vector-loads.ll
llvm/trunk/test/CodeGen/NVPTX/vector-select.ll
llvm/trunk/test/CodeGen/NVPTX/weak-global.ll
llvm/trunk/test/CodeGen/PowerPC/2005-11-30-vastart-crash.ll
llvm/trunk/test/CodeGen/PowerPC/2006-01-20-ShiftPartsCrash.ll
llvm/trunk/test/CodeGen/PowerPC/2006-04-05-splat-ish.ll
llvm/trunk/test/CodeGen/PowerPC/2006-05-12-rlwimi-crash.ll
llvm/trunk/test/CodeGen/PowerPC/2006-07-07-ComputeMaskedBits.ll
llvm/trunk/test/CodeGen/PowerPC/2006-07-19-stwbrx-crash.ll
llvm/trunk/test/CodeGen/PowerPC/2006-08-15-SelectionCrash.ll
llvm/trunk/test/CodeGen/PowerPC/2006-12-07-SelectCrash.ll
llvm/trunk/test/CodeGen/PowerPC/2007-01-15-AsmDialect.ll
llvm/trunk/test/CodeGen/PowerPC/2007-03-24-cntlzd.ll
llvm/trunk/test/CodeGen/PowerPC/2007-03-30-SpillerCrash.ll
llvm/trunk/test/CodeGen/PowerPC/2007-04-30-InlineAsmEarlyClobber.ll
llvm/trunk/test/CodeGen/PowerPC/2007-05-22-tailmerge-3.ll
llvm/trunk/test/CodeGen/PowerPC/2007-09-07-LoadStoreIdxForms.ll
llvm/trunk/test/CodeGen/PowerPC/2007-09-08-unaligned.ll
llvm/trunk/test/CodeGen/PowerPC/2007-10-18-PtrArithmetic.ll
llvm/trunk/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert.ll
llvm/trunk/test/CodeGen/PowerPC/2007-10-21-LocalRegAllocAssert2.ll
llvm/trunk/test/CodeGen/PowerPC/2007-11-16-landingpad-split.ll
llvm/trunk/test/CodeGen/PowerPC/2007-11-19-VectorSplitting.ll
llvm/trunk/test/CodeGen/PowerPC/2008-02-09-LocalRegAllocAssert.ll
llvm/trunk/test/CodeGen/PowerPC/2008-03-05-RegScavengerAssert.ll
llvm/trunk/test/CodeGen/PowerPC/2008-03-17-RegScavengerCrash.ll
llvm/trunk/test/CodeGen/PowerPC/2008-03-24-AddressRegImm.ll
llvm/trunk/test/CodeGen/PowerPC/2008-03-26-CoalescerBug.ll
llvm/trunk/test/CodeGen/PowerPC/2008-04-23-CoalescerCrash.ll
llvm/trunk/test/CodeGen/PowerPC/2008-06-21-F128LoadStore.ll
llvm/trunk/test/CodeGen/PowerPC/2008-06-23-LiveVariablesCrash.ll
llvm/trunk/test/CodeGen/PowerPC/2008-07-15-Bswap.ll
llvm/trunk/test/CodeGen/PowerPC/2008-07-15-SignExtendInreg.ll
llvm/trunk/test/CodeGen/PowerPC/2008-09-12-CoalescerBug.ll
llvm/trunk/test/CodeGen/PowerPC/2008-10-28-UnprocessedNode.ll
llvm/trunk/test/CodeGen/PowerPC/2008-10-31-PPCF128Libcalls.ll
llvm/trunk/test/CodeGen/PowerPC/2009-08-17-inline-asm-addr-mode-breakage.ll
llvm/trunk/test/CodeGen/PowerPC/2010-03-09-indirect-call.ll
llvm/trunk/test/CodeGen/PowerPC/2010-12-18-PPCStackRefs.ll
llvm/trunk/test/CodeGen/PowerPC/2011-12-05-NoSpillDupCR.ll
llvm/trunk/test/CodeGen/PowerPC/2011-12-06-SpillAndRestoreCR.ll
llvm/trunk/test/CodeGen/PowerPC/2011-12-08-DemandedBitsMiscompile.ll
llvm/trunk/test/CodeGen/PowerPC/Atomics-64.ll
llvm/trunk/test/CodeGen/PowerPC/a2-fp-basic.ll
llvm/trunk/test/CodeGen/PowerPC/addi-licm.ll
llvm/trunk/test/CodeGen/PowerPC/addi-reassoc.ll
llvm/trunk/test/CodeGen/PowerPC/alias.ll
llvm/trunk/test/CodeGen/PowerPC/and-elim.ll
llvm/trunk/test/CodeGen/PowerPC/anon_aggr.ll
llvm/trunk/test/CodeGen/PowerPC/asm-constraints.ll
llvm/trunk/test/CodeGen/PowerPC/atomic-2.ll
llvm/trunk/test/CodeGen/PowerPC/atomics-indexed.ll
llvm/trunk/test/CodeGen/PowerPC/atomics.ll
llvm/trunk/test/CodeGen/PowerPC/bdzlr.ll
llvm/trunk/test/CodeGen/PowerPC/bswap-load-store.ll
llvm/trunk/test/CodeGen/PowerPC/buildvec_canonicalize.ll
llvm/trunk/test/CodeGen/PowerPC/byval-aliased.ll
llvm/trunk/test/CodeGen/PowerPC/code-align.ll
llvm/trunk/test/CodeGen/PowerPC/complex-return.ll
llvm/trunk/test/CodeGen/PowerPC/cr-spills.ll
llvm/trunk/test/CodeGen/PowerPC/crbits.ll
llvm/trunk/test/CodeGen/PowerPC/crsave.ll
llvm/trunk/test/CodeGen/PowerPC/ctrloop-cpsgn.ll
llvm/trunk/test/CodeGen/PowerPC/ctrloop-fp64.ll
llvm/trunk/test/CodeGen/PowerPC/ctrloop-i64.ll
llvm/trunk/test/CodeGen/PowerPC/ctrloop-le.ll
llvm/trunk/test/CodeGen/PowerPC/ctrloop-lt.ll
llvm/trunk/test/CodeGen/PowerPC/ctrloop-ne.ll
llvm/trunk/test/CodeGen/PowerPC/ctrloop-s000.ll
llvm/trunk/test/CodeGen/PowerPC/ctrloop-sh.ll
llvm/trunk/test/CodeGen/PowerPC/ctrloop-sums.ll
llvm/trunk/test/CodeGen/PowerPC/ctrloops.ll
llvm/trunk/test/CodeGen/PowerPC/dcbt-sched.ll
llvm/trunk/test/CodeGen/PowerPC/delete-node.ll
llvm/trunk/test/CodeGen/PowerPC/dyn-alloca-aligned.ll
llvm/trunk/test/CodeGen/PowerPC/emptystruct.ll
llvm/trunk/test/CodeGen/PowerPC/eqv-andc-orc-nor.ll
llvm/trunk/test/CodeGen/PowerPC/fast-isel-GEP-coalesce.ll
llvm/trunk/test/CodeGen/PowerPC/fast-isel-call.ll
llvm/trunk/test/CodeGen/PowerPC/fast-isel-fold.ll
llvm/trunk/test/CodeGen/PowerPC/fast-isel-load-store.ll
llvm/trunk/test/CodeGen/PowerPC/fast-isel-redefinition.ll
llvm/trunk/test/CodeGen/PowerPC/fastisel-gep-promote-before-add.ll
llvm/trunk/test/CodeGen/PowerPC/floatPSA.ll
llvm/trunk/test/CodeGen/PowerPC/flt-preinc.ll
llvm/trunk/test/CodeGen/PowerPC/fp-to-int-ext.ll
llvm/trunk/test/CodeGen/PowerPC/frounds.ll
llvm/trunk/test/CodeGen/PowerPC/glob-comp-aa-crash.ll
llvm/trunk/test/CodeGen/PowerPC/hidden-vis-2.ll
llvm/trunk/test/CodeGen/PowerPC/hidden-vis.ll
llvm/trunk/test/CodeGen/PowerPC/ia-mem-r0.ll
llvm/trunk/test/CodeGen/PowerPC/indexed-load.ll
llvm/trunk/test/CodeGen/PowerPC/indirectbr.ll
llvm/trunk/test/CodeGen/PowerPC/inlineasm-i64-reg.ll
llvm/trunk/test/CodeGen/PowerPC/isel-rc-nox0.ll
llvm/trunk/test/CodeGen/PowerPC/lbz-from-ld-shift.ll
llvm/trunk/test/CodeGen/PowerPC/lbzux.ll
llvm/trunk/test/CodeGen/PowerPC/ld-st-upd.ll
llvm/trunk/test/CodeGen/PowerPC/ldtoc-inv.ll
llvm/trunk/test/CodeGen/PowerPC/lha.ll
llvm/trunk/test/CodeGen/PowerPC/load-constant-addr.ll
llvm/trunk/test/CodeGen/PowerPC/load-shift-combine.ll
llvm/trunk/test/CodeGen/PowerPC/loop-data-prefetch.ll
llvm/trunk/test/CodeGen/PowerPC/lsa.ll
llvm/trunk/test/CodeGen/PowerPC/lsr-postinc-pos.ll
llvm/trunk/test/CodeGen/PowerPC/mask64.ll
llvm/trunk/test/CodeGen/PowerPC/mcm-1.ll
llvm/trunk/test/CodeGen/PowerPC/mcm-10.ll
llvm/trunk/test/CodeGen/PowerPC/mcm-11.ll
llvm/trunk/test/CodeGen/PowerPC/mcm-2.ll
llvm/trunk/test/CodeGen/PowerPC/mcm-3.ll
llvm/trunk/test/CodeGen/PowerPC/mcm-5.ll
llvm/trunk/test/CodeGen/PowerPC/mcm-6.ll
llvm/trunk/test/CodeGen/PowerPC/mcm-7.ll
llvm/trunk/test/CodeGen/PowerPC/mcm-8.ll
llvm/trunk/test/CodeGen/PowerPC/mcm-9.ll
llvm/trunk/test/CodeGen/PowerPC/mcm-default.ll
llvm/trunk/test/CodeGen/PowerPC/mcm-obj-2.ll
llvm/trunk/test/CodeGen/PowerPC/mcm-obj.ll
llvm/trunk/test/CodeGen/PowerPC/mem-rr-addr-mode.ll
llvm/trunk/test/CodeGen/PowerPC/mem_update.ll
llvm/trunk/test/CodeGen/PowerPC/misched-inorder-latency.ll
llvm/trunk/test/CodeGen/PowerPC/mult-alt-generic-powerpc.ll
llvm/trunk/test/CodeGen/PowerPC/mult-alt-generic-powerpc64.ll
llvm/trunk/test/CodeGen/PowerPC/no-extra-fp-conv-ldst.ll
llvm/trunk/test/CodeGen/PowerPC/novrsave.ll
llvm/trunk/test/CodeGen/PowerPC/or-addressing-mode.ll
llvm/trunk/test/CodeGen/PowerPC/post-ra-ec.ll
llvm/trunk/test/CodeGen/PowerPC/ppc-prologue.ll
llvm/trunk/test/CodeGen/PowerPC/ppc32-lshrti3.ll
llvm/trunk/test/CodeGen/PowerPC/ppc32-pic-large.ll
llvm/trunk/test/CodeGen/PowerPC/ppc32-pic.ll
llvm/trunk/test/CodeGen/PowerPC/ppc440-fp-basic.ll
llvm/trunk/test/CodeGen/PowerPC/ppc64-abi-extend.ll
llvm/trunk/test/CodeGen/PowerPC/ppc64-align-long-double.ll
llvm/trunk/test/CodeGen/PowerPC/ppc64-byval-align.ll
llvm/trunk/test/CodeGen/PowerPC/ppc64-calls.ll
llvm/trunk/test/CodeGen/PowerPC/ppc64-gep-opt.ll
llvm/trunk/test/CodeGen/PowerPC/ppc64-patchpoint.ll
llvm/trunk/test/CodeGen/PowerPC/ppc64-smallarg.ll
llvm/trunk/test/CodeGen/PowerPC/ppc64-toc.ll
llvm/trunk/test/CodeGen/PowerPC/ppc64le-aggregates.ll
llvm/trunk/test/CodeGen/PowerPC/ppc64le-localentry.ll
llvm/trunk/test/CodeGen/PowerPC/ppc64le-smallarg.ll
llvm/trunk/test/CodeGen/PowerPC/ppcf128-1.ll
llvm/trunk/test/CodeGen/PowerPC/ppcf128-endian.ll
llvm/trunk/test/CodeGen/PowerPC/pr13891.ll
llvm/trunk/test/CodeGen/PowerPC/pr15031.ll
llvm/trunk/test/CodeGen/PowerPC/pr15630.ll
llvm/trunk/test/CodeGen/PowerPC/pr16556-2.ll
llvm/trunk/test/CodeGen/PowerPC/pr17168.ll
llvm/trunk/test/CodeGen/PowerPC/pr18663.ll
llvm/trunk/test/CodeGen/PowerPC/pr20442.ll
llvm/trunk/test/CodeGen/PowerPC/preincprep-invoke.ll
llvm/trunk/test/CodeGen/PowerPC/private.ll
llvm/trunk/test/CodeGen/PowerPC/pwr7-gt-nop.ll
llvm/trunk/test/CodeGen/PowerPC/qpx-load.ll
llvm/trunk/test/CodeGen/PowerPC/qpx-s-load.ll
llvm/trunk/test/CodeGen/PowerPC/qpx-s-sel.ll
llvm/trunk/test/CodeGen/PowerPC/qpx-sel.ll
llvm/trunk/test/CodeGen/PowerPC/qpx-unalperm.ll
llvm/trunk/test/CodeGen/PowerPC/quadint-return.ll
llvm/trunk/test/CodeGen/PowerPC/reg-coalesce-simple.ll
llvm/trunk/test/CodeGen/PowerPC/reloc-align.ll
llvm/trunk/test/CodeGen/PowerPC/resolvefi-basereg.ll
llvm/trunk/test/CodeGen/PowerPC/resolvefi-disp.ll
llvm/trunk/test/CodeGen/PowerPC/return-val-i128.ll
llvm/trunk/test/CodeGen/PowerPC/rlwimi-and.ll
llvm/trunk/test/CodeGen/PowerPC/rlwimi-commute.ll
llvm/trunk/test/CodeGen/PowerPC/rlwimi-dyn-and.ll
llvm/trunk/test/CodeGen/PowerPC/rm-zext.ll
llvm/trunk/test/CodeGen/PowerPC/rs-undef-use.ll
llvm/trunk/test/CodeGen/PowerPC/s000-alias-misched.ll
llvm/trunk/test/CodeGen/PowerPC/sjlj.ll
llvm/trunk/test/CodeGen/PowerPC/small-arguments.ll
llvm/trunk/test/CodeGen/PowerPC/split-index-tc.ll
llvm/trunk/test/CodeGen/PowerPC/stack-protector.ll
llvm/trunk/test/CodeGen/PowerPC/stack-realign.ll
llvm/trunk/test/CodeGen/PowerPC/std-unal-fi.ll
llvm/trunk/test/CodeGen/PowerPC/store-load-fwd.ll
llvm/trunk/test/CodeGen/PowerPC/structsinmem.ll
llvm/trunk/test/CodeGen/PowerPC/structsinregs.ll
llvm/trunk/test/CodeGen/PowerPC/subreg-postra-2.ll
llvm/trunk/test/CodeGen/PowerPC/subreg-postra.ll
llvm/trunk/test/CodeGen/PowerPC/subsumes-pred-regs.ll
llvm/trunk/test/CodeGen/PowerPC/tls-cse.ll
llvm/trunk/test/CodeGen/PowerPC/tls-pic.ll
llvm/trunk/test/CodeGen/PowerPC/tls.ll
llvm/trunk/test/CodeGen/PowerPC/toc-load-sched-bug.ll
llvm/trunk/test/CodeGen/PowerPC/trampoline.ll
llvm/trunk/test/CodeGen/PowerPC/unal-altivec-wint.ll
llvm/trunk/test/CodeGen/PowerPC/unal-altivec.ll
llvm/trunk/test/CodeGen/PowerPC/unal-altivec2.ll
llvm/trunk/test/CodeGen/PowerPC/unaligned.ll
llvm/trunk/test/CodeGen/PowerPC/vaddsplat.ll
llvm/trunk/test/CodeGen/PowerPC/varargs-struct-float.ll
llvm/trunk/test/CodeGen/PowerPC/vcmp-fold.ll
llvm/trunk/test/CodeGen/PowerPC/vec-abi-align.ll
llvm/trunk/test/CodeGen/PowerPC/vec_auto_constant.ll
llvm/trunk/test/CodeGen/PowerPC/vec_br_cmp.ll
llvm/trunk/test/CodeGen/PowerPC/vec_buildvector_loadstore.ll
llvm/trunk/test/CodeGen/PowerPC/vec_constants.ll
llvm/trunk/test/CodeGen/PowerPC/vec_conv.ll
llvm/trunk/test/CodeGen/PowerPC/vec_fneg.ll
llvm/trunk/test/CodeGen/PowerPC/vec_misaligned.ll
llvm/trunk/test/CodeGen/PowerPC/vec_mul.ll
llvm/trunk/test/CodeGen/PowerPC/vec_perf_shuffle.ll
llvm/trunk/test/CodeGen/PowerPC/vec_shuffle.ll
llvm/trunk/test/CodeGen/PowerPC/vec_shuffle_le.ll
llvm/trunk/test/CodeGen/PowerPC/vec_splat.ll
llvm/trunk/test/CodeGen/PowerPC/vec_splat_constant.ll
llvm/trunk/test/CodeGen/PowerPC/vec_zero.ll
llvm/trunk/test/CodeGen/PowerPC/vector-identity-shuffle.ll
llvm/trunk/test/CodeGen/PowerPC/vector.ll
llvm/trunk/test/CodeGen/PowerPC/vsx-div.ll
llvm/trunk/test/CodeGen/PowerPC/vsx-infl-copy1.ll
llvm/trunk/test/CodeGen/PowerPC/vsx-infl-copy2.ll
llvm/trunk/test/CodeGen/PowerPC/vsx-ldst-builtin-le.ll
llvm/trunk/test/CodeGen/PowerPC/vsx-ldst.ll
llvm/trunk/test/CodeGen/PowerPC/vsx-minmax.ll
llvm/trunk/test/CodeGen/PowerPC/vsx-p8.ll
llvm/trunk/test/CodeGen/PowerPC/vsx.ll
llvm/trunk/test/CodeGen/PowerPC/vsx_insert_extract_le.ll
llvm/trunk/test/CodeGen/PowerPC/vsx_shuffle_le.ll
llvm/trunk/test/CodeGen/PowerPC/weak_def_can_be_hidden.ll
llvm/trunk/test/CodeGen/PowerPC/zero-not-run.ll
llvm/trunk/test/CodeGen/PowerPC/zext-free.ll
llvm/trunk/test/CodeGen/R600/32-bit-local-address-space.ll
llvm/trunk/test/CodeGen/R600/add-debug.ll
llvm/trunk/test/CodeGen/R600/add.ll
llvm/trunk/test/CodeGen/R600/add_i64.ll
llvm/trunk/test/CodeGen/R600/address-space.ll
llvm/trunk/test/CodeGen/R600/and.ll
llvm/trunk/test/CodeGen/R600/array-ptr-calc-i32.ll
llvm/trunk/test/CodeGen/R600/array-ptr-calc-i64.ll
llvm/trunk/test/CodeGen/R600/big_alu.ll
llvm/trunk/test/CodeGen/R600/bitcast.ll
llvm/trunk/test/CodeGen/R600/bswap.ll
llvm/trunk/test/CodeGen/R600/call.ll
llvm/trunk/test/CodeGen/R600/combine_vloads.ll
llvm/trunk/test/CodeGen/R600/commute_modifiers.ll
llvm/trunk/test/CodeGen/R600/copy-illegal-type.ll
llvm/trunk/test/CodeGen/R600/copy-to-reg.ll
llvm/trunk/test/CodeGen/R600/ctlz_zero_undef.ll
llvm/trunk/test/CodeGen/R600/ctpop.ll
llvm/trunk/test/CodeGen/R600/ctpop64.ll
llvm/trunk/test/CodeGen/R600/cttz_zero_undef.ll
llvm/trunk/test/CodeGen/R600/cvt_f32_ubyte.ll
llvm/trunk/test/CodeGen/R600/dagcombiner-bug-illegal-vec4-int-to-fp.ll
llvm/trunk/test/CodeGen/R600/dot4-folding.ll
llvm/trunk/test/CodeGen/R600/ds-negative-offset-addressing-mode-loop.ll
llvm/trunk/test/CodeGen/R600/ds_read2.ll
llvm/trunk/test/CodeGen/R600/ds_read2_offset_order.ll
llvm/trunk/test/CodeGen/R600/ds_read2st64.ll
llvm/trunk/test/CodeGen/R600/ds_write2.ll
llvm/trunk/test/CodeGen/R600/ds_write2st64.ll
llvm/trunk/test/CodeGen/R600/extload-private.ll
llvm/trunk/test/CodeGen/R600/extload.ll
llvm/trunk/test/CodeGen/R600/fabs.f64.ll
llvm/trunk/test/CodeGen/R600/fadd.ll
llvm/trunk/test/CodeGen/R600/fadd64.ll
llvm/trunk/test/CodeGen/R600/fcmp-cnd.ll
llvm/trunk/test/CodeGen/R600/fcmp-cnde-int-args.ll
llvm/trunk/test/CodeGen/R600/fcmp.ll
llvm/trunk/test/CodeGen/R600/fcmp64.ll
llvm/trunk/test/CodeGen/R600/fconst64.ll
llvm/trunk/test/CodeGen/R600/fdiv.f64.ll
llvm/trunk/test/CodeGen/R600/fdiv.ll
llvm/trunk/test/CodeGen/R600/fetch-limits.r600.ll
llvm/trunk/test/CodeGen/R600/fetch-limits.r700+.ll
llvm/trunk/test/CodeGen/R600/flat-address-space.ll
llvm/trunk/test/CodeGen/R600/fma-combine.ll
llvm/trunk/test/CodeGen/R600/fma.f64.ll
llvm/trunk/test/CodeGen/R600/fma.ll
llvm/trunk/test/CodeGen/R600/fmax3.ll
llvm/trunk/test/CodeGen/R600/fmax_legacy.f64.ll
llvm/trunk/test/CodeGen/R600/fmax_legacy.ll
llvm/trunk/test/CodeGen/R600/fmin3.ll
llvm/trunk/test/CodeGen/R600/fmin_legacy.f64.ll
llvm/trunk/test/CodeGen/R600/fmin_legacy.ll
llvm/trunk/test/CodeGen/R600/fmul.ll
llvm/trunk/test/CodeGen/R600/fmul64.ll
llvm/trunk/test/CodeGen/R600/fmuladd.ll
llvm/trunk/test/CodeGen/R600/fneg-fabs.f64.ll
llvm/trunk/test/CodeGen/R600/fneg-fabs.ll
llvm/trunk/test/CodeGen/R600/fp16_to_fp.ll
llvm/trunk/test/CodeGen/R600/fp32_to_fp16.ll
llvm/trunk/test/CodeGen/R600/fp_to_sint.f64.ll
llvm/trunk/test/CodeGen/R600/fp_to_sint.ll
llvm/trunk/test/CodeGen/R600/fp_to_uint.f64.ll
llvm/trunk/test/CodeGen/R600/fp_to_uint.ll
llvm/trunk/test/CodeGen/R600/frem.ll
llvm/trunk/test/CodeGen/R600/fsqrt.ll
llvm/trunk/test/CodeGen/R600/fsub.ll
llvm/trunk/test/CodeGen/R600/fsub64.ll
llvm/trunk/test/CodeGen/R600/ftrunc.f64.ll
llvm/trunk/test/CodeGen/R600/global-directive.ll
llvm/trunk/test/CodeGen/R600/global-extload-i1.ll
llvm/trunk/test/CodeGen/R600/global-extload-i16.ll
llvm/trunk/test/CodeGen/R600/global-extload-i32.ll
llvm/trunk/test/CodeGen/R600/global-extload-i8.ll
llvm/trunk/test/CodeGen/R600/global-zero-initializer.ll
llvm/trunk/test/CodeGen/R600/gv-const-addrspace-fail.ll
llvm/trunk/test/CodeGen/R600/gv-const-addrspace.ll
llvm/trunk/test/CodeGen/R600/half.ll
llvm/trunk/test/CodeGen/R600/i8-to-double-to-float.ll
llvm/trunk/test/CodeGen/R600/icmp-select-sete-reverse-args.ll
llvm/trunk/test/CodeGen/R600/imm.ll
llvm/trunk/test/CodeGen/R600/indirect-private-64.ll
llvm/trunk/test/CodeGen/R600/insert_vector_elt.ll
llvm/trunk/test/CodeGen/R600/jump-address.ll
llvm/trunk/test/CodeGen/R600/kcache-fold.ll
llvm/trunk/test/CodeGen/R600/large-alloca.ll
llvm/trunk/test/CodeGen/R600/large-constant-initializer.ll
llvm/trunk/test/CodeGen/R600/lds-initializer.ll
llvm/trunk/test/CodeGen/R600/lds-oqap-crash.ll
llvm/trunk/test/CodeGen/R600/lds-output-queue.ll
llvm/trunk/test/CodeGen/R600/lds-zero-initializer.ll
llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.abs.ll
llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.barrier.global.ll
llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.barrier.local.ll
llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfe.i32.ll
llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.bfe.u32.ll
llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.brev.ll
llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.class.ll
llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.cube.ll
llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.cvt_f32_ubyte.ll
llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll
llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.div_scale.ll
llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.fract.ll
llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imax.ll
llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.imin.ll
llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.tex.ll
llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.trig_preop.ll
llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umad24.ll
llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umax.ll
llvm/trunk/test/CodeGen/R600/llvm.AMDGPU.umin.ll
llvm/trunk/test/CodeGen/R600/llvm.SI.imageload.ll
llvm/trunk/test/CodeGen/R600/llvm.SI.load.dword.ll
llvm/trunk/test/CodeGen/R600/llvm.amdgpu.dp4.ll
llvm/trunk/test/CodeGen/R600/llvm.round.f64.ll
llvm/trunk/test/CodeGen/R600/load-i1.ll
llvm/trunk/test/CodeGen/R600/load-input-fold.ll
llvm/trunk/test/CodeGen/R600/load.ll
llvm/trunk/test/CodeGen/R600/load.vec.ll
llvm/trunk/test/CodeGen/R600/load64.ll
llvm/trunk/test/CodeGen/R600/local-64.ll
llvm/trunk/test/CodeGen/R600/local-memory-two-objects.ll
llvm/trunk/test/CodeGen/R600/local-memory.ll
llvm/trunk/test/CodeGen/R600/loop-idiom.ll
llvm/trunk/test/CodeGen/R600/m0-spill.ll
llvm/trunk/test/CodeGen/R600/mad-combine.ll
llvm/trunk/test/CodeGen/R600/mad-sub.ll
llvm/trunk/test/CodeGen/R600/madak.ll
llvm/trunk/test/CodeGen/R600/madmk.ll
llvm/trunk/test/CodeGen/R600/max.ll
llvm/trunk/test/CodeGen/R600/max3.ll
llvm/trunk/test/CodeGen/R600/min.ll
llvm/trunk/test/CodeGen/R600/min3.ll
llvm/trunk/test/CodeGen/R600/missing-store.ll
llvm/trunk/test/CodeGen/R600/mubuf.ll
llvm/trunk/test/CodeGen/R600/mul.ll
llvm/trunk/test/CodeGen/R600/no-initializer-constant-addrspace.ll
llvm/trunk/test/CodeGen/R600/no-shrink-extloads.ll
llvm/trunk/test/CodeGen/R600/or.ll
llvm/trunk/test/CodeGen/R600/parallelandifcollapse.ll
llvm/trunk/test/CodeGen/R600/parallelorifcollapse.ll
llvm/trunk/test/CodeGen/R600/private-memory.ll
llvm/trunk/test/CodeGen/R600/pv-packing.ll
llvm/trunk/test/CodeGen/R600/pv.ll
llvm/trunk/test/CodeGen/R600/r600-export-fix.ll
llvm/trunk/test/CodeGen/R600/r600cfg.ll
llvm/trunk/test/CodeGen/R600/register-count-comments.ll
llvm/trunk/test/CodeGen/R600/reorder-stores.ll
llvm/trunk/test/CodeGen/R600/rotl.i64.ll
llvm/trunk/test/CodeGen/R600/rotr.i64.ll
llvm/trunk/test/CodeGen/R600/rsq.ll
llvm/trunk/test/CodeGen/R600/s_movk_i32.ll
llvm/trunk/test/CodeGen/R600/saddo.ll
llvm/trunk/test/CodeGen/R600/salu-to-valu.ll
llvm/trunk/test/CodeGen/R600/scalar_to_vector.ll
llvm/trunk/test/CodeGen/R600/schedule-fs-loop-nested.ll
llvm/trunk/test/CodeGen/R600/schedule-fs-loop.ll
llvm/trunk/test/CodeGen/R600/schedule-global-loads.ll
llvm/trunk/test/CodeGen/R600/schedule-if-2.ll
llvm/trunk/test/CodeGen/R600/schedule-if.ll
llvm/trunk/test/CodeGen/R600/schedule-vs-if-nested-loop-failure.ll
llvm/trunk/test/CodeGen/R600/schedule-vs-if-nested-loop.ll
llvm/trunk/test/CodeGen/R600/scratch-buffer.ll
llvm/trunk/test/CodeGen/R600/sdiv.ll
llvm/trunk/test/CodeGen/R600/sdivrem24.ll
llvm/trunk/test/CodeGen/R600/select64.ll
llvm/trunk/test/CodeGen/R600/selectcc-cnd.ll
llvm/trunk/test/CodeGen/R600/selectcc-cnde-int.ll
llvm/trunk/test/CodeGen/R600/selectcc-icmp-select-float.ll
llvm/trunk/test/CodeGen/R600/setcc-opt.ll
llvm/trunk/test/CodeGen/R600/setcc.ll
llvm/trunk/test/CodeGen/R600/sext-in-reg.ll
llvm/trunk/test/CodeGen/R600/sgpr-control-flow.ll
llvm/trunk/test/CodeGen/R600/sgpr-copy-duplicate-operand.ll
llvm/trunk/test/CodeGen/R600/sgpr-copy.ll
llvm/trunk/test/CodeGen/R600/shl.ll
llvm/trunk/test/CodeGen/R600/shl_add_constant.ll
llvm/trunk/test/CodeGen/R600/shl_add_ptr.ll
llvm/trunk/test/CodeGen/R600/si-lod-bias.ll
llvm/trunk/test/CodeGen/R600/si-sgpr-spill.ll
llvm/trunk/test/CodeGen/R600/si-triv-disjoint-mem-access.ll
llvm/trunk/test/CodeGen/R600/si-vector-hang.ll
llvm/trunk/test/CodeGen/R600/sign_extend.ll
llvm/trunk/test/CodeGen/R600/simplify-demanded-bits-build-pair.ll
llvm/trunk/test/CodeGen/R600/sint_to_fp.f64.ll
llvm/trunk/test/CodeGen/R600/sint_to_fp.ll
llvm/trunk/test/CodeGen/R600/smrd.ll
llvm/trunk/test/CodeGen/R600/split-scalar-i64-add.ll
llvm/trunk/test/CodeGen/R600/sra.ll
llvm/trunk/test/CodeGen/R600/srem.ll
llvm/trunk/test/CodeGen/R600/srl.ll
llvm/trunk/test/CodeGen/R600/ssubo.ll
llvm/trunk/test/CodeGen/R600/store-barrier.ll
llvm/trunk/test/CodeGen/R600/store.ll
llvm/trunk/test/CodeGen/R600/store.r600.ll
llvm/trunk/test/CodeGen/R600/sub.ll
llvm/trunk/test/CodeGen/R600/swizzle-export.ll
llvm/trunk/test/CodeGen/R600/trunc-cmp-constant.ll
llvm/trunk/test/CodeGen/R600/trunc.ll
llvm/trunk/test/CodeGen/R600/uaddo.ll
llvm/trunk/test/CodeGen/R600/udiv.ll
llvm/trunk/test/CodeGen/R600/udivrem24.ll
llvm/trunk/test/CodeGen/R600/uint_to_fp.f64.ll
llvm/trunk/test/CodeGen/R600/uint_to_fp.ll
llvm/trunk/test/CodeGen/R600/unaligned-load-store.ll
llvm/trunk/test/CodeGen/R600/unhandled-loop-condition-assertion.ll
llvm/trunk/test/CodeGen/R600/unroll.ll
llvm/trunk/test/CodeGen/R600/urem.ll
llvm/trunk/test/CodeGen/R600/usubo.ll
llvm/trunk/test/CodeGen/R600/v_cndmask.ll
llvm/trunk/test/CodeGen/R600/valu-i1.ll
llvm/trunk/test/CodeGen/R600/vector-alloca.ll
llvm/trunk/test/CodeGen/R600/vertex-fetch-encoding.ll
llvm/trunk/test/CodeGen/R600/vselect.ll
llvm/trunk/test/CodeGen/R600/vtx-fetch-branch.ll
llvm/trunk/test/CodeGen/R600/vtx-schedule.ll
llvm/trunk/test/CodeGen/R600/wait.ll
llvm/trunk/test/CodeGen/R600/xor.ll
llvm/trunk/test/CodeGen/SPARC/2008-10-10-InlineAsmMemoryOperand.ll
llvm/trunk/test/CodeGen/SPARC/2009-08-28-PIC.ll
llvm/trunk/test/CodeGen/SPARC/2011-01-11-CC.ll
llvm/trunk/test/CodeGen/SPARC/2011-01-22-SRet.ll
llvm/trunk/test/CodeGen/SPARC/64abi.ll
llvm/trunk/test/CodeGen/SPARC/64bit.ll
llvm/trunk/test/CodeGen/SPARC/atomics.ll
llvm/trunk/test/CodeGen/SPARC/fp128.ll
llvm/trunk/test/CodeGen/SPARC/globals.ll
llvm/trunk/test/CodeGen/SPARC/leafproc.ll
llvm/trunk/test/CodeGen/SPARC/mult-alt-generic-sparc.ll
llvm/trunk/test/CodeGen/SPARC/obj-relocs.ll
llvm/trunk/test/CodeGen/SPARC/private.ll
llvm/trunk/test/CodeGen/SPARC/setjmp.ll
llvm/trunk/test/CodeGen/SPARC/spillsize.ll
llvm/trunk/test/CodeGen/SPARC/tls.ll
llvm/trunk/test/CodeGen/SPARC/varargs.ll
llvm/trunk/test/CodeGen/SystemZ/addr-01.ll
llvm/trunk/test/CodeGen/SystemZ/addr-02.ll
llvm/trunk/test/CodeGen/SystemZ/addr-03.ll
llvm/trunk/test/CodeGen/SystemZ/alias-01.ll
llvm/trunk/test/CodeGen/SystemZ/and-01.ll
llvm/trunk/test/CodeGen/SystemZ/and-03.ll
llvm/trunk/test/CodeGen/SystemZ/and-05.ll
llvm/trunk/test/CodeGen/SystemZ/and-06.ll
llvm/trunk/test/CodeGen/SystemZ/and-08.ll
llvm/trunk/test/CodeGen/SystemZ/asm-18.ll
llvm/trunk/test/CodeGen/SystemZ/atomic-load-01.ll
llvm/trunk/test/CodeGen/SystemZ/atomic-load-02.ll
llvm/trunk/test/CodeGen/SystemZ/atomic-load-03.ll
llvm/trunk/test/CodeGen/SystemZ/atomic-load-04.ll
llvm/trunk/test/CodeGen/SystemZ/branch-02.ll
llvm/trunk/test/CodeGen/SystemZ/branch-03.ll
llvm/trunk/test/CodeGen/SystemZ/branch-04.ll
llvm/trunk/test/CodeGen/SystemZ/branch-06.ll
llvm/trunk/test/CodeGen/SystemZ/branch-08.ll
llvm/trunk/test/CodeGen/SystemZ/bswap-02.ll
llvm/trunk/test/CodeGen/SystemZ/bswap-03.ll
llvm/trunk/test/CodeGen/SystemZ/cond-load-01.ll
llvm/trunk/test/CodeGen/SystemZ/cond-load-02.ll
llvm/trunk/test/CodeGen/SystemZ/cond-store-01.ll
llvm/trunk/test/CodeGen/SystemZ/cond-store-02.ll
llvm/trunk/test/CodeGen/SystemZ/cond-store-03.ll
llvm/trunk/test/CodeGen/SystemZ/cond-store-04.ll
llvm/trunk/test/CodeGen/SystemZ/cond-store-05.ll
llvm/trunk/test/CodeGen/SystemZ/cond-store-06.ll
llvm/trunk/test/CodeGen/SystemZ/cond-store-07.ll
llvm/trunk/test/CodeGen/SystemZ/cond-store-08.ll
llvm/trunk/test/CodeGen/SystemZ/fp-abs-01.ll
llvm/trunk/test/CodeGen/SystemZ/fp-abs-02.ll
llvm/trunk/test/CodeGen/SystemZ/fp-add-01.ll
llvm/trunk/test/CodeGen/SystemZ/fp-add-02.ll
llvm/trunk/test/CodeGen/SystemZ/fp-add-03.ll
llvm/trunk/test/CodeGen/SystemZ/fp-cmp-01.ll
llvm/trunk/test/CodeGen/SystemZ/fp-cmp-02.ll
llvm/trunk/test/CodeGen/SystemZ/fp-cmp-03.ll
llvm/trunk/test/CodeGen/SystemZ/fp-cmp-04.ll
llvm/trunk/test/CodeGen/SystemZ/fp-conv-01.ll
llvm/trunk/test/CodeGen/SystemZ/fp-conv-02.ll
llvm/trunk/test/CodeGen/SystemZ/fp-conv-03.ll
llvm/trunk/test/CodeGen/SystemZ/fp-conv-04.ll
llvm/trunk/test/CodeGen/SystemZ/fp-conv-09.ll
llvm/trunk/test/CodeGen/SystemZ/fp-conv-10.ll
llvm/trunk/test/CodeGen/SystemZ/fp-conv-11.ll
llvm/trunk/test/CodeGen/SystemZ/fp-conv-12.ll
llvm/trunk/test/CodeGen/SystemZ/fp-conv-14.ll
llvm/trunk/test/CodeGen/SystemZ/fp-copysign-01.ll
llvm/trunk/test/CodeGen/SystemZ/fp-div-01.ll
llvm/trunk/test/CodeGen/SystemZ/fp-div-02.ll
llvm/trunk/test/CodeGen/SystemZ/fp-div-03.ll
llvm/trunk/test/CodeGen/SystemZ/fp-move-01.ll
llvm/trunk/test/CodeGen/SystemZ/fp-move-02.ll
llvm/trunk/test/CodeGen/SystemZ/fp-move-03.ll
llvm/trunk/test/CodeGen/SystemZ/fp-move-04.ll
llvm/trunk/test/CodeGen/SystemZ/fp-move-05.ll
llvm/trunk/test/CodeGen/SystemZ/fp-move-09.ll
llvm/trunk/test/CodeGen/SystemZ/fp-mul-01.ll
llvm/trunk/test/CodeGen/SystemZ/fp-mul-02.ll
llvm/trunk/test/CodeGen/SystemZ/fp-mul-03.ll
llvm/trunk/test/CodeGen/SystemZ/fp-mul-04.ll
llvm/trunk/test/CodeGen/SystemZ/fp-mul-05.ll
llvm/trunk/test/CodeGen/SystemZ/fp-mul-06.ll
llvm/trunk/test/CodeGen/SystemZ/fp-mul-07.ll
llvm/trunk/test/CodeGen/SystemZ/fp-mul-08.ll
llvm/trunk/test/CodeGen/SystemZ/fp-mul-09.ll
llvm/trunk/test/CodeGen/SystemZ/fp-neg-01.ll
llvm/trunk/test/CodeGen/SystemZ/fp-round-01.ll
llvm/trunk/test/CodeGen/SystemZ/fp-round-02.ll
llvm/trunk/test/CodeGen/SystemZ/fp-sqrt-01.ll
llvm/trunk/test/CodeGen/SystemZ/fp-sqrt-02.ll
llvm/trunk/test/CodeGen/SystemZ/fp-sqrt-03.ll
llvm/trunk/test/CodeGen/SystemZ/fp-sub-01.ll
llvm/trunk/test/CodeGen/SystemZ/fp-sub-02.ll
llvm/trunk/test/CodeGen/SystemZ/fp-sub-03.ll
llvm/trunk/test/CodeGen/SystemZ/frame-02.ll
llvm/trunk/test/CodeGen/SystemZ/frame-03.ll
llvm/trunk/test/CodeGen/SystemZ/frame-04.ll
llvm/trunk/test/CodeGen/SystemZ/frame-05.ll
llvm/trunk/test/CodeGen/SystemZ/frame-06.ll
llvm/trunk/test/CodeGen/SystemZ/frame-07.ll
llvm/trunk/test/CodeGen/SystemZ/frame-08.ll
llvm/trunk/test/CodeGen/SystemZ/frame-09.ll
llvm/trunk/test/CodeGen/SystemZ/frame-13.ll
llvm/trunk/test/CodeGen/SystemZ/frame-14.ll
llvm/trunk/test/CodeGen/SystemZ/frame-15.ll
llvm/trunk/test/CodeGen/SystemZ/frame-16.ll
llvm/trunk/test/CodeGen/SystemZ/frame-17.ll
llvm/trunk/test/CodeGen/SystemZ/frame-18.ll
llvm/trunk/test/CodeGen/SystemZ/insert-01.ll
llvm/trunk/test/CodeGen/SystemZ/insert-02.ll
llvm/trunk/test/CodeGen/SystemZ/insert-06.ll
llvm/trunk/test/CodeGen/SystemZ/int-add-01.ll
llvm/trunk/test/CodeGen/SystemZ/int-add-02.ll
llvm/trunk/test/CodeGen/SystemZ/int-add-03.ll
llvm/trunk/test/CodeGen/SystemZ/int-add-04.ll
llvm/trunk/test/CodeGen/SystemZ/int-add-05.ll
llvm/trunk/test/CodeGen/SystemZ/int-add-08.ll
llvm/trunk/test/CodeGen/SystemZ/int-add-09.ll
llvm/trunk/test/CodeGen/SystemZ/int-add-10.ll
llvm/trunk/test/CodeGen/SystemZ/int-add-11.ll
llvm/trunk/test/CodeGen/SystemZ/int-add-12.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-01.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-02.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-03.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-04.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-05.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-06.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-07.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-08.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-15.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-16.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-17.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-18.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-19.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-20.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-21.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-22.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-23.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-24.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-25.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-26.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-27.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-28.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-29.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-30.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-31.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-32.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-33.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-34.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-35.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-36.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-37.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-38.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-39.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-40.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-41.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-42.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-43.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-44.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-45.ll
llvm/trunk/test/CodeGen/SystemZ/int-cmp-48.ll
llvm/trunk/test/CodeGen/SystemZ/int-conv-01.ll
llvm/trunk/test/CodeGen/SystemZ/int-conv-02.ll
llvm/trunk/test/CodeGen/SystemZ/int-conv-03.ll
llvm/trunk/test/CodeGen/SystemZ/int-conv-04.ll
llvm/trunk/test/CodeGen/SystemZ/int-conv-05.ll
llvm/trunk/test/CodeGen/SystemZ/int-conv-06.ll
llvm/trunk/test/CodeGen/SystemZ/int-conv-07.ll
llvm/trunk/test/CodeGen/SystemZ/int-conv-08.ll
llvm/trunk/test/CodeGen/SystemZ/int-conv-09.ll
llvm/trunk/test/CodeGen/SystemZ/int-conv-10.ll
llvm/trunk/test/CodeGen/SystemZ/int-conv-11.ll
llvm/trunk/test/CodeGen/SystemZ/int-div-01.ll
llvm/trunk/test/CodeGen/SystemZ/int-div-02.ll
llvm/trunk/test/CodeGen/SystemZ/int-div-03.ll
llvm/trunk/test/CodeGen/SystemZ/int-div-04.ll
llvm/trunk/test/CodeGen/SystemZ/int-div-05.ll
llvm/trunk/test/CodeGen/SystemZ/int-move-02.ll
llvm/trunk/test/CodeGen/SystemZ/int-move-03.ll
llvm/trunk/test/CodeGen/SystemZ/int-move-08.ll
llvm/trunk/test/CodeGen/SystemZ/int-move-09.ll
llvm/trunk/test/CodeGen/SystemZ/int-mul-01.ll
llvm/trunk/test/CodeGen/SystemZ/int-mul-02.ll
llvm/trunk/test/CodeGen/SystemZ/int-mul-03.ll
llvm/trunk/test/CodeGen/SystemZ/int-mul-04.ll
llvm/trunk/test/CodeGen/SystemZ/int-mul-08.ll
llvm/trunk/test/CodeGen/SystemZ/int-sub-01.ll
llvm/trunk/test/CodeGen/SystemZ/int-sub-02.ll
llvm/trunk/test/CodeGen/SystemZ/int-sub-03.ll
llvm/trunk/test/CodeGen/SystemZ/int-sub-04.ll
llvm/trunk/test/CodeGen/SystemZ/int-sub-05.ll
llvm/trunk/test/CodeGen/SystemZ/int-sub-06.ll
llvm/trunk/test/CodeGen/SystemZ/int-sub-07.ll
llvm/trunk/test/CodeGen/SystemZ/loop-01.ll
llvm/trunk/test/CodeGen/SystemZ/memchr-02.ll
llvm/trunk/test/CodeGen/SystemZ/memcpy-02.ll
llvm/trunk/test/CodeGen/SystemZ/or-01.ll
llvm/trunk/test/CodeGen/SystemZ/or-03.ll
llvm/trunk/test/CodeGen/SystemZ/or-05.ll
llvm/trunk/test/CodeGen/SystemZ/or-06.ll
llvm/trunk/test/CodeGen/SystemZ/or-08.ll
llvm/trunk/test/CodeGen/SystemZ/serialize-01.ll
llvm/trunk/test/CodeGen/SystemZ/shift-01.ll
llvm/trunk/test/CodeGen/SystemZ/shift-02.ll
llvm/trunk/test/CodeGen/SystemZ/shift-03.ll
llvm/trunk/test/CodeGen/SystemZ/shift-04.ll
llvm/trunk/test/CodeGen/SystemZ/shift-05.ll
llvm/trunk/test/CodeGen/SystemZ/shift-06.ll
llvm/trunk/test/CodeGen/SystemZ/shift-07.ll
llvm/trunk/test/CodeGen/SystemZ/shift-08.ll
llvm/trunk/test/CodeGen/SystemZ/spill-01.ll
llvm/trunk/test/CodeGen/SystemZ/strcpy-01.ll
llvm/trunk/test/CodeGen/SystemZ/tls-05.ll
llvm/trunk/test/CodeGen/SystemZ/tls-06.ll
llvm/trunk/test/CodeGen/SystemZ/tls-07.ll
llvm/trunk/test/CodeGen/SystemZ/unaligned-01.ll
llvm/trunk/test/CodeGen/SystemZ/xor-01.ll
llvm/trunk/test/CodeGen/SystemZ/xor-03.ll
llvm/trunk/test/CodeGen/SystemZ/xor-05.ll
llvm/trunk/test/CodeGen/SystemZ/xor-06.ll
llvm/trunk/test/CodeGen/SystemZ/xor-08.ll
llvm/trunk/test/CodeGen/Thumb/2007-01-31-RegInfoAssert.ll
llvm/trunk/test/CodeGen/Thumb/2007-05-05-InvalidPushPop.ll
llvm/trunk/test/CodeGen/Thumb/2009-07-20-TwoAddrBug.ll
llvm/trunk/test/CodeGen/Thumb/2009-08-12-ConstIslandAssert.ll
llvm/trunk/test/CodeGen/Thumb/2009-08-12-RegInfoAssert.ll
llvm/trunk/test/CodeGen/Thumb/2009-08-20-ISelBug.ll
llvm/trunk/test/CodeGen/Thumb/2009-12-17-pre-regalloc-taildup.ll
llvm/trunk/test/CodeGen/Thumb/2011-05-11-DAGLegalizer.ll
llvm/trunk/test/CodeGen/Thumb/2011-EpilogueBug.ll
llvm/trunk/test/CodeGen/Thumb/2014-06-10-thumb1-ldst-opt-bug.ll
llvm/trunk/test/CodeGen/Thumb/asmprinter-bug.ll
llvm/trunk/test/CodeGen/Thumb/cortex-m0-unaligned-access.ll
llvm/trunk/test/CodeGen/Thumb/dyn-stackalloc.ll
llvm/trunk/test/CodeGen/Thumb/large-stack.ll
llvm/trunk/test/CodeGen/Thumb/ldm-merge-call.ll
llvm/trunk/test/CodeGen/Thumb/ldm-merge-struct.ll
llvm/trunk/test/CodeGen/Thumb/ldm-stm-base-materialization.ll
llvm/trunk/test/CodeGen/Thumb/ldr_ext.ll
llvm/trunk/test/CodeGen/Thumb/ldr_frame.ll
llvm/trunk/test/CodeGen/Thumb/long.ll
llvm/trunk/test/CodeGen/Thumb/segmented-stacks.ll
llvm/trunk/test/CodeGen/Thumb/stack-access.ll
llvm/trunk/test/CodeGen/Thumb/stm-merge.ll
llvm/trunk/test/CodeGen/Thumb/thumb-ldm.ll
llvm/trunk/test/CodeGen/Thumb/vargs.ll
llvm/trunk/test/CodeGen/Thumb2/2009-07-17-CrossRegClassCopy.ll
llvm/trunk/test/CodeGen/Thumb2/2009-07-21-ISelBug.ll
llvm/trunk/test/CodeGen/Thumb2/2009-07-30-PEICrash.ll
llvm/trunk/test/CodeGen/Thumb2/2009-08-01-WrongLDRBOpc.ll
llvm/trunk/test/CodeGen/Thumb2/2009-08-02-CoalescerBug.ll
llvm/trunk/test/CodeGen/Thumb2/2009-08-04-CoalescerBug.ll
llvm/trunk/test/CodeGen/Thumb2/2009-08-04-ScavengerAssert.ll
llvm/trunk/test/CodeGen/Thumb2/2009-08-04-SubregLoweringBug3.ll
llvm/trunk/test/CodeGen/Thumb2/2009-08-07-NeonFPBug.ll
llvm/trunk/test/CodeGen/Thumb2/2009-08-10-ISelBug.ll
llvm/trunk/test/CodeGen/Thumb2/2009-09-01-PostRAProlog.ll
llvm/trunk/test/CodeGen/Thumb2/2009-09-28-ITBlockBug.ll
llvm/trunk/test/CodeGen/Thumb2/2009-11-11-ScavengerAssert.ll
llvm/trunk/test/CodeGen/Thumb2/2009-12-01-LoopIVUsers.ll
llvm/trunk/test/CodeGen/Thumb2/2010-01-06-TailDuplicateLabels.ll
llvm/trunk/test/CodeGen/Thumb2/2010-03-08-addi12-ccout.ll
llvm/trunk/test/CodeGen/Thumb2/2010-03-15-AsmCCClobber.ll
llvm/trunk/test/CodeGen/Thumb2/2010-06-19-ITBlockCrash.ll
llvm/trunk/test/CodeGen/Thumb2/2010-06-21-TailMergeBug.ll
llvm/trunk/test/CodeGen/Thumb2/2010-08-10-VarSizedAllocaBug.ll
llvm/trunk/test/CodeGen/Thumb2/2011-06-07-TwoAddrEarlyClobber.ll
llvm/trunk/test/CodeGen/Thumb2/2011-12-16-T2SizeReduceAssert.ll
llvm/trunk/test/CodeGen/Thumb2/2012-01-13-CBNZBug.ll
llvm/trunk/test/CodeGen/Thumb2/2013-02-19-tail-call-register-hint.ll
llvm/trunk/test/CodeGen/Thumb2/aligned-constants.ll
llvm/trunk/test/CodeGen/Thumb2/aligned-spill.ll
llvm/trunk/test/CodeGen/Thumb2/bfi.ll
llvm/trunk/test/CodeGen/Thumb2/constant-islands-new-island-padding.ll
llvm/trunk/test/CodeGen/Thumb2/constant-islands.ll
llvm/trunk/test/CodeGen/Thumb2/crash.ll
llvm/trunk/test/CodeGen/Thumb2/cross-rc-coalescing-2.ll
llvm/trunk/test/CodeGen/Thumb2/float-ops.ll
llvm/trunk/test/CodeGen/Thumb2/frameless2.ll
llvm/trunk/test/CodeGen/Thumb2/ifcvt-neon.ll
llvm/trunk/test/CodeGen/Thumb2/inflate-regs.ll
llvm/trunk/test/CodeGen/Thumb2/large-call.ll
llvm/trunk/test/CodeGen/Thumb2/large-stack.ll
llvm/trunk/test/CodeGen/Thumb2/lsr-deficiency.ll
llvm/trunk/test/CodeGen/Thumb2/machine-licm.ll
llvm/trunk/test/CodeGen/Thumb2/tail-call-r9.ll
llvm/trunk/test/CodeGen/Thumb2/thumb2-call-tc.ll
llvm/trunk/test/CodeGen/Thumb2/thumb2-call.ll
llvm/trunk/test/CodeGen/Thumb2/thumb2-ifcvt1-tc.ll
llvm/trunk/test/CodeGen/Thumb2/thumb2-ifcvt1.ll
llvm/trunk/test/CodeGen/Thumb2/thumb2-ifcvt2.ll
llvm/trunk/test/CodeGen/Thumb2/thumb2-ifcvt3.ll
llvm/trunk/test/CodeGen/Thumb2/thumb2-ldm.ll
llvm/trunk/test/CodeGen/Thumb2/thumb2-ldr.ll
llvm/trunk/test/CodeGen/Thumb2/thumb2-ldr_ext.ll
llvm/trunk/test/CodeGen/Thumb2/thumb2-ldr_post.ll
llvm/trunk/test/CodeGen/Thumb2/thumb2-ldr_pre.ll
llvm/trunk/test/CodeGen/Thumb2/thumb2-ldrb.ll
llvm/trunk/test/CodeGen/Thumb2/thumb2-ldrd.ll
llvm/trunk/test/CodeGen/Thumb2/thumb2-ldrh.ll
llvm/trunk/test/CodeGen/Thumb2/thumb2-smul.ll
llvm/trunk/test/CodeGen/Thumb2/thumb2-spill-q.ll
llvm/trunk/test/CodeGen/Thumb2/thumb2-str_post.ll
llvm/trunk/test/CodeGen/Thumb2/thumb2-str_pre.ll
llvm/trunk/test/CodeGen/Thumb2/thumb2-tbh.ll
llvm/trunk/test/CodeGen/Thumb2/tls1.ll
llvm/trunk/test/CodeGen/Thumb2/tls2.ll
llvm/trunk/test/CodeGen/Thumb2/tpsoft.ll
llvm/trunk/test/CodeGen/Thumb2/v8_IT_2.ll
llvm/trunk/test/CodeGen/Thumb2/v8_IT_3.ll
llvm/trunk/test/CodeGen/X86/2005-01-17-CycleInDAG.ll
llvm/trunk/test/CodeGen/X86/2006-01-19-ISelFoldingBug.ll
llvm/trunk/test/CodeGen/X86/2006-04-27-ISelFoldingBug.ll
llvm/trunk/test/CodeGen/X86/2006-05-01-SchedCausingSpills.ll
llvm/trunk/test/CodeGen/X86/2006-05-02-InstrSched1.ll
llvm/trunk/test/CodeGen/X86/2006-05-02-InstrSched2.ll
llvm/trunk/test/CodeGen/X86/2006-05-08-CoalesceSubRegClass.ll
llvm/trunk/test/CodeGen/X86/2006-05-08-InstrSched.ll
llvm/trunk/test/CodeGen/X86/2006-05-11-InstrSched.ll
llvm/trunk/test/CodeGen/X86/2006-05-25-CycleInDAG.ll
llvm/trunk/test/CodeGen/X86/2006-07-20-InlineAsm.ll
llvm/trunk/test/CodeGen/X86/2006-08-07-CycleInDAG.ll
llvm/trunk/test/CodeGen/X86/2006-08-16-CycleInDAG.ll
llvm/trunk/test/CodeGen/X86/2006-09-01-CycleInDAG.ll
llvm/trunk/test/CodeGen/X86/2006-10-09-CycleInDAG.ll
llvm/trunk/test/CodeGen/X86/2006-10-10-FindModifiedNodeSlotBug.ll
llvm/trunk/test/CodeGen/X86/2006-10-12-CycleInDAG.ll
llvm/trunk/test/CodeGen/X86/2006-10-13-CycleInDAG.ll
llvm/trunk/test/CodeGen/X86/2006-11-12-CSRetCC.ll
llvm/trunk/test/CodeGen/X86/2006-11-17-IllegalMove.ll
llvm/trunk/test/CodeGen/X86/2006-12-16-InlineAsmCrash.ll
llvm/trunk/test/CodeGen/X86/2007-01-13-StackPtrIndex.ll
llvm/trunk/test/CodeGen/X86/2007-02-04-OrAddrMode.ll
llvm/trunk/test/CodeGen/X86/2007-02-16-BranchFold.ll
llvm/trunk/test/CodeGen/X86/2007-02-19-LiveIntervalAssert.ll
llvm/trunk/test/CodeGen/X86/2007-03-01-SpillerCrash.ll
llvm/trunk/test/CodeGen/X86/2007-03-15-GEP-Idx-Sink.ll
llvm/trunk/test/CodeGen/X86/2007-03-16-InlineAsm.ll
llvm/trunk/test/CodeGen/X86/2007-03-26-CoalescerBug.ll
llvm/trunk/test/CodeGen/X86/2007-04-17-LiveIntervalAssert.ll
llvm/trunk/test/CodeGen/X86/2007-05-05-VecCastExpand.ll
llvm/trunk/test/CodeGen/X86/2007-06-29-VecFPConstantCSEBug.ll
llvm/trunk/test/CodeGen/X86/2007-07-10-StackerAssert.ll
llvm/trunk/test/CodeGen/X86/2007-07-18-Vector-Extract.ll
llvm/trunk/test/CodeGen/X86/2007-08-09-IllegalX86-64Asm.ll
llvm/trunk/test/CodeGen/X86/2007-09-05-InvalidAsm.ll
llvm/trunk/test/CodeGen/X86/2007-10-04-AvoidEFLAGSCopy.ll
llvm/trunk/test/CodeGen/X86/2007-10-12-CoalesceExtSubReg.ll
llvm/trunk/test/CodeGen/X86/2007-10-12-SpillerUnfold1.ll
llvm/trunk/test/CodeGen/X86/2007-10-12-SpillerUnfold2.ll
llvm/trunk/test/CodeGen/X86/2007-10-14-CoalescerCrash.ll
llvm/trunk/test/CodeGen/X86/2007-10-19-SpillerUnfold.ll
llvm/trunk/test/CodeGen/X86/2007-10-29-ExtendSetCC.ll
llvm/trunk/test/CodeGen/X86/2007-10-31-extractelement-i64.ll
llvm/trunk/test/CodeGen/X86/2007-11-04-LiveIntervalCrash.ll
llvm/trunk/test/CodeGen/X86/2007-11-06-InstrSched.ll
llvm/trunk/test/CodeGen/X86/2007-11-07-MulBy4.ll
llvm/trunk/test/CodeGen/X86/2007-12-16-BURRSchedCrash.ll
llvm/trunk/test/CodeGen/X86/2007-12-18-LoadCSEBug.ll
llvm/trunk/test/CodeGen/X86/2008-01-08-SchedulerCrash.ll
llvm/trunk/test/CodeGen/X86/2008-01-16-FPStackifierAssert.ll
llvm/trunk/test/CodeGen/X86/2008-01-16-InvalidDAGCombineXform.ll
llvm/trunk/test/CodeGen/X86/2008-02-05-ISelCrash.ll
llvm/trunk/test/CodeGen/X86/2008-02-06-LoadFoldingBug.ll
llvm/trunk/test/CodeGen/X86/2008-02-18-TailMergingBug.ll
llvm/trunk/test/CodeGen/X86/2008-02-20-InlineAsmClobber.ll
llvm/trunk/test/CodeGen/X86/2008-02-22-LocalRegAllocBug.ll
llvm/trunk/test/CodeGen/X86/2008-02-25-X86-64-CoalescerBug.ll
llvm/trunk/test/CodeGen/X86/2008-02-27-DeadSlotElimBug.ll
llvm/trunk/test/CodeGen/X86/2008-03-07-APIntBug.ll
llvm/trunk/test/CodeGen/X86/2008-03-10-RegAllocInfLoop.ll
llvm/trunk/test/CodeGen/X86/2008-03-12-ThreadLocalAlias.ll
llvm/trunk/test/CodeGen/X86/2008-03-14-SpillerCrash.ll
llvm/trunk/test/CodeGen/X86/2008-03-23-DarwinAsmComments.ll
llvm/trunk/test/CodeGen/X86/2008-03-31-SpillerFoldingBug.ll
llvm/trunk/test/CodeGen/X86/2008-04-09-BranchFolding.ll
llvm/trunk/test/CodeGen/X86/2008-04-15-LiveVariableBug.ll
llvm/trunk/test/CodeGen/X86/2008-04-16-CoalescerBug.ll
llvm/trunk/test/CodeGen/X86/2008-04-17-CoalescerBug.ll
llvm/trunk/test/CodeGen/X86/2008-04-24-pblendw-fold-crash.ll
llvm/trunk/test/CodeGen/X86/2008-04-28-CoalescerBug.ll
llvm/trunk/test/CodeGen/X86/2008-05-09-ShuffleLoweringBug.ll
llvm/trunk/test/CodeGen/X86/2008-05-12-tailmerge-5.ll
llvm/trunk/test/CodeGen/X86/2008-05-21-CoalescerBug.ll
llvm/trunk/test/CodeGen/X86/2008-05-22-FoldUnalignedLoad.ll
llvm/trunk/test/CodeGen/X86/2008-06-13-NotVolatileLoadStore.ll
llvm/trunk/test/CodeGen/X86/2008-06-13-VolatileLoadStore.ll
llvm/trunk/test/CodeGen/X86/2008-06-16-SubregsBug.ll
llvm/trunk/test/CodeGen/X86/2008-07-07-DanglingDeadInsts.ll
llvm/trunk/test/CodeGen/X86/2008-07-19-movups-spills.ll
llvm/trunk/test/CodeGen/X86/2008-07-22-CombinerCrash.ll
llvm/trunk/test/CodeGen/X86/2008-08-06-RewriterBug.ll
llvm/trunk/test/CodeGen/X86/2008-08-31-EH_RETURN64.ll
llvm/trunk/test/CodeGen/X86/2008-09-09-LinearScanBug.ll
llvm/trunk/test/CodeGen/X86/2008-09-11-CoalescerBug.ll
llvm/trunk/test/CodeGen/X86/2008-09-11-CoalescerBug2.ll
llvm/trunk/test/CodeGen/X86/2008-09-17-inline-asm-1.ll
llvm/trunk/test/CodeGen/X86/2008-09-18-inline-asm-2.ll
llvm/trunk/test/CodeGen/X86/2008-09-19-RegAllocBug.ll
llvm/trunk/test/CodeGen/X86/2008-09-29-ReMatBug.ll
llvm/trunk/test/CodeGen/X86/2008-09-29-VolatileBug.ll
llvm/trunk/test/CodeGen/X86/2008-10-06-x87ld-nan-2.ll
llvm/trunk/test/CodeGen/X86/2008-10-07-SSEISelBug.ll
llvm/trunk/test/CodeGen/X86/2008-10-11-CallCrash.ll
llvm/trunk/test/CodeGen/X86/2008-10-16-VecUnaryOp.ll
llvm/trunk/test/CodeGen/X86/2008-10-27-CoalescerBug.ll
llvm/trunk/test/CodeGen/X86/2008-11-06-testb.ll
llvm/trunk/test/CodeGen/X86/2008-12-01-loop-iv-used-outside-loop.ll
llvm/trunk/test/CodeGen/X86/2008-12-02-IllegalResultType.ll
llvm/trunk/test/CodeGen/X86/2009-01-16-SchedulerBug.ll
llvm/trunk/test/CodeGen/X86/2009-01-18-ConstantExprCrash.ll
llvm/trunk/test/CodeGen/X86/2009-01-31-BigShift2.ll
llvm/trunk/test/CodeGen/X86/2009-02-01-LargeMask.ll
llvm/trunk/test/CodeGen/X86/2009-02-03-AnalyzedTwice.ll
llvm/trunk/test/CodeGen/X86/2009-02-11-codegenprepare-reuse.ll
llvm/trunk/test/CodeGen/X86/2009-02-12-DebugInfoVLA.ll
llvm/trunk/test/CodeGen/X86/2009-02-26-MachineLICMBug.ll
llvm/trunk/test/CodeGen/X86/2009-03-03-BTHang.ll
llvm/trunk/test/CodeGen/X86/2009-03-05-burr-list-crash.ll
llvm/trunk/test/CodeGen/X86/2009-03-09-APIntCrash.ll
llvm/trunk/test/CodeGen/X86/2009-03-10-CoalescerBug.ll
llvm/trunk/test/CodeGen/X86/2009-03-23-LinearScanBug.ll
llvm/trunk/test/CodeGen/X86/2009-03-23-MultiUseSched.ll
llvm/trunk/test/CodeGen/X86/2009-03-25-TestBug.ll
llvm/trunk/test/CodeGen/X86/2009-04-14-IllegalRegs.ll
llvm/trunk/test/CodeGen/X86/2009-04-16-SpillerUnfold.ll
llvm/trunk/test/CodeGen/X86/2009-04-24.ll
llvm/trunk/test/CodeGen/X86/2009-04-25-CoalescerBug.ll
llvm/trunk/test/CodeGen/X86/2009-04-27-CoalescerAssert.ll
llvm/trunk/test/CodeGen/X86/2009-04-29-IndirectDestOperands.ll
llvm/trunk/test/CodeGen/X86/2009-04-29-LinearScanBug.ll
llvm/trunk/test/CodeGen/X86/2009-04-29-RegAllocAssert.ll
llvm/trunk/test/CodeGen/X86/2009-04-scale.ll
llvm/trunk/test/CodeGen/X86/2009-05-11-tailmerge-crash.ll
llvm/trunk/test/CodeGen/X86/2009-05-28-DAGCombineCrash.ll
llvm/trunk/test/CodeGen/X86/2009-05-30-ISelBug.ll
llvm/trunk/test/CodeGen/X86/2009-06-02-RewriterBug.ll
llvm/trunk/test/CodeGen/X86/2009-06-04-VirtualLiveIn.ll
llvm/trunk/test/CodeGen/X86/2009-06-05-VZextByteShort.ll
llvm/trunk/test/CodeGen/X86/2009-07-15-CoalescerBug.ll
llvm/trunk/test/CodeGen/X86/2009-07-20-DAGCombineBug.ll
llvm/trunk/test/CodeGen/X86/2009-08-06-branchfolder-crash.ll
llvm/trunk/test/CodeGen/X86/2009-08-14-Win64MemoryIndirectArg.ll
llvm/trunk/test/CodeGen/X86/2009-08-19-LoadNarrowingMiscompile.ll
llvm/trunk/test/CodeGen/X86/2009-08-23-SubRegReuseUndo.ll
llvm/trunk/test/CodeGen/X86/2009-09-10-LoadFoldingBug.ll
llvm/trunk/test/CodeGen/X86/2009-09-10-SpillComments.ll
llvm/trunk/test/CodeGen/X86/2009-09-16-CoalescerBug.ll
llvm/trunk/test/CodeGen/X86/2009-09-21-NoSpillLoopCount.ll
llvm/trunk/test/CodeGen/X86/2009-09-22-CoalescerBug.ll
llvm/trunk/test/CodeGen/X86/2009-10-19-EmergencySpill.ll
llvm/trunk/test/CodeGen/X86/2009-10-19-atomic-cmp-eflags.ll
llvm/trunk/test/CodeGen/X86/2009-10-25-RewriterBug.ll
llvm/trunk/test/CodeGen/X86/2009-11-16-MachineLICM.ll
llvm/trunk/test/CodeGen/X86/2009-11-25-ImpDefBug.ll
llvm/trunk/test/CodeGen/X86/2009-12-01-EarlyClobberBug.ll
llvm/trunk/test/CodeGen/X86/2009-12-11-TLSNoRedZone.ll
llvm/trunk/test/CodeGen/X86/20090313-signext.ll
llvm/trunk/test/CodeGen/X86/2010-01-13-OptExtBug.ll
llvm/trunk/test/CodeGen/X86/2010-01-15-SelectionDAGCycle.ll
llvm/trunk/test/CodeGen/X86/2010-01-18-DbgValue.ll
llvm/trunk/test/CodeGen/X86/2010-01-19-OptExtBug.ll
llvm/trunk/test/CodeGen/X86/2010-02-04-SchedulerBug.ll
llvm/trunk/test/CodeGen/X86/2010-02-11-NonTemporal.ll
llvm/trunk/test/CodeGen/X86/2010-02-12-CoalescerBug-Impdef.ll
llvm/trunk/test/CodeGen/X86/2010-02-19-TailCallRetAddrBug.ll
llvm/trunk/test/CodeGen/X86/2010-02-23-RematImplicitSubreg.ll
llvm/trunk/test/CodeGen/X86/2010-03-17-ISelBug.ll
llvm/trunk/test/CodeGen/X86/2010-04-06-SSEDomainFixCrash.ll
llvm/trunk/test/CodeGen/X86/2010-04-08-CoalescerBug.ll
llvm/trunk/test/CodeGen/X86/2010-04-13-AnalyzeBranchCrash.ll
llvm/trunk/test/CodeGen/X86/2010-04-30-LocalAlloc-LandingPad.ll
llvm/trunk/test/CodeGen/X86/2010-05-05-LocalAllocEarlyClobber.ll
llvm/trunk/test/CodeGen/X86/2010-05-07-ldconvert.ll
llvm/trunk/test/CodeGen/X86/2010-05-10-DAGCombinerBug.ll
llvm/trunk/test/CodeGen/X86/2010-05-16-nosseconversion.ll
llvm/trunk/test/CodeGen/X86/2010-05-26-DotDebugLoc.ll
llvm/trunk/test/CodeGen/X86/2010-05-26-FP_TO_INT-crash.ll
llvm/trunk/test/CodeGen/X86/2010-06-14-fast-isel-fs-load.ll
llvm/trunk/test/CodeGen/X86/2010-06-15-FastAllocEarlyCLobber.ll
llvm/trunk/test/CodeGen/X86/2010-06-25-CoalescerSubRegDefDead.ll
llvm/trunk/test/CodeGen/X86/2010-06-25-asm-RA-crash.ll
llvm/trunk/test/CodeGen/X86/2010-06-28-matched-g-constraint.ll
llvm/trunk/test/CodeGen/X86/2010-07-02-UnfoldBug.ll
llvm/trunk/test/CodeGen/X86/2010-07-11-FPStackLoneUse.ll
llvm/trunk/test/CodeGen/X86/2010-08-04-MaskedSignedCompare.ll
llvm/trunk/test/CodeGen/X86/2010-08-04-StackVariable.ll
llvm/trunk/test/CodeGen/X86/2010-09-01-RemoveCopyByCommutingDef.ll
llvm/trunk/test/CodeGen/X86/2010-09-17-SideEffectsInChain.ll
llvm/trunk/test/CodeGen/X86/2010-11-09-MOVLPS.ll
llvm/trunk/test/CodeGen/X86/2010-11-18-SelectOfExtload.ll
llvm/trunk/test/CodeGen/X86/2011-02-12-shuffle.ll
llvm/trunk/test/CodeGen/X86/2011-03-02-DAGCombiner.ll
llvm/trunk/test/CodeGen/X86/2011-03-09-Physreg-Coalescing.ll
llvm/trunk/test/CodeGen/X86/2011-04-13-SchedCmpJmp.ll
llvm/trunk/test/CodeGen/X86/2011-05-09-loaduse.ll
llvm/trunk/test/CodeGen/X86/2011-05-26-UnreachableBlockElim.ll
llvm/trunk/test/CodeGen/X86/2011-05-27-CrossClassCoalescing.ll
llvm/trunk/test/CodeGen/X86/2011-06-01-fildll.ll
llvm/trunk/test/CodeGen/X86/2011-06-03-x87chain.ll
llvm/trunk/test/CodeGen/X86/2011-06-12-FastAllocSpill.ll
llvm/trunk/test/CodeGen/X86/2011-07-13-BadFrameIndexDisplacement.ll
llvm/trunk/test/CodeGen/X86/2011-09-14-valcoalesce.ll
llvm/trunk/test/CodeGen/X86/2011-09-21-setcc-bug.ll
llvm/trunk/test/CodeGen/X86/2011-10-11-srl.ll
llvm/trunk/test/CodeGen/X86/2011-10-12-MachineCSE.ll
llvm/trunk/test/CodeGen/X86/2011-10-18-FastISel-VectorParams.ll
llvm/trunk/test/CodeGen/X86/2011-10-19-LegelizeLoad.ll
llvm/trunk/test/CodeGen/X86/2011-10-19-widen_vselect.ll
llvm/trunk/test/CodeGen/X86/2011-10-27-tstore.ll
llvm/trunk/test/CodeGen/X86/2011-11-22-AVX2-Domains.ll
llvm/trunk/test/CodeGen/X86/2011-12-08-AVXISelBugs.ll
llvm/trunk/test/CodeGen/X86/2011-12-26-extractelement-duplicate-load.ll
llvm/trunk/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll
llvm/trunk/test/CodeGen/X86/2012-01-11-split-cv.ll
llvm/trunk/test/CodeGen/X86/2012-01-12-extract-sv.ll
llvm/trunk/test/CodeGen/X86/2012-01-16-mfence-nosse-flags.ll
llvm/trunk/test/CodeGen/X86/2012-02-12-dagco.ll
llvm/trunk/test/CodeGen/X86/2012-02-29-CoalescerBug.ll
llvm/trunk/test/CodeGen/X86/2012-03-26-PostRALICMBug.ll
llvm/trunk/test/CodeGen/X86/2012-04-26-sdglue.ll
llvm/trunk/test/CodeGen/X86/2012-07-10-extload64.ll
llvm/trunk/test/CodeGen/X86/2012-07-15-broadcastfold.ll
llvm/trunk/test/CodeGen/X86/2012-08-17-legalizer-crash.ll
llvm/trunk/test/CodeGen/X86/2012-09-28-CGPBug.ll
llvm/trunk/test/CodeGen/X86/2012-10-02-DAGCycle.ll
llvm/trunk/test/CodeGen/X86/2012-10-03-DAGCycle.ll
llvm/trunk/test/CodeGen/X86/2012-10-18-crash-dagco.ll
llvm/trunk/test/CodeGen/X86/2012-11-28-merge-store-alias.ll
llvm/trunk/test/CodeGen/X86/2012-11-30-handlemove-dbg.ll
llvm/trunk/test/CodeGen/X86/2012-11-30-misched-dbg.ll
llvm/trunk/test/CodeGen/X86/2012-12-06-python27-miscompile.ll
llvm/trunk/test/CodeGen/X86/2012-12-19-NoImplicitFloat.ll
llvm/trunk/test/CodeGen/X86/2013-03-13-VEX-DestReg.ll
llvm/trunk/test/CodeGen/X86/2013-10-14-FastISel-incorrect-vreg.ll
llvm/trunk/test/CodeGen/X86/Atomics-64.ll
llvm/trunk/test/CodeGen/X86/GC/alloc_loop.ll
llvm/trunk/test/CodeGen/X86/GC/argpromotion.ll
llvm/trunk/test/CodeGen/X86/GC/inline.ll
llvm/trunk/test/CodeGen/X86/GC/inline2.ll
llvm/trunk/test/CodeGen/X86/MachineBranchProb.ll
llvm/trunk/test/CodeGen/X86/MachineSink-DbgValue.ll
llvm/trunk/test/CodeGen/X86/MachineSink-eflags.ll
llvm/trunk/test/CodeGen/X86/MergeConsecutiveStores.ll
llvm/trunk/test/CodeGen/X86/StackColoring.ll
llvm/trunk/test/CodeGen/X86/SwitchLowering.ll
llvm/trunk/test/CodeGen/X86/SwizzleShuff.ll
llvm/trunk/test/CodeGen/X86/abi-isel.ll
llvm/trunk/test/CodeGen/X86/addr-mode-matcher.ll
llvm/trunk/test/CodeGen/X86/address-type-promotion-constantexpr.ll
llvm/trunk/test/CodeGen/X86/aliases.ll
llvm/trunk/test/CodeGen/X86/aligned-variadic.ll
llvm/trunk/test/CodeGen/X86/and-su.ll
llvm/trunk/test/CodeGen/X86/atom-call-reg-indirect-foldedreload32.ll
llvm/trunk/test/CodeGen/X86/atom-call-reg-indirect-foldedreload64.ll
llvm/trunk/test/CodeGen/X86/atom-call-reg-indirect.ll
llvm/trunk/test/CodeGen/X86/atom-cmpb.ll
llvm/trunk/test/CodeGen/X86/atom-fixup-lea1.ll
llvm/trunk/test/CodeGen/X86/atom-fixup-lea2.ll
llvm/trunk/test/CodeGen/X86/atom-fixup-lea3.ll
llvm/trunk/test/CodeGen/X86/atom-fixup-lea4.ll
llvm/trunk/test/CodeGen/X86/atom-lea-addw-bug.ll
llvm/trunk/test/CodeGen/X86/atom-sched.ll
llvm/trunk/test/CodeGen/X86/atomic-dagsched.ll
llvm/trunk/test/CodeGen/X86/atomic-load-store-wide.ll
llvm/trunk/test/CodeGen/X86/atomic-load-store.ll
llvm/trunk/test/CodeGen/X86/atomic-or.ll
llvm/trunk/test/CodeGen/X86/atomic-pointer.ll
llvm/trunk/test/CodeGen/X86/atomic128.ll
llvm/trunk/test/CodeGen/X86/atomic_mi.ll
llvm/trunk/test/CodeGen/X86/atomic_op.ll
llvm/trunk/test/CodeGen/X86/avoid-loop-align-2.ll
llvm/trunk/test/CodeGen/X86/avoid-loop-align.ll
llvm/trunk/test/CodeGen/X86/avoid_complex_am.ll
llvm/trunk/test/CodeGen/X86/avx-arith.ll
llvm/trunk/test/CodeGen/X86/avx-basic.ll
llvm/trunk/test/CodeGen/X86/avx-bitcast.ll
llvm/trunk/test/CodeGen/X86/avx-cvt.ll
llvm/trunk/test/CodeGen/X86/avx-intel-ocl.ll
llvm/trunk/test/CodeGen/X86/avx-intrinsics-x86.ll
llvm/trunk/test/CodeGen/X86/avx-load-store.ll
llvm/trunk/test/CodeGen/X86/avx-logic.ll
llvm/trunk/test/CodeGen/X86/avx-splat.ll
llvm/trunk/test/CodeGen/X86/avx-unpack.ll
llvm/trunk/test/CodeGen/X86/avx-varargs-x86_64.ll
llvm/trunk/test/CodeGen/X86/avx-vbroadcast.ll
llvm/trunk/test/CodeGen/X86/avx-vinsertf128.ll
llvm/trunk/test/CodeGen/X86/avx-vperm2x128.ll
llvm/trunk/test/CodeGen/X86/avx-vzeroupper.ll
llvm/trunk/test/CodeGen/X86/avx.ll
llvm/trunk/test/CodeGen/X86/avx1-logical-load-folding.ll
llvm/trunk/test/CodeGen/X86/avx2-conversions.ll
llvm/trunk/test/CodeGen/X86/avx2-pmovxrm-intrinsics.ll
llvm/trunk/test/CodeGen/X86/avx2-shift.ll
llvm/trunk/test/CodeGen/X86/avx2-vbroadcast.ll
llvm/trunk/test/CodeGen/X86/avx512-arith.ll
llvm/trunk/test/CodeGen/X86/avx512-build-vector.ll
llvm/trunk/test/CodeGen/X86/avx512-cvt.ll
llvm/trunk/test/CodeGen/X86/avx512-gather-scatter-intrin.ll
llvm/trunk/test/CodeGen/X86/avx512-i1test.ll
llvm/trunk/test/CodeGen/X86/avx512-insert-extract.ll
llvm/trunk/test/CodeGen/X86/avx512-intel-ocl.ll
llvm/trunk/test/CodeGen/X86/avx512-intrinsics.ll
llvm/trunk/test/CodeGen/X86/avx512-logic.ll
llvm/trunk/test/CodeGen/X86/avx512-mask-op.ll
llvm/trunk/test/CodeGen/X86/avx512-mov.ll
llvm/trunk/test/CodeGen/X86/avx512-round.ll
llvm/trunk/test/CodeGen/X86/avx512-shift.ll
llvm/trunk/test/CodeGen/X86/avx512-vbroadcast.ll
llvm/trunk/test/CodeGen/X86/avx512-vec-cmp.ll
llvm/trunk/test/CodeGen/X86/avx512bw-arith.ll
llvm/trunk/test/CodeGen/X86/avx512bw-mask-op.ll
llvm/trunk/test/CodeGen/X86/avx512bw-mov.ll
llvm/trunk/test/CodeGen/X86/avx512bw-vec-cmp.ll
llvm/trunk/test/CodeGen/X86/avx512bwvl-arith.ll
llvm/trunk/test/CodeGen/X86/avx512bwvl-intrinsics.ll
llvm/trunk/test/CodeGen/X86/avx512bwvl-mov.ll
llvm/trunk/test/CodeGen/X86/avx512bwvl-vec-cmp.ll
llvm/trunk/test/CodeGen/X86/avx512dq-mask-op.ll
llvm/trunk/test/CodeGen/X86/avx512er-intrinsics.ll
llvm/trunk/test/CodeGen/X86/avx512vl-arith.ll
llvm/trunk/test/CodeGen/X86/avx512vl-intrinsics.ll
llvm/trunk/test/CodeGen/X86/avx512vl-mov.ll
llvm/trunk/test/CodeGen/X86/avx512vl-vec-cmp.ll
llvm/trunk/test/CodeGen/X86/bitcast-mmx.ll
llvm/trunk/test/CodeGen/X86/block-placement.ll
llvm/trunk/test/CodeGen/X86/bmi.ll
llvm/trunk/test/CodeGen/X86/break-anti-dependencies.ll
llvm/trunk/test/CodeGen/X86/break-false-dep.ll
llvm/trunk/test/CodeGen/X86/bswap.ll
llvm/trunk/test/CodeGen/X86/byval-align.ll
llvm/trunk/test/CodeGen/X86/byval.ll
llvm/trunk/test/CodeGen/X86/call-push.ll
llvm/trunk/test/CodeGen/X86/cas.ll
llvm/trunk/test/CodeGen/X86/chain_order.ll
llvm/trunk/test/CodeGen/X86/change-compare-stride-1.ll
llvm/trunk/test/CodeGen/X86/clobber-fi0.ll
llvm/trunk/test/CodeGen/X86/cmov-into-branch.ll
llvm/trunk/test/CodeGen/X86/cmov.ll
llvm/trunk/test/CodeGen/X86/cmp.ll
llvm/trunk/test/CodeGen/X86/cmpxchg-clobber-flags.ll
llvm/trunk/test/CodeGen/X86/cmpxchg-i1.ll
llvm/trunk/test/CodeGen/X86/cmpxchg-i128-i1.ll
llvm/trunk/test/CodeGen/X86/coalesce-esp.ll
llvm/trunk/test/CodeGen/X86/coalesce-implicitdef.ll
llvm/trunk/test/CodeGen/X86/coalescer-commute1.ll
llvm/trunk/test/CodeGen/X86/coalescer-commute4.ll
llvm/trunk/test/CodeGen/X86/coalescer-cross.ll
llvm/trunk/test/CodeGen/X86/coalescer-dce2.ll
llvm/trunk/test/CodeGen/X86/coalescer-identity.ll
llvm/trunk/test/CodeGen/X86/code_placement.ll
llvm/trunk/test/CodeGen/X86/codegen-prepare-addrmode-sext.ll
llvm/trunk/test/CodeGen/X86/codegen-prepare-cast.ll
llvm/trunk/test/CodeGen/X86/codegen-prepare-extload.ll
llvm/trunk/test/CodeGen/X86/codegen-prepare.ll
llvm/trunk/test/CodeGen/X86/codemodel.ll
llvm/trunk/test/CodeGen/X86/combiner-aa-0.ll
llvm/trunk/test/CodeGen/X86/combiner-aa-1.ll
llvm/trunk/test/CodeGen/X86/commute-blend-avx2.ll
llvm/trunk/test/CodeGen/X86/commute-blend-sse41.ll
llvm/trunk/test/CodeGen/X86/commute-clmul.ll
llvm/trunk/test/CodeGen/X86/commute-fcmp.ll
llvm/trunk/test/CodeGen/X86/commute-intrinsic.ll
llvm/trunk/test/CodeGen/X86/commute-xop.ll
llvm/trunk/test/CodeGen/X86/compact-unwind.ll
llvm/trunk/test/CodeGen/X86/complex-asm.ll
llvm/trunk/test/CodeGen/X86/computeKnownBits_urem.ll
llvm/trunk/test/CodeGen/X86/const-base-addr.ll
llvm/trunk/test/CodeGen/X86/constant-combines.ll
llvm/trunk/test/CodeGen/X86/constant-hoisting-optnone.ll
llvm/trunk/test/CodeGen/X86/constant-hoisting-shift-immediate.ll
llvm/trunk/test/CodeGen/X86/convert-2-addr-3-addr-inc64.ll
llvm/trunk/test/CodeGen/X86/cppeh-catch-all.ll
llvm/trunk/test/CodeGen/X86/cppeh-catch-scalar.ll
llvm/trunk/test/CodeGen/X86/cppeh-frame-vars.ll
llvm/trunk/test/CodeGen/X86/crash-O0.ll
llvm/trunk/test/CodeGen/X86/crash-nosse.ll
llvm/trunk/test/CodeGen/X86/crash.ll
llvm/trunk/test/CodeGen/X86/critical-anti-dep-breaker.ll
llvm/trunk/test/CodeGen/X86/cse-add-with-overflow.ll
llvm/trunk/test/CodeGen/X86/cvt16.ll
llvm/trunk/test/CodeGen/X86/dagcombine-buildvector.ll
llvm/trunk/test/CodeGen/X86/dagcombine-cse.ll
llvm/trunk/test/CodeGen/X86/darwin-quote.ll
llvm/trunk/test/CodeGen/X86/dbg-changes-codegen.ll
llvm/trunk/test/CodeGen/X86/dbg-combine.ll
llvm/trunk/test/CodeGen/X86/discontiguous-loops.ll
llvm/trunk/test/CodeGen/X86/div8.ll
llvm/trunk/test/CodeGen/X86/dllimport-x86_64.ll
llvm/trunk/test/CodeGen/X86/dllimport.ll
llvm/trunk/test/CodeGen/X86/dollar-name.ll
llvm/trunk/test/CodeGen/X86/dont-trunc-store-double-to-float.ll
llvm/trunk/test/CodeGen/X86/dynamic-allocas-VLAs.ll
llvm/trunk/test/CodeGen/X86/early-ifcvt.ll
llvm/trunk/test/CodeGen/X86/emit-big-cst.ll
llvm/trunk/test/CodeGen/X86/expand-opaque-const.ll
llvm/trunk/test/CodeGen/X86/extend.ll
llvm/trunk/test/CodeGen/X86/extract-extract.ll
llvm/trunk/test/CodeGen/X86/extractelement-load.ll
llvm/trunk/test/CodeGen/X86/extractps.ll
llvm/trunk/test/CodeGen/X86/f16c-intrinsics.ll
llvm/trunk/test/CodeGen/X86/fast-isel-args-fail.ll
llvm/trunk/test/CodeGen/X86/fast-isel-avoid-unnecessary-pic-base.ll
llvm/trunk/test/CodeGen/X86/fast-isel-call-bool.ll
llvm/trunk/test/CodeGen/X86/fast-isel-fold-mem.ll
llvm/trunk/test/CodeGen/X86/fast-isel-fptrunc-fpext.ll
llvm/trunk/test/CodeGen/X86/fast-isel-gep.ll
llvm/trunk/test/CodeGen/X86/fast-isel-gv.ll
llvm/trunk/test/CodeGen/X86/fast-isel-i1.ll
llvm/trunk/test/CodeGen/X86/fast-isel-int-float-conversion.ll
llvm/trunk/test/CodeGen/X86/fast-isel-mem.ll
llvm/trunk/test/CodeGen/X86/fast-isel-tailcall.ll
llvm/trunk/test/CodeGen/X86/fast-isel-tls.ll
llvm/trunk/test/CodeGen/X86/fast-isel-x86-64.ll
llvm/trunk/test/CodeGen/X86/fast-isel-x86.ll
llvm/trunk/test/CodeGen/X86/fast-isel.ll
llvm/trunk/test/CodeGen/X86/fastcc-byval.ll
llvm/trunk/test/CodeGen/X86/fastcc-sret.ll
llvm/trunk/test/CodeGen/X86/fastcc.ll
llvm/trunk/test/CodeGen/X86/fastisel-gep-promote-before-add.ll
llvm/trunk/test/CodeGen/X86/fma-do-not-commute.ll
llvm/trunk/test/CodeGen/X86/fma4-intrinsics-x86_64-folded-load.ll
llvm/trunk/test/CodeGen/X86/fma_patterns.ll
llvm/trunk/test/CodeGen/X86/fmul-zero.ll
llvm/trunk/test/CodeGen/X86/fold-add.ll
llvm/trunk/test/CodeGen/X86/fold-and-shift.ll
llvm/trunk/test/CodeGen/X86/fold-call-2.ll
llvm/trunk/test/CodeGen/X86/fold-call-3.ll
llvm/trunk/test/CodeGen/X86/fold-call-oper.ll
llvm/trunk/test/CodeGen/X86/fold-call.ll
llvm/trunk/test/CodeGen/X86/fold-load-unops.ll
llvm/trunk/test/CodeGen/X86/fold-load-vec.ll
llvm/trunk/test/CodeGen/X86/fold-load.ll
llvm/trunk/test/CodeGen/X86/fold-mul-lohi.ll
llvm/trunk/test/CodeGen/X86/fold-pcmpeqd-2.ll
llvm/trunk/test/CodeGen/X86/fold-sext-trunc.ll
llvm/trunk/test/CodeGen/X86/fold-tied-op.ll
llvm/trunk/test/CodeGen/X86/fold-vex.ll
llvm/trunk/test/CodeGen/X86/fold-zext-trunc.ll
llvm/trunk/test/CodeGen/X86/force-align-stack-alloca.ll
llvm/trunk/test/CodeGen/X86/fp-double-rounding.ll
llvm/trunk/test/CodeGen/X86/fp-load-trunc.ll
llvm/trunk/test/CodeGen/X86/fp-stack-O0-crash.ll
llvm/trunk/test/CodeGen/X86/fp-stack-compare-cmov.ll
llvm/trunk/test/CodeGen/X86/fp-stack-compare.ll
llvm/trunk/test/CodeGen/X86/fp-stack-ret.ll
llvm/trunk/test/CodeGen/X86/fp-stack.ll
llvm/trunk/test/CodeGen/X86/fp2sint.ll
llvm/trunk/test/CodeGen/X86/fp_load_cast_fold.ll
llvm/trunk/test/CodeGen/X86/fp_load_fold.ll
llvm/trunk/test/CodeGen/X86/frameallocate.ll
llvm/trunk/test/CodeGen/X86/full-lsr.ll
llvm/trunk/test/CodeGen/X86/gather-addresses.ll
llvm/trunk/test/CodeGen/X86/ghc-cc.ll
llvm/trunk/test/CodeGen/X86/ghc-cc64.ll
llvm/trunk/test/CodeGen/X86/gs-fold.ll
llvm/trunk/test/CodeGen/X86/h-register-addressing-32.ll
llvm/trunk/test/CodeGen/X86/h-register-addressing-64.ll
llvm/trunk/test/CodeGen/X86/half.ll
llvm/trunk/test/CodeGen/X86/hidden-vis-2.ll
llvm/trunk/test/CodeGen/X86/hidden-vis-3.ll
llvm/trunk/test/CodeGen/X86/hidden-vis-4.ll
llvm/trunk/test/CodeGen/X86/hidden-vis-pic.ll
llvm/trunk/test/CodeGen/X86/hipe-cc.ll
llvm/trunk/test/CodeGen/X86/hipe-cc64.ll
llvm/trunk/test/CodeGen/X86/hoist-invariant-load.ll
llvm/trunk/test/CodeGen/X86/i128-mul.ll
llvm/trunk/test/CodeGen/X86/i128-ret.ll
llvm/trunk/test/CodeGen/X86/i1narrowfail.ll
llvm/trunk/test/CodeGen/X86/i256-add.ll
llvm/trunk/test/CodeGen/X86/i2k.ll
llvm/trunk/test/CodeGen/X86/i486-fence-loop.ll
llvm/trunk/test/CodeGen/X86/i64-mem-copy.ll
llvm/trunk/test/CodeGen/X86/inline-asm-fpstack.ll
llvm/trunk/test/CodeGen/X86/inline-asm-out-regs.ll
llvm/trunk/test/CodeGen/X86/inline-asm-ptr-cast.ll
llvm/trunk/test/CodeGen/X86/inline-asm-stack-realign.ll
llvm/trunk/test/CodeGen/X86/inline-asm-stack-realign2.ll
llvm/trunk/test/CodeGen/X86/inline-asm-stack-realign3.ll
llvm/trunk/test/CodeGen/X86/inline-asm-tied.ll
llvm/trunk/test/CodeGen/X86/ins_split_regalloc.ll
llvm/trunk/test/CodeGen/X86/ins_subreg_coalesce-1.ll
llvm/trunk/test/CodeGen/X86/ins_subreg_coalesce-3.ll
llvm/trunk/test/CodeGen/X86/insertps-O0-bug.ll
llvm/trunk/test/CodeGen/X86/invalid-shift-immediate.ll
llvm/trunk/test/CodeGen/X86/isel-optnone.ll
llvm/trunk/test/CodeGen/X86/isel-sink.ll
llvm/trunk/test/CodeGen/X86/isel-sink2.ll
llvm/trunk/test/CodeGen/X86/isel-sink3.ll
llvm/trunk/test/CodeGen/X86/jump_sign.ll
llvm/trunk/test/CodeGen/X86/large-constants.ll
llvm/trunk/test/CodeGen/X86/ldzero.ll
llvm/trunk/test/CodeGen/X86/lea-5.ll
llvm/trunk/test/CodeGen/X86/lea-recursion.ll
llvm/trunk/test/CodeGen/X86/legalize-shift-64.ll
llvm/trunk/test/CodeGen/X86/licm-nested.ll
llvm/trunk/test/CodeGen/X86/liveness-local-regalloc.ll
llvm/trunk/test/CodeGen/X86/load-slice.ll
llvm/trunk/test/CodeGen/X86/longlong-deadload.ll
llvm/trunk/test/CodeGen/X86/loop-strength-reduce4.ll
llvm/trunk/test/CodeGen/X86/loop-strength-reduce7.ll
llvm/trunk/test/CodeGen/X86/loop-strength-reduce8.ll
llvm/trunk/test/CodeGen/X86/lsr-delayed-fold.ll
llvm/trunk/test/CodeGen/X86/lsr-i386.ll
llvm/trunk/test/CodeGen/X86/lsr-loop-exit-cond.ll
llvm/trunk/test/CodeGen/X86/lsr-normalization.ll
llvm/trunk/test/CodeGen/X86/lsr-redundant-addressing.ll
llvm/trunk/test/CodeGen/X86/lsr-reuse-trunc.ll
llvm/trunk/test/CodeGen/X86/lsr-reuse.ll
llvm/trunk/test/CodeGen/X86/lsr-static-addr.ll
llvm/trunk/test/CodeGen/X86/lsr-wrap.ll
llvm/trunk/test/CodeGen/X86/lzcnt-tzcnt.ll
llvm/trunk/test/CodeGen/X86/machine-cse.ll
llvm/trunk/test/CodeGen/X86/masked-iv-safe.ll
llvm/trunk/test/CodeGen/X86/masked-iv-unsafe.ll
llvm/trunk/test/CodeGen/X86/mcinst-lowering.ll
llvm/trunk/test/CodeGen/X86/mem-intrin-base-reg.ll
llvm/trunk/test/CodeGen/X86/mem-promote-integers.ll
llvm/trunk/test/CodeGen/X86/misaligned-memset.ll
llvm/trunk/test/CodeGen/X86/misched-aa-colored.ll
llvm/trunk/test/CodeGen/X86/misched-aa-mmos.ll
llvm/trunk/test/CodeGen/X86/misched-balance.ll
llvm/trunk/test/CodeGen/X86/misched-code-difference-with-debug.ll
llvm/trunk/test/CodeGen/X86/misched-crash.ll
llvm/trunk/test/CodeGen/X86/misched-fusion.ll
llvm/trunk/test/CodeGen/X86/misched-matmul.ll
llvm/trunk/test/CodeGen/X86/misched-matrix.ll
llvm/trunk/test/CodeGen/X86/misched-new.ll
llvm/trunk/test/CodeGen/X86/mmx-arg-passing-x86-64.ll
llvm/trunk/test/CodeGen/X86/mmx-arith.ll
llvm/trunk/test/CodeGen/X86/mmx-bitcast.ll
llvm/trunk/test/CodeGen/X86/mmx-copy-gprs.ll
llvm/trunk/test/CodeGen/X86/mmx-fold-load.ll
llvm/trunk/test/CodeGen/X86/movbe.ll
llvm/trunk/test/CodeGen/X86/movfs.ll
llvm/trunk/test/CodeGen/X86/movgs.ll
llvm/trunk/test/CodeGen/X86/movmsk.ll
llvm/trunk/test/CodeGen/X86/movtopush.ll
llvm/trunk/test/CodeGen/X86/ms-inline-asm.ll
llvm/trunk/test/CodeGen/X86/mul128_sext_loop.ll
llvm/trunk/test/CodeGen/X86/muloti.ll
llvm/trunk/test/CodeGen/X86/mult-alt-generic-i686.ll
llvm/trunk/test/CodeGen/X86/mult-alt-generic-x86_64.ll
llvm/trunk/test/CodeGen/X86/mult-alt-x86.ll
llvm/trunk/test/CodeGen/X86/multiple-loop-post-inc.ll
llvm/trunk/test/CodeGen/X86/mulx32.ll
llvm/trunk/test/CodeGen/X86/mulx64.ll
llvm/trunk/test/CodeGen/X86/musttail-indirect.ll
llvm/trunk/test/CodeGen/X86/musttail-varargs.ll
llvm/trunk/test/CodeGen/X86/nancvt.ll
llvm/trunk/test/CodeGen/X86/narrow-shl-load.ll
llvm/trunk/test/CodeGen/X86/narrow_op-1.ll
llvm/trunk/test/CodeGen/X86/negate-add-zero.ll
llvm/trunk/test/CodeGen/X86/no-cmov.ll
llvm/trunk/test/CodeGen/X86/norex-subreg.ll
llvm/trunk/test/CodeGen/X86/nosse-error1.ll
llvm/trunk/test/CodeGen/X86/nosse-error2.ll
llvm/trunk/test/CodeGen/X86/nosse-varargs.ll
llvm/trunk/test/CodeGen/X86/object-size.ll
llvm/trunk/test/CodeGen/X86/opt-ext-uses.ll
llvm/trunk/test/CodeGen/X86/optimize-max-0.ll
llvm/trunk/test/CodeGen/X86/optimize-max-2.ll
llvm/trunk/test/CodeGen/X86/optimize-max-3.ll
llvm/trunk/test/CodeGen/X86/packed_struct.ll
llvm/trunk/test/CodeGen/X86/palignr-2.ll
llvm/trunk/test/CodeGen/X86/patchpoint.ll
llvm/trunk/test/CodeGen/X86/peep-test-0.ll
llvm/trunk/test/CodeGen/X86/peep-test-1.ll
llvm/trunk/test/CodeGen/X86/peephole-fold-movsd.ll
llvm/trunk/test/CodeGen/X86/peephole-multiple-folds.ll
llvm/trunk/test/CodeGen/X86/phi-bit-propagation.ll
llvm/trunk/test/CodeGen/X86/phielim-split.ll
llvm/trunk/test/CodeGen/X86/phys-reg-local-regalloc.ll
llvm/trunk/test/CodeGen/X86/phys_subreg_coalesce-3.ll
llvm/trunk/test/CodeGen/X86/pic.ll
llvm/trunk/test/CodeGen/X86/pic_jumptable.ll
llvm/trunk/test/CodeGen/X86/pmovext.ll
llvm/trunk/test/CodeGen/X86/pmovsx-inreg.ll
llvm/trunk/test/CodeGen/X86/pmulld.ll
llvm/trunk/test/CodeGen/X86/pointer-vector.ll
llvm/trunk/test/CodeGen/X86/postra-licm.ll
llvm/trunk/test/CodeGen/X86/pr10475.ll
llvm/trunk/test/CodeGen/X86/pr10525.ll
llvm/trunk/test/CodeGen/X86/pr11334.ll
llvm/trunk/test/CodeGen/X86/pr12360.ll
llvm/trunk/test/CodeGen/X86/pr12889.ll
llvm/trunk/test/CodeGen/X86/pr13209.ll
llvm/trunk/test/CodeGen/X86/pr13859.ll
llvm/trunk/test/CodeGen/X86/pr13899.ll
llvm/trunk/test/CodeGen/X86/pr14161.ll
llvm/trunk/test/CodeGen/X86/pr14562.ll
llvm/trunk/test/CodeGen/X86/pr1505b.ll
llvm/trunk/test/CodeGen/X86/pr15267.ll
llvm/trunk/test/CodeGen/X86/pr15309.ll
llvm/trunk/test/CodeGen/X86/pr18023.ll
llvm/trunk/test/CodeGen/X86/pr18162.ll
llvm/trunk/test/CodeGen/X86/pr18846.ll
llvm/trunk/test/CodeGen/X86/pr20020.ll
llvm/trunk/test/CodeGen/X86/pr2177.ll
llvm/trunk/test/CodeGen/X86/pr2182.ll
llvm/trunk/test/CodeGen/X86/pr2326.ll
llvm/trunk/test/CodeGen/X86/pr2656.ll
llvm/trunk/test/CodeGen/X86/pr2849.ll
llvm/trunk/test/CodeGen/X86/pr2924.ll
llvm/trunk/test/CodeGen/X86/pr2982.ll
llvm/trunk/test/CodeGen/X86/pr3216.ll
llvm/trunk/test/CodeGen/X86/pr3241.ll
llvm/trunk/test/CodeGen/X86/pr3244.ll
llvm/trunk/test/CodeGen/X86/pr3317.ll
llvm/trunk/test/CodeGen/X86/pr3366.ll
llvm/trunk/test/CodeGen/X86/pr9127.ll
llvm/trunk/test/CodeGen/X86/pre-ra-sched.ll
llvm/trunk/test/CodeGen/X86/private-2.ll
llvm/trunk/test/CodeGen/X86/private.ll
llvm/trunk/test/CodeGen/X86/promote-assert-zext.ll
llvm/trunk/test/CodeGen/X86/promote-trunc.ll
llvm/trunk/test/CodeGen/X86/promote.ll
llvm/trunk/test/CodeGen/X86/pshufb-mask-comments.ll
llvm/trunk/test/CodeGen/X86/psubus.ll
llvm/trunk/test/CodeGen/X86/ragreedy-bug.ll
llvm/trunk/test/CodeGen/X86/ragreedy-hoist-spill.ll
llvm/trunk/test/CodeGen/X86/ragreedy-last-chance-recoloring.ll
llvm/trunk/test/CodeGen/X86/rd-mod-wr-eflags.ll
llvm/trunk/test/CodeGen/X86/regalloc-reconcile-broken-hints.ll
llvm/trunk/test/CodeGen/X86/regpressure.ll
llvm/trunk/test/CodeGen/X86/remat-constant.ll
llvm/trunk/test/CodeGen/X86/remat-fold-load.ll
llvm/trunk/test/CodeGen/X86/remat-invalid-liveness.ll
llvm/trunk/test/CodeGen/X86/remat-scalar-zero.ll
llvm/trunk/test/CodeGen/X86/reverse_branches.ll
llvm/trunk/test/CodeGen/X86/rip-rel-address.ll
llvm/trunk/test/CodeGen/X86/rot32.ll
llvm/trunk/test/CodeGen/X86/rot64.ll
llvm/trunk/test/CodeGen/X86/rotate4.ll
llvm/trunk/test/CodeGen/X86/sandybridge-loads.ll
llvm/trunk/test/CodeGen/X86/scalar-extract.ll
llvm/trunk/test/CodeGen/X86/scalar_widen_div.ll
llvm/trunk/test/CodeGen/X86/scalarize-bitcast.ll
llvm/trunk/test/CodeGen/X86/scev-interchange.ll
llvm/trunk/test/CodeGen/X86/segmented-stacks.ll
llvm/trunk/test/CodeGen/X86/seh-safe-div.ll
llvm/trunk/test/CodeGen/X86/select-with-and-or.ll
llvm/trunk/test/CodeGen/X86/select.ll
llvm/trunk/test/CodeGen/X86/setcc-narrowing.ll
llvm/trunk/test/CodeGen/X86/sext-load.ll
llvm/trunk/test/CodeGen/X86/sha.ll
llvm/trunk/test/CodeGen/X86/shift-and.ll
llvm/trunk/test/CodeGen/X86/shift-bmi2.ll
llvm/trunk/test/CodeGen/X86/shift-coalesce.ll
llvm/trunk/test/CodeGen/X86/shift-codegen.ll
llvm/trunk/test/CodeGen/X86/shift-combine.ll
llvm/trunk/test/CodeGen/X86/shift-folding.ll
llvm/trunk/test/CodeGen/X86/shift-one.ll
llvm/trunk/test/CodeGen/X86/shift-parts.ll
llvm/trunk/test/CodeGen/X86/shl-i64.ll
llvm/trunk/test/CodeGen/X86/shl_undef.ll
llvm/trunk/test/CodeGen/X86/shrink-compare.ll
llvm/trunk/test/CodeGen/X86/shuffle-combine-crash.ll
llvm/trunk/test/CodeGen/X86/sibcall-4.ll
llvm/trunk/test/CodeGen/X86/sibcall-5.ll
llvm/trunk/test/CodeGen/X86/sibcall.ll
llvm/trunk/test/CodeGen/X86/simple-zext.ll
llvm/trunk/test/CodeGen/X86/sink-hoist.ll
llvm/trunk/test/CodeGen/X86/slow-incdec.ll
llvm/trunk/test/CodeGen/X86/split-vector-bitcast.ll
llvm/trunk/test/CodeGen/X86/sse-align-0.ll
llvm/trunk/test/CodeGen/X86/sse-align-1.ll
llvm/trunk/test/CodeGen/X86/sse-align-10.ll
llvm/trunk/test/CodeGen/X86/sse-align-12.ll
llvm/trunk/test/CodeGen/X86/sse-align-2.ll
llvm/trunk/test/CodeGen/X86/sse-align-5.ll
llvm/trunk/test/CodeGen/X86/sse-align-6.ll
llvm/trunk/test/CodeGen/X86/sse-align-9.ll
llvm/trunk/test/CodeGen/X86/sse-domains.ll
llvm/trunk/test/CodeGen/X86/sse-intel-ocl.ll
llvm/trunk/test/CodeGen/X86/sse-load-ret.ll
llvm/trunk/test/CodeGen/X86/sse-unaligned-mem-feature.ll
llvm/trunk/test/CodeGen/X86/sse2.ll
llvm/trunk/test/CodeGen/X86/sse3-avx-addsub.ll
llvm/trunk/test/CodeGen/X86/sse3.ll
llvm/trunk/test/CodeGen/X86/sse41-pmovxrm-intrinsics.ll
llvm/trunk/test/CodeGen/X86/sse41.ll
llvm/trunk/test/CodeGen/X86/sse42-intrinsics-x86.ll
llvm/trunk/test/CodeGen/X86/ssp-data-layout.ll
llvm/trunk/test/CodeGen/X86/stack-align.ll
llvm/trunk/test/CodeGen/X86/stack-protector-dbginfo.ll
llvm/trunk/test/CodeGen/X86/stack-protector-vreg-to-vreg-copy.ll
llvm/trunk/test/CodeGen/X86/stack-protector-weight.ll
llvm/trunk/test/CodeGen/X86/stack-protector.ll
llvm/trunk/test/CodeGen/X86/stackmap.ll
llvm/trunk/test/CodeGen/X86/statepoint-forward.ll
llvm/trunk/test/CodeGen/X86/store-narrow.ll
llvm/trunk/test/CodeGen/X86/store_op_load_fold.ll
llvm/trunk/test/CodeGen/X86/store_op_load_fold2.ll
llvm/trunk/test/CodeGen/X86/stride-nine-with-base-reg.ll
llvm/trunk/test/CodeGen/X86/stride-reuse.ll
llvm/trunk/test/CodeGen/X86/subreg-to-reg-0.ll
llvm/trunk/test/CodeGen/X86/subreg-to-reg-2.ll
llvm/trunk/test/CodeGen/X86/subreg-to-reg-4.ll
llvm/trunk/test/CodeGen/X86/subreg-to-reg-6.ll
llvm/trunk/test/CodeGen/X86/switch-bt.ll
llvm/trunk/test/CodeGen/X86/switch-zextload.ll
llvm/trunk/test/CodeGen/X86/tail-call-win64.ll
llvm/trunk/test/CodeGen/X86/tail-dup-addr.ll
llvm/trunk/test/CodeGen/X86/tail-opts.ll
llvm/trunk/test/CodeGen/X86/tailcall-64.ll
llvm/trunk/test/CodeGen/X86/tailcall-returndup-void.ll
llvm/trunk/test/CodeGen/X86/tailcall-ri64.ll
llvm/trunk/test/CodeGen/X86/tailcallbyval.ll
llvm/trunk/test/CodeGen/X86/tailcallbyval64.ll
llvm/trunk/test/CodeGen/X86/tbm-intrinsics-x86_64.ll
llvm/trunk/test/CodeGen/X86/tbm_patterns.ll
llvm/trunk/test/CodeGen/X86/test-shrink-bug.ll
llvm/trunk/test/CodeGen/X86/testl-commute.ll
llvm/trunk/test/CodeGen/X86/tls-addr-non-leaf-function.ll
llvm/trunk/test/CodeGen/X86/tls-local-dynamic.ll
llvm/trunk/test/CodeGen/X86/tls-pic.ll
llvm/trunk/test/CodeGen/X86/tls-pie.ll
llvm/trunk/test/CodeGen/X86/tls.ll
llvm/trunk/test/CodeGen/X86/tlv-1.ll
llvm/trunk/test/CodeGen/X86/trunc-ext-ld-st.ll
llvm/trunk/test/CodeGen/X86/trunc-to-bool.ll
llvm/trunk/test/CodeGen/X86/twoaddr-pass-sink.ll
llvm/trunk/test/CodeGen/X86/unaligned-32-byte-memops.ll
llvm/trunk/test/CodeGen/X86/unaligned-spill-folding.ll
llvm/trunk/test/CodeGen/X86/unwindraise.ll
llvm/trunk/test/CodeGen/X86/use-add-flags.ll
llvm/trunk/test/CodeGen/X86/v4i32load-crash.ll
llvm/trunk/test/CodeGen/X86/v8i1-masks.ll
llvm/trunk/test/CodeGen/X86/vaargs.ll
llvm/trunk/test/CodeGen/X86/vararg_tailcall.ll
llvm/trunk/test/CodeGen/X86/vec-loadsingles-alignment.ll
llvm/trunk/test/CodeGen/X86/vec-trunc-store.ll
llvm/trunk/test/CodeGen/X86/vec_align.ll
llvm/trunk/test/CodeGen/X86/vec_anyext.ll
llvm/trunk/test/CodeGen/X86/vec_extract-mmx.ll
llvm/trunk/test/CodeGen/X86/vec_extract-sse4.ll
llvm/trunk/test/CodeGen/X86/vec_extract.ll
llvm/trunk/test/CodeGen/X86/vec_fpext.ll
llvm/trunk/test/CodeGen/X86/vec_i64.ll
llvm/trunk/test/CodeGen/X86/vec_ins_extract.ll
llvm/trunk/test/CodeGen/X86/vec_insert-5.ll
llvm/trunk/test/CodeGen/X86/vec_insert-mmx.ll
llvm/trunk/test/CodeGen/X86/vec_loadsingles.ll
llvm/trunk/test/CodeGen/X86/vec_logical.ll
llvm/trunk/test/CodeGen/X86/vec_set-7.ll
llvm/trunk/test/CodeGen/X86/vec_set-F.ll
llvm/trunk/test/CodeGen/X86/vec_setcc-2.ll
llvm/trunk/test/CodeGen/X86/vec_ss_load_fold.ll
llvm/trunk/test/CodeGen/X86/vec_trunc_sext.ll
llvm/trunk/test/CodeGen/X86/vec_zero.ll
llvm/trunk/test/CodeGen/X86/vector-gep.ll
llvm/trunk/test/CodeGen/X86/vector-intrinsics.ll
llvm/trunk/test/CodeGen/X86/vector-sext.ll
llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v2.ll
llvm/trunk/test/CodeGen/X86/vector-shuffle-128-v4.ll
llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v4.ll
llvm/trunk/test/CodeGen/X86/vector-shuffle-256-v8.ll
llvm/trunk/test/CodeGen/X86/vector-shuffle-combining.ll
llvm/trunk/test/CodeGen/X86/vector-shuffle-mmx.ll
llvm/trunk/test/CodeGen/X86/vector-shuffle-sse1.ll
llvm/trunk/test/CodeGen/X86/vector-variable-idx2.ll
llvm/trunk/test/CodeGen/X86/vector-zext.ll
llvm/trunk/test/CodeGen/X86/vector-zmov.ll
llvm/trunk/test/CodeGen/X86/vector.ll
llvm/trunk/test/CodeGen/X86/viabs.ll
llvm/trunk/test/CodeGen/X86/visibility2.ll
llvm/trunk/test/CodeGen/X86/volatile.ll
llvm/trunk/test/CodeGen/X86/vselect-avx.ll
llvm/trunk/test/CodeGen/X86/vselect-minmax.ll
llvm/trunk/test/CodeGen/X86/vshift-5.ll
llvm/trunk/test/CodeGen/X86/vshift-6.ll
llvm/trunk/test/CodeGen/X86/weak_def_can_be_hidden.ll
llvm/trunk/test/CodeGen/X86/widen_arith-1.ll
llvm/trunk/test/CodeGen/X86/widen_arith-2.ll
llvm/trunk/test/CodeGen/X86/widen_arith-3.ll
llvm/trunk/test/CodeGen/X86/widen_arith-4.ll
llvm/trunk/test/CodeGen/X86/widen_arith-5.ll
llvm/trunk/test/CodeGen/X86/widen_arith-6.ll
llvm/trunk/test/CodeGen/X86/widen_cast-1.ll
llvm/trunk/test/CodeGen/X86/widen_cast-2.ll
llvm/trunk/test/CodeGen/X86/widen_cast-4.ll
llvm/trunk/test/CodeGen/X86/widen_conversions.ll
llvm/trunk/test/CodeGen/X86/widen_load-0.ll
llvm/trunk/test/CodeGen/X86/widen_load-1.ll
llvm/trunk/test/CodeGen/X86/widen_load-2.ll
llvm/trunk/test/CodeGen/X86/win32_sret.ll
llvm/trunk/test/CodeGen/X86/win64_eh.ll
llvm/trunk/test/CodeGen/X86/win_eh_prepare.ll
llvm/trunk/test/CodeGen/X86/x32-function_pointer-1.ll
llvm/trunk/test/CodeGen/X86/x86-64-gv-offset.ll
llvm/trunk/test/CodeGen/X86/x86-64-jumps.ll
llvm/trunk/test/CodeGen/X86/x86-64-mem.ll
llvm/trunk/test/CodeGen/X86/x86-64-pic-4.ll
llvm/trunk/test/CodeGen/X86/x86-64-pic-5.ll
llvm/trunk/test/CodeGen/X86/x86-64-pic-6.ll
llvm/trunk/test/CodeGen/X86/x86-64-ptr-arg-simple.ll
llvm/trunk/test/CodeGen/X86/x86-64-sret-return.ll
llvm/trunk/test/CodeGen/X86/x86-64-static-relo-movl.ll
llvm/trunk/test/CodeGen/X86/x86-mixed-alignment-dagcombine.ll
llvm/trunk/test/CodeGen/X86/xop-intrinsics-x86_64.ll
llvm/trunk/test/CodeGen/X86/zext-extract_subreg.ll
llvm/trunk/test/CodeGen/X86/zext-sext.ll
llvm/trunk/test/CodeGen/X86/zlib-longest-match.ll
llvm/trunk/test/CodeGen/XCore/2009-01-08-Crash.ll
llvm/trunk/test/CodeGen/XCore/2010-02-25-LSR-Crash.ll
llvm/trunk/test/CodeGen/XCore/2011-01-31-DAGCombineBug.ll
llvm/trunk/test/CodeGen/XCore/atomic.ll
llvm/trunk/test/CodeGen/XCore/codemodel.ll
llvm/trunk/test/CodeGen/XCore/dwarf_debug.ll
llvm/trunk/test/CodeGen/XCore/exception.ll
llvm/trunk/test/CodeGen/XCore/indirectbr.ll
llvm/trunk/test/CodeGen/XCore/llvm-intrinsics.ll
llvm/trunk/test/CodeGen/XCore/load.ll
llvm/trunk/test/CodeGen/XCore/private.ll
llvm/trunk/test/CodeGen/XCore/scavenging.ll
llvm/trunk/test/CodeGen/XCore/trampoline.ll
llvm/trunk/test/CodeGen/XCore/unaligned_load.ll
llvm/trunk/test/CodeGen/XCore/unaligned_store_combine.ll
llvm/trunk/test/CodeGen/XCore/zextfree.ll
llvm/trunk/test/DebugInfo/2010-01-05-DbgScope.ll
llvm/trunk/test/DebugInfo/2010-03-24-MemberFn.ll
llvm/trunk/test/DebugInfo/2010-04-06-NestedFnDbgInfo.ll
llvm/trunk/test/DebugInfo/2010-04-19-FramePtr.ll
llvm/trunk/test/DebugInfo/2010-05-03-OriginDIE.ll
llvm/trunk/test/DebugInfo/2010-06-29-InlinedFnLocalVar.ll
llvm/trunk/test/DebugInfo/AArch64/frameindices.ll
llvm/trunk/test/DebugInfo/AArch64/struct_by_value.ll
llvm/trunk/test/DebugInfo/ARM/lowerbdgdeclare_vla.ll
llvm/trunk/test/DebugInfo/COFF/cpp-mangling.ll
llvm/trunk/test/DebugInfo/Inputs/line.ll
llvm/trunk/test/DebugInfo/PR20038.ll
llvm/trunk/test/DebugInfo/SystemZ/variable-loc.ll
llvm/trunk/test/DebugInfo/X86/2010-04-13-PubType.ll
llvm/trunk/test/DebugInfo/X86/2011-09-26-GlobalVarContext.ll
llvm/trunk/test/DebugInfo/X86/2011-12-16-BadStructRef.ll
llvm/trunk/test/DebugInfo/X86/DW_AT_byte_size.ll
llvm/trunk/test/DebugInfo/X86/DW_AT_linkage_name.ll
llvm/trunk/test/DebugInfo/X86/DW_AT_object_pointer.ll
llvm/trunk/test/DebugInfo/X86/arguments.ll
llvm/trunk/test/DebugInfo/X86/array.ll
llvm/trunk/test/DebugInfo/X86/array2.ll
llvm/trunk/test/DebugInfo/X86/block-capture.ll
llvm/trunk/test/DebugInfo/X86/byvalstruct.ll
llvm/trunk/test/DebugInfo/X86/cu-ranges-odr.ll
llvm/trunk/test/DebugInfo/X86/cu-ranges.ll
llvm/trunk/test/DebugInfo/X86/dbg-byval-parameter.ll
llvm/trunk/test/DebugInfo/X86/dbg-declare-arg.ll
llvm/trunk/test/DebugInfo/X86/dbg-declare.ll
llvm/trunk/test/DebugInfo/X86/dbg-prolog-end.ll
llvm/trunk/test/DebugInfo/X86/dbg-value-dag-combine.ll
llvm/trunk/test/DebugInfo/X86/dbg-value-location.ll
llvm/trunk/test/DebugInfo/X86/dbg-value-range.ll
llvm/trunk/test/DebugInfo/X86/dbg-value-terminator.ll
llvm/trunk/test/DebugInfo/X86/dbg_value_direct.ll
llvm/trunk/test/DebugInfo/X86/debug-info-blocks.ll
llvm/trunk/test/DebugInfo/X86/debug-info-static-member.ll
llvm/trunk/test/DebugInfo/X86/debug-loc-asan.ll
llvm/trunk/test/DebugInfo/X86/debug-loc-offset.ll
llvm/trunk/test/DebugInfo/X86/debug-ranges-offset.ll
llvm/trunk/test/DebugInfo/X86/decl-derived-member.ll
llvm/trunk/test/DebugInfo/X86/discriminator.ll
llvm/trunk/test/DebugInfo/X86/dwarf-aranges-no-dwarf-labels.ll
llvm/trunk/test/DebugInfo/X86/dwarf-aranges.ll
llvm/trunk/test/DebugInfo/X86/dwarf-public-names.ll
llvm/trunk/test/DebugInfo/X86/elf-names.ll
llvm/trunk/test/DebugInfo/X86/empty-and-one-elem-array.ll
llvm/trunk/test/DebugInfo/X86/ending-run.ll
llvm/trunk/test/DebugInfo/X86/fission-ranges.ll
llvm/trunk/test/DebugInfo/X86/formal_parameter.ll
llvm/trunk/test/DebugInfo/X86/generate-odr-hash.ll
llvm/trunk/test/DebugInfo/X86/gnu-public-names.ll
llvm/trunk/test/DebugInfo/X86/inline-member-function.ll
llvm/trunk/test/DebugInfo/X86/inline-seldag-test.ll
llvm/trunk/test/DebugInfo/X86/instcombine-instrinsics.ll
llvm/trunk/test/DebugInfo/X86/lexical_block.ll
llvm/trunk/test/DebugInfo/X86/line-info.ll
llvm/trunk/test/DebugInfo/X86/linkage-name.ll
llvm/trunk/test/DebugInfo/X86/misched-dbg-value.ll
llvm/trunk/test/DebugInfo/X86/nodebug_with_debug_loc.ll
llvm/trunk/test/DebugInfo/X86/op_deref.ll
llvm/trunk/test/DebugInfo/X86/parameters.ll
llvm/trunk/test/DebugInfo/X86/pieces-2.ll
llvm/trunk/test/DebugInfo/X86/pr11300.ll
llvm/trunk/test/DebugInfo/X86/pr12831.ll
llvm/trunk/test/DebugInfo/X86/pr19307.ll
llvm/trunk/test/DebugInfo/X86/recursive_inlining.ll
llvm/trunk/test/DebugInfo/X86/reference-argument.ll
llvm/trunk/test/DebugInfo/X86/rvalue-ref.ll
llvm/trunk/test/DebugInfo/X86/sret.ll
llvm/trunk/test/DebugInfo/X86/sroasplit-1.ll
llvm/trunk/test/DebugInfo/X86/sroasplit-2.ll
llvm/trunk/test/DebugInfo/X86/sroasplit-3.ll
llvm/trunk/test/DebugInfo/X86/sroasplit-4.ll
llvm/trunk/test/DebugInfo/X86/sroasplit-5.ll
llvm/trunk/test/DebugInfo/X86/stmt-list-multiple-compile-units.ll
llvm/trunk/test/DebugInfo/X86/subregisters.ll
llvm/trunk/test/DebugInfo/X86/vla.ll
llvm/trunk/test/DebugInfo/block-asan.ll
llvm/trunk/test/DebugInfo/cross-cu-inlining.ll
llvm/trunk/test/DebugInfo/cross-cu-linkonce-distinct.ll
llvm/trunk/test/DebugInfo/cross-cu-linkonce.ll
llvm/trunk/test/DebugInfo/cu-range-hole.ll
llvm/trunk/test/DebugInfo/cu-ranges.ll
llvm/trunk/test/DebugInfo/debug-info-always-inline.ll
llvm/trunk/test/DebugInfo/dwarf-public-names.ll
llvm/trunk/test/DebugInfo/incorrect-variable-debugloc.ll
llvm/trunk/test/DebugInfo/incorrect-variable-debugloc1.ll
llvm/trunk/test/DebugInfo/inheritance.ll
llvm/trunk/test/DebugInfo/inline-debug-info-multiret.ll
llvm/trunk/test/DebugInfo/inline-debug-info.ll
llvm/trunk/test/DebugInfo/inline-scopes.ll
llvm/trunk/test/DebugInfo/member-order.ll
llvm/trunk/test/DebugInfo/missing-abstract-variable.ll
llvm/trunk/test/DebugInfo/namespace.ll
llvm/trunk/test/DebugInfo/namespace_inline_function_definition.ll
llvm/trunk/test/DebugInfo/tu-composite.ll
llvm/trunk/test/DebugInfo/unconditional-branch.ll
llvm/trunk/test/ExecutionEngine/MCJIT/2002-12-16-ArgTest.ll
llvm/trunk/test/ExecutionEngine/MCJIT/2003-05-06-LivenessClobber.ll
llvm/trunk/test/ExecutionEngine/MCJIT/2003-05-07-ArgumentTest.ll
llvm/trunk/test/ExecutionEngine/MCJIT/2003-08-21-EnvironmentTest.ll
llvm/trunk/test/ExecutionEngine/MCJIT/2007-12-10-APIntLoadStore.ll
llvm/trunk/test/ExecutionEngine/MCJIT/2008-06-05-APInt-OverAShr.ll
llvm/trunk/test/ExecutionEngine/MCJIT/2013-04-04-RelocAddend.ll
llvm/trunk/test/ExecutionEngine/MCJIT/pr13727.ll
llvm/trunk/test/ExecutionEngine/MCJIT/remote/stubs-remote.ll
llvm/trunk/test/ExecutionEngine/MCJIT/remote/stubs-sm-pic.ll
llvm/trunk/test/ExecutionEngine/MCJIT/remote/test-common-symbols-remote.ll
llvm/trunk/test/ExecutionEngine/MCJIT/remote/test-fp-no-external-funcs-remote.ll
llvm/trunk/test/ExecutionEngine/MCJIT/remote/test-global-init-nonzero-remote.ll
llvm/trunk/test/ExecutionEngine/MCJIT/remote/test-global-init-nonzero-sm-pic.ll
llvm/trunk/test/ExecutionEngine/MCJIT/remote/test-ptr-reloc-remote.ll
llvm/trunk/test/ExecutionEngine/MCJIT/remote/test-ptr-reloc-sm-pic.ll
llvm/trunk/test/ExecutionEngine/MCJIT/stubs-sm-pic.ll
llvm/trunk/test/ExecutionEngine/MCJIT/stubs.ll
llvm/trunk/test/ExecutionEngine/MCJIT/test-common-symbols-alignment.ll
llvm/trunk/test/ExecutionEngine/MCJIT/test-common-symbols.ll
llvm/trunk/test/ExecutionEngine/MCJIT/test-fp-no-external-funcs.ll
llvm/trunk/test/ExecutionEngine/MCJIT/test-fp.ll
llvm/trunk/test/ExecutionEngine/MCJIT/test-global-ctors.ll
llvm/trunk/test/ExecutionEngine/MCJIT/test-global-init-nonzero-sm-pic.ll
llvm/trunk/test/ExecutionEngine/MCJIT/test-global-init-nonzero.ll
llvm/trunk/test/ExecutionEngine/MCJIT/test-global.ll
llvm/trunk/test/ExecutionEngine/MCJIT/test-loadstore.ll
llvm/trunk/test/ExecutionEngine/MCJIT/test-local.ll
llvm/trunk/test/ExecutionEngine/MCJIT/test-ptr-reloc-sm-pic.ll
llvm/trunk/test/ExecutionEngine/MCJIT/test-ptr-reloc.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/2002-12-16-ArgTest.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/2003-05-06-LivenessClobber.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/2003-05-07-ArgumentTest.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/2003-08-21-EnvironmentTest.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/2007-12-10-APIntLoadStore.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/2008-06-05-APInt-OverAShr.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/2013-04-04-RelocAddend.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/pr13727.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/remote/stubs-remote.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/remote/stubs-sm-pic.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/remote/test-common-symbols-remote.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/remote/test-fp-no-external-funcs-remote.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/remote/test-global-init-nonzero-remote.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/remote/test-global-init-nonzero-sm-pic.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/remote/test-ptr-reloc-remote.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/remote/test-ptr-reloc-sm-pic.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/stubs-sm-pic.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/stubs.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/test-common-symbols-alignment.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/test-common-symbols.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/test-fp-no-external-funcs.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/test-fp.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/test-global-ctors.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/test-global-init-nonzero-sm-pic.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/test-global-init-nonzero.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/test-global.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/test-loadstore.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/test-local.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/test-ptr-reloc-sm-pic.ll
llvm/trunk/test/ExecutionEngine/OrcJIT/test-ptr-reloc.ll
llvm/trunk/test/ExecutionEngine/frem.ll
llvm/trunk/test/ExecutionEngine/test-interp-vec-loadstore.ll
llvm/trunk/test/Feature/aliases.ll
llvm/trunk/test/Feature/md_on_instruction.ll
llvm/trunk/test/Feature/memorymarkers.ll
llvm/trunk/test/Feature/optnone-llc.ll
llvm/trunk/test/Feature/optnone-opt.ll
llvm/trunk/test/Feature/packed.ll
llvm/trunk/test/Feature/packed_struct.ll
llvm/trunk/test/Feature/ppcld.ll
llvm/trunk/test/Feature/recursivetype.ll
llvm/trunk/test/Feature/sparcld.ll
llvm/trunk/test/Feature/testalloca.ll
llvm/trunk/test/Feature/varargs_new.ll
llvm/trunk/test/Feature/weak_constant.ll
llvm/trunk/test/Feature/x86ld.ll
llvm/trunk/test/Instrumentation/AddressSanitizer/X86/bug_11395.ll
llvm/trunk/test/Instrumentation/AddressSanitizer/asan-vs-gvn.ll
llvm/trunk/test/Instrumentation/AddressSanitizer/basic.ll
llvm/trunk/test/Instrumentation/AddressSanitizer/debug_info.ll
llvm/trunk/test/Instrumentation/AddressSanitizer/do-not-instrument-promotable-allocas.ll
llvm/trunk/test/Instrumentation/AddressSanitizer/freebsd.ll
llvm/trunk/test/Instrumentation/AddressSanitizer/global_metadata.ll
llvm/trunk/test/Instrumentation/AddressSanitizer/instrument-dynamic-allocas.ll
llvm/trunk/test/Instrumentation/AddressSanitizer/instrument_global.ll
llvm/trunk/test/Instrumentation/AddressSanitizer/instrument_load_then_store.ll
llvm/trunk/test/Instrumentation/AddressSanitizer/instrumentation-with-call-threshold.ll
llvm/trunk/test/Instrumentation/AddressSanitizer/stack-poisoning.ll
llvm/trunk/test/Instrumentation/AddressSanitizer/stack_dynamic_alloca.ll
llvm/trunk/test/Instrumentation/AddressSanitizer/test64.ll
llvm/trunk/test/Instrumentation/AddressSanitizer/ubsan.ll
llvm/trunk/test/Instrumentation/BoundsChecking/many-trap.ll
llvm/trunk/test/Instrumentation/BoundsChecking/phi.ll
llvm/trunk/test/Instrumentation/BoundsChecking/simple-32.ll
llvm/trunk/test/Instrumentation/BoundsChecking/simple.ll
llvm/trunk/test/Instrumentation/DataFlowSanitizer/abilist.ll
llvm/trunk/test/Instrumentation/DataFlowSanitizer/debug-nonzero-labels.ll
llvm/trunk/test/Instrumentation/DataFlowSanitizer/load.ll
llvm/trunk/test/Instrumentation/DataFlowSanitizer/store.ll
llvm/trunk/test/Instrumentation/InstrProfiling/linkage.ll
llvm/trunk/test/Instrumentation/InstrProfiling/noruntime.ll
llvm/trunk/test/Instrumentation/MemorySanitizer/array_types.ll
llvm/trunk/test/Instrumentation/MemorySanitizer/atomics.ll
llvm/trunk/test/Instrumentation/MemorySanitizer/check_access_address.ll
llvm/trunk/test/Instrumentation/MemorySanitizer/instrumentation-with-call-threshold.ll
llvm/trunk/test/Instrumentation/MemorySanitizer/missing_origin.ll
llvm/trunk/test/Instrumentation/MemorySanitizer/msan_basic.ll
llvm/trunk/test/Instrumentation/MemorySanitizer/unreachable.ll
llvm/trunk/test/Instrumentation/MemorySanitizer/vector_cvt.ll
llvm/trunk/test/Instrumentation/SanitizerCoverage/coverage-dbg.ll
llvm/trunk/test/Instrumentation/SanitizerCoverage/coverage.ll
llvm/trunk/test/Instrumentation/ThreadSanitizer/atomic.ll
llvm/trunk/test/Instrumentation/ThreadSanitizer/capture.ll
llvm/trunk/test/Instrumentation/ThreadSanitizer/no_sanitize_thread.ll
llvm/trunk/test/Instrumentation/ThreadSanitizer/read_before_write.ll
llvm/trunk/test/Instrumentation/ThreadSanitizer/read_from_global.ll
llvm/trunk/test/Instrumentation/ThreadSanitizer/tsan-vs-gvn.ll
llvm/trunk/test/Instrumentation/ThreadSanitizer/tsan_basic.ll
llvm/trunk/test/Instrumentation/ThreadSanitizer/unaligned.ll
llvm/trunk/test/Instrumentation/ThreadSanitizer/vptr_read.ll
llvm/trunk/test/Integer/2007-01-19-TruncSext.ll
llvm/trunk/test/Integer/BitPacked.ll
llvm/trunk/test/Integer/packed_bt.ll
llvm/trunk/test/Integer/packed_struct_bt.ll
llvm/trunk/test/JitListener/multiple.ll
llvm/trunk/test/JitListener/simple.ll
llvm/trunk/test/LTO/X86/cfi_endproc.ll
llvm/trunk/test/LTO/X86/linkonce_odr_func.ll
llvm/trunk/test/LTO/X86/set-merged.ll
llvm/trunk/test/Linker/2004-05-07-TypeResolution2.ll
llvm/trunk/test/Linker/2008-03-05-AliasReference.ll
llvm/trunk/test/Linker/2009-09-03-mdnode.ll
llvm/trunk/test/Linker/2009-09-03-mdnode2.ll
llvm/trunk/test/Linker/DbgDeclare.ll
llvm/trunk/test/Linker/DbgDeclare2.ll
llvm/trunk/test/Linker/Inputs/linkage.b.ll
llvm/trunk/test/Linker/Inputs/replaced-function-matches-first-subprogram.ll
llvm/trunk/test/Linker/Inputs/testlink.ll
llvm/trunk/test/Linker/link-global-to-func.ll
llvm/trunk/test/Linker/partial-type-refinement.ll
llvm/trunk/test/Linker/replaced-function-matches-first-subprogram.ll
llvm/trunk/test/Linker/testlink.ll
llvm/trunk/test/Linker/type-unique-odr-b.ll
llvm/trunk/test/Linker/type-unique-simple2-a.ll
llvm/trunk/test/Linker/type-unique-simple2-b.ll
llvm/trunk/test/Linker/type-unique-type-array-a.ll
llvm/trunk/test/Linker/type-unique-type-array-b.ll
llvm/trunk/test/MC/AArch64/elf-globaladdress.ll
llvm/trunk/test/MC/ARM/data-in-code.ll
llvm/trunk/test/MC/ARM/elf-reloc-03.ll
llvm/trunk/test/MC/COFF/tricky-names.ll
llvm/trunk/test/MC/MachO/tlv-bss.ll
llvm/trunk/test/MC/MachO/x86-data-in-code.ll
llvm/trunk/test/MC/Mips/elf-bigendian.ll
llvm/trunk/test/Other/2004-08-16-PackedGlobalConstant.ll
llvm/trunk/test/Other/2004-08-16-PackedSelect.ll
llvm/trunk/test/Other/2004-08-16-PackedSimple.ll
llvm/trunk/test/Other/2004-08-20-PackedControlFlow.ll
llvm/trunk/test/Other/2007-09-10-PassManager.ll
llvm/trunk/test/Other/lint.ll
llvm/trunk/test/Other/optimization-remarks-inline.ll
llvm/trunk/test/Transforms/ADCE/2002-05-23-ZeroArgPHITest.ll
llvm/trunk/test/Transforms/ADCE/2002-05-28-Crash.ll
llvm/trunk/test/Transforms/ADCE/2002-07-17-AssertionFailure.ll
llvm/trunk/test/Transforms/ADCE/2002-07-17-PHIAssertion.ll
llvm/trunk/test/Transforms/ADCE/2003-06-11-InvalidCFG.ll
llvm/trunk/test/Transforms/ADCE/2003-06-24-BadSuccessor.ll
llvm/trunk/test/Transforms/ADCE/2003-06-24-BasicFunctionality.ll
llvm/trunk/test/Transforms/ADCE/basictest1.ll
llvm/trunk/test/Transforms/ADCE/basictest2.ll
llvm/trunk/test/Transforms/AddDiscriminators/basic.ll
llvm/trunk/test/Transforms/AddDiscriminators/first-only.ll
llvm/trunk/test/Transforms/AddDiscriminators/multiple.ll
llvm/trunk/test/Transforms/AddDiscriminators/no-discriminators.ll
llvm/trunk/test/Transforms/AlignmentFromAssumptions/simple.ll
llvm/trunk/test/Transforms/AlignmentFromAssumptions/simple32.ll
llvm/trunk/test/Transforms/AlignmentFromAssumptions/start-unk.ll
llvm/trunk/test/Transforms/ArgumentPromotion/2008-02-01-ReturnAttrs.ll
llvm/trunk/test/Transforms/ArgumentPromotion/2008-07-02-array-indexing.ll
llvm/trunk/test/Transforms/ArgumentPromotion/aggregate-promote.ll
llvm/trunk/test/Transforms/ArgumentPromotion/attrs.ll
llvm/trunk/test/Transforms/ArgumentPromotion/basictest.ll
llvm/trunk/test/Transforms/ArgumentPromotion/byval-2.ll
llvm/trunk/test/Transforms/ArgumentPromotion/byval.ll
llvm/trunk/test/Transforms/ArgumentPromotion/chained.ll
llvm/trunk/test/Transforms/ArgumentPromotion/control-flow.ll
llvm/trunk/test/Transforms/ArgumentPromotion/control-flow2.ll
llvm/trunk/test/Transforms/ArgumentPromotion/crash.ll
llvm/trunk/test/Transforms/ArgumentPromotion/dbg.ll
llvm/trunk/test/Transforms/ArgumentPromotion/fp80.ll
llvm/trunk/test/Transforms/ArgumentPromotion/inalloca.ll
llvm/trunk/test/Transforms/ArgumentPromotion/reserve-tbaa.ll
llvm/trunk/test/Transforms/BBVectorize/X86/loop1.ll
llvm/trunk/test/Transforms/BBVectorize/X86/sh-rec2.ll
llvm/trunk/test/Transforms/BBVectorize/X86/sh-rec3.ll
llvm/trunk/test/Transforms/BBVectorize/X86/simple-ldstr.ll
llvm/trunk/test/Transforms/BBVectorize/X86/wr-aliases.ll
llvm/trunk/test/Transforms/BBVectorize/func-alias.ll
llvm/trunk/test/Transforms/BBVectorize/ld1.ll
llvm/trunk/test/Transforms/BBVectorize/loop1.ll
llvm/trunk/test/Transforms/BBVectorize/mem-op-depth.ll
llvm/trunk/test/Transforms/BBVectorize/metadata.ll
llvm/trunk/test/Transforms/BBVectorize/no-ldstr-conn.ll
llvm/trunk/test/Transforms/BBVectorize/simple-ldstr-ptrs.ll
llvm/trunk/test/Transforms/BBVectorize/simple-ldstr.ll
llvm/trunk/test/Transforms/CodeExtractor/2004-03-14-DominanceProblem.ll
llvm/trunk/test/Transforms/CodeGenPrepare/X86/extend-sink-hoist.ll
llvm/trunk/test/Transforms/CodeGenPrepare/X86/sink-addrspacecast.ll
llvm/trunk/test/Transforms/CodeGenPrepare/statepoint-relocate.ll
llvm/trunk/test/Transforms/ConstProp/loads.ll
llvm/trunk/test/Transforms/ConstantHoisting/AArch64/const-addr.ll
llvm/trunk/test/Transforms/ConstantHoisting/PowerPC/const-base-addr.ll
llvm/trunk/test/Transforms/ConstantHoisting/PowerPC/masks.ll
llvm/trunk/test/Transforms/ConstantHoisting/X86/cast-inst.ll
llvm/trunk/test/Transforms/ConstantHoisting/X86/const-base-addr.ll
llvm/trunk/test/Transforms/CorrelatedValuePropagation/basic.ll
llvm/trunk/test/Transforms/CorrelatedValuePropagation/non-null.ll
llvm/trunk/test/Transforms/DeadArgElim/aggregates.ll
llvm/trunk/test/Transforms/DeadArgElim/deadexternal.ll
llvm/trunk/test/Transforms/DeadArgElim/deadretval2.ll
llvm/trunk/test/Transforms/DeadArgElim/keepalive.ll
llvm/trunk/test/Transforms/DeadStoreElimination/2011-03-25-DSEMiscompile.ll
llvm/trunk/test/Transforms/DeadStoreElimination/2011-09-06-EndOfFunction.ll
llvm/trunk/test/Transforms/DeadStoreElimination/2011-09-06-MemCpy.ll
llvm/trunk/test/Transforms/DeadStoreElimination/PartialStore.ll
llvm/trunk/test/Transforms/DeadStoreElimination/atomic.ll
llvm/trunk/test/Transforms/DeadStoreElimination/const-pointers.ll
llvm/trunk/test/Transforms/DeadStoreElimination/crash.ll
llvm/trunk/test/Transforms/DeadStoreElimination/free.ll
llvm/trunk/test/Transforms/DeadStoreElimination/simple.ll
llvm/trunk/test/Transforms/EarlyCSE/basic.ll
llvm/trunk/test/Transforms/FunctionAttrs/2008-09-03-ReadNone.ll
llvm/trunk/test/Transforms/FunctionAttrs/2008-09-13-VolatileRead.ll
llvm/trunk/test/Transforms/FunctionAttrs/2008-12-29-Constant.ll
llvm/trunk/test/Transforms/FunctionAttrs/2009-01-02-LocalStores.ll
llvm/trunk/test/Transforms/FunctionAttrs/2010-10-30-volatile.ll
llvm/trunk/test/Transforms/FunctionAttrs/atomic.ll
llvm/trunk/test/Transforms/FunctionAttrs/nocapture.ll
llvm/trunk/test/Transforms/FunctionAttrs/optnone-simple.ll
llvm/trunk/test/Transforms/GCOVProfiling/linezero.ll
llvm/trunk/test/Transforms/GCOVProfiling/return-block.ll
llvm/trunk/test/Transforms/GVN/2007-07-25-DominatedLoop.ll
llvm/trunk/test/Transforms/GVN/2007-07-25-InfiniteLoop.ll
llvm/trunk/test/Transforms/GVN/2007-07-25-Loop.ll
llvm/trunk/test/Transforms/GVN/2007-07-25-NestedLoop.ll
llvm/trunk/test/Transforms/GVN/2007-07-25-SinglePredecessor.ll
llvm/trunk/test/Transforms/GVN/2007-07-26-InterlockingLoops.ll
llvm/trunk/test/Transforms/GVN/2007-07-26-NonRedundant.ll
llvm/trunk/test/Transforms/GVN/2007-07-26-PhiErasure.ll
llvm/trunk/test/Transforms/GVN/2007-07-30-PredIDom.ll
llvm/trunk/test/Transforms/GVN/2007-07-31-NoDomInherit.ll
llvm/trunk/test/Transforms/GVN/2007-07-31-RedundantPhi.ll
llvm/trunk/test/Transforms/GVN/2008-02-12-UndefLoad.ll
llvm/trunk/test/Transforms/GVN/2008-02-13-NewPHI.ll
llvm/trunk/test/Transforms/GVN/2008-07-02-Unreachable.ll
llvm/trunk/test/Transforms/GVN/2008-12-09-SelfRemove.ll
llvm/trunk/test/Transforms/GVN/2008-12-12-RLE-Crash.ll
llvm/trunk/test/Transforms/GVN/2008-12-14-rle-reanalyze.ll
llvm/trunk/test/Transforms/GVN/2008-12-15-CacheVisited.ll
llvm/trunk/test/Transforms/GVN/2009-01-21-SortInvalidation.ll
llvm/trunk/test/Transforms/GVN/2009-01-22-SortInvalidation.ll
llvm/trunk/test/Transforms/GVN/2009-02-17-LoadPRECrash.ll
llvm/trunk/test/Transforms/GVN/2009-06-17-InvalidPRE.ll
llvm/trunk/test/Transforms/GVN/2009-07-13-MemDepSortFail.ll
llvm/trunk/test/Transforms/GVN/2009-11-12-MemDepMallocBitCast.ll
llvm/trunk/test/Transforms/GVN/2010-03-31-RedundantPHIs.ll
llvm/trunk/test/Transforms/GVN/2010-05-08-OneBit.ll
llvm/trunk/test/Transforms/GVN/2011-04-27-phioperands.ll
llvm/trunk/test/Transforms/GVN/2011-06-01-NonLocalMemdepMiscompile.ll
llvm/trunk/test/Transforms/GVN/MemdepMiscompile.ll
llvm/trunk/test/Transforms/GVN/atomic.ll
llvm/trunk/test/Transforms/GVN/calloc-load-removal.ll
llvm/trunk/test/Transforms/GVN/cond_br.ll
llvm/trunk/test/Transforms/GVN/cond_br2.ll
llvm/trunk/test/Transforms/GVN/condprop.ll
llvm/trunk/test/Transforms/GVN/crash-no-aa.ll
llvm/trunk/test/Transforms/GVN/crash.ll
llvm/trunk/test/Transforms/GVN/invariant-load.ll
llvm/trunk/test/Transforms/GVN/lifetime-simple.ll
llvm/trunk/test/Transforms/GVN/load-constant-mem.ll
llvm/trunk/test/Transforms/GVN/load-from-unreachable-predecessor.ll
llvm/trunk/test/Transforms/GVN/load-pre-align.ll
llvm/trunk/test/Transforms/GVN/load-pre-licm.ll
llvm/trunk/test/Transforms/GVN/load-pre-nonlocal.ll
llvm/trunk/test/Transforms/GVN/lpre-call-wrap-2.ll
llvm/trunk/test/Transforms/GVN/lpre-call-wrap.ll
llvm/trunk/test/Transforms/GVN/malloc-load-removal.ll
llvm/trunk/test/Transforms/GVN/noalias.ll
llvm/trunk/test/Transforms/GVN/non-local-offset.ll
llvm/trunk/test/Transforms/GVN/nonescaping-malloc.ll
llvm/trunk/test/Transforms/GVN/null-aliases-nothing.ll
llvm/trunk/test/Transforms/GVN/phi-translate-partial-alias.ll
llvm/trunk/test/Transforms/GVN/phi-translate.ll
llvm/trunk/test/Transforms/GVN/pr10820.ll
llvm/trunk/test/Transforms/GVN/pr14166.ll
llvm/trunk/test/Transforms/GVN/pr17732.ll
llvm/trunk/test/Transforms/GVN/pr17852.ll
llvm/trunk/test/Transforms/GVN/pre-basic-add.ll
llvm/trunk/test/Transforms/GVN/pre-gep-load.ll
llvm/trunk/test/Transforms/GVN/pre-load.ll
llvm/trunk/test/Transforms/GVN/pre-single-pred.ll
llvm/trunk/test/Transforms/GVN/preserve-tbaa.ll
llvm/trunk/test/Transforms/GVN/range.ll
llvm/trunk/test/Transforms/GVN/readattrs.ll
llvm/trunk/test/Transforms/GVN/rle-must-alias.ll
llvm/trunk/test/Transforms/GVN/rle-no-phi-translate.ll
llvm/trunk/test/Transforms/GVN/rle-nonlocal.ll
llvm/trunk/test/Transforms/GVN/rle-phi-translate.ll
llvm/trunk/test/Transforms/GVN/rle-semidominated.ll
llvm/trunk/test/Transforms/GVN/rle.ll
llvm/trunk/test/Transforms/GVN/tbaa.ll
llvm/trunk/test/Transforms/GVN/volatile.ll
llvm/trunk/test/Transforms/GlobalDCE/2002-08-17-FunctionDGE.ll
llvm/trunk/test/Transforms/GlobalDCE/2002-09-12-Redeletion.ll
llvm/trunk/test/Transforms/GlobalDCE/complex-constantexpr.ll
llvm/trunk/test/Transforms/GlobalDCE/global_ctors_integration.ll
llvm/trunk/test/Transforms/GlobalDCE/indirectbr.ll
llvm/trunk/test/Transforms/GlobalOpt/2004-10-10-CastStoreOnce.ll
llvm/trunk/test/Transforms/GlobalOpt/2005-06-15-LocalizeConstExprCrash.ll
llvm/trunk/test/Transforms/GlobalOpt/2006-07-07-InlineAsmCrash.ll
llvm/trunk/test/Transforms/GlobalOpt/2006-11-01-ShrinkGlobalPhiCrash.ll
llvm/trunk/test/Transforms/GlobalOpt/2007-04-05-Crash.ll
llvm/trunk/test/Transforms/GlobalOpt/2007-05-13-Crash.ll
llvm/trunk/test/Transforms/GlobalOpt/2007-11-09-GEP-GEP-Crash.ll
llvm/trunk/test/Transforms/GlobalOpt/2008-01-03-Crash.ll
llvm/trunk/test/Transforms/GlobalOpt/2008-01-13-OutOfRangeSROA.ll
llvm/trunk/test/Transforms/GlobalOpt/2008-01-29-VolatileGlobal.ll
llvm/trunk/test/Transforms/GlobalOpt/2008-04-26-SROA-Global-Align.ll
llvm/trunk/test/Transforms/GlobalOpt/2008-07-17-addrspace.ll
llvm/trunk/test/Transforms/GlobalOpt/2008-12-16-HeapSRACrash-2.ll
llvm/trunk/test/Transforms/GlobalOpt/2008-12-16-HeapSRACrash.ll
llvm/trunk/test/Transforms/GlobalOpt/2009-01-13-phi-user.ll
llvm/trunk/test/Transforms/GlobalOpt/2009-02-15-BitcastAlias.ll
llvm/trunk/test/Transforms/GlobalOpt/2009-03-05-dbg.ll
llvm/trunk/test/Transforms/GlobalOpt/2009-03-07-PromotePtrToBool.ll
llvm/trunk/test/Transforms/GlobalOpt/2009-06-01-RecursivePHI.ll
llvm/trunk/test/Transforms/GlobalOpt/2009-11-16-BrokenPerformHeapAllocSRoA.ll
llvm/trunk/test/Transforms/GlobalOpt/2009-11-16-MallocSingleStoreToGlobalVar.ll
llvm/trunk/test/Transforms/GlobalOpt/2010-02-25-MallocPromote.ll
llvm/trunk/test/Transforms/GlobalOpt/2010-02-26-MallocSROA.ll
llvm/trunk/test/Transforms/GlobalOpt/array-elem-refs.ll
llvm/trunk/test/Transforms/GlobalOpt/atomic.ll
llvm/trunk/test/Transforms/GlobalOpt/basictest.ll
llvm/trunk/test/Transforms/GlobalOpt/constantfold-initializers.ll
llvm/trunk/test/Transforms/GlobalOpt/crash-2.ll
llvm/trunk/test/Transforms/GlobalOpt/crash.ll
llvm/trunk/test/Transforms/GlobalOpt/ctor-list-opt-inbounds.ll
llvm/trunk/test/Transforms/GlobalOpt/ctor-list-opt.ll
llvm/trunk/test/Transforms/GlobalOpt/deadfunction.ll
llvm/trunk/test/Transforms/GlobalOpt/externally-initialized-global-ctr.ll
llvm/trunk/test/Transforms/GlobalOpt/fastcc.ll
llvm/trunk/test/Transforms/GlobalOpt/globalsra-partial.ll
llvm/trunk/test/Transforms/GlobalOpt/globalsra-unknown-index.ll
llvm/trunk/test/Transforms/GlobalOpt/globalsra.ll
llvm/trunk/test/Transforms/GlobalOpt/heap-sra-1.ll
llvm/trunk/test/Transforms/GlobalOpt/heap-sra-2.ll
llvm/trunk/test/Transforms/GlobalOpt/heap-sra-3.ll
llvm/trunk/test/Transforms/GlobalOpt/heap-sra-4.ll
llvm/trunk/test/Transforms/GlobalOpt/heap-sra-phi.ll
llvm/trunk/test/Transforms/GlobalOpt/integer-bool.ll
llvm/trunk/test/Transforms/GlobalOpt/iterate.ll
llvm/trunk/test/Transforms/GlobalOpt/load-store-global.ll
llvm/trunk/test/Transforms/GlobalOpt/malloc-promote-1.ll
llvm/trunk/test/Transforms/GlobalOpt/malloc-promote-2.ll
llvm/trunk/test/Transforms/GlobalOpt/malloc-promote-3.ll
llvm/trunk/test/Transforms/GlobalOpt/memset-null.ll
llvm/trunk/test/Transforms/GlobalOpt/phi-select.ll
llvm/trunk/test/Transforms/GlobalOpt/storepointer-compare.ll
llvm/trunk/test/Transforms/GlobalOpt/storepointer.ll
llvm/trunk/test/Transforms/GlobalOpt/tls.ll
llvm/trunk/test/Transforms/GlobalOpt/trivialstore.ll
llvm/trunk/test/Transforms/GlobalOpt/undef-init.ll
llvm/trunk/test/Transforms/GlobalOpt/unnamed-addr.ll
llvm/trunk/test/Transforms/GlobalOpt/zeroinitializer-gep-load.ll
llvm/trunk/test/Transforms/IPConstantProp/2009-09-24-byval-ptr.ll
llvm/trunk/test/Transforms/IPConstantProp/dangling-block-address.ll
llvm/trunk/test/Transforms/IPConstantProp/global.ll
llvm/trunk/test/Transforms/IPConstantProp/return-argument.ll
llvm/trunk/test/Transforms/IRCE/decrementing-loop.ll
llvm/trunk/test/Transforms/IRCE/low-becount.ll
llvm/trunk/test/Transforms/IRCE/multiple-access-no-preloop.ll
llvm/trunk/test/Transforms/IRCE/not-likely-taken.ll
llvm/trunk/test/Transforms/IRCE/single-access-no-preloop.ll
llvm/trunk/test/Transforms/IRCE/single-access-with-preloop.ll
llvm/trunk/test/Transforms/IRCE/unhandled.ll
llvm/trunk/test/Transforms/IRCE/with-parent-loops.ll
llvm/trunk/test/Transforms/IndVarSimplify/2005-02-17-TruncateExprCrash.ll
llvm/trunk/test/Transforms/IndVarSimplify/2006-06-16-Indvar-LCSSA-Crash.ll
llvm/trunk/test/Transforms/IndVarSimplify/2007-01-06-TripCount.ll
llvm/trunk/test/Transforms/IndVarSimplify/2008-09-02-IVType.ll
llvm/trunk/test/Transforms/IndVarSimplify/2008-10-03-CouldNotCompute.ll
llvm/trunk/test/Transforms/IndVarSimplify/2009-04-14-shorten_iv_vars.ll
llvm/trunk/test/Transforms/IndVarSimplify/2009-04-15-shorten-iv-vars-2.ll
llvm/trunk/test/Transforms/IndVarSimplify/2011-09-27-hoistsext.ll
llvm/trunk/test/Transforms/IndVarSimplify/2011-11-01-lftrptr.ll
llvm/trunk/test/Transforms/IndVarSimplify/2011-11-15-multiexit.ll
llvm/trunk/test/Transforms/IndVarSimplify/2014-06-21-congruent-constant.ll
llvm/trunk/test/Transforms/IndVarSimplify/ashr-tripcount.ll
llvm/trunk/test/Transforms/IndVarSimplify/avoid-i0.ll
llvm/trunk/test/Transforms/IndVarSimplify/eliminate-comparison.ll
llvm/trunk/test/Transforms/IndVarSimplify/eliminate-rem.ll
llvm/trunk/test/Transforms/IndVarSimplify/indirectbr.ll
llvm/trunk/test/Transforms/IndVarSimplify/iv-fold.ll
llvm/trunk/test/Transforms/IndVarSimplify/iv-sext.ll
llvm/trunk/test/Transforms/IndVarSimplify/iv-widen.ll
llvm/trunk/test/Transforms/IndVarSimplify/iv-zext.ll
llvm/trunk/test/Transforms/IndVarSimplify/lftr-promote.ll
llvm/trunk/test/Transforms/IndVarSimplify/lftr-reuse.ll
llvm/trunk/test/Transforms/IndVarSimplify/lftr-zext.ll
llvm/trunk/test/Transforms/IndVarSimplify/loop_evaluate7.ll
llvm/trunk/test/Transforms/IndVarSimplify/loop_evaluate8.ll
llvm/trunk/test/Transforms/IndVarSimplify/no-iv-rewrite.ll
llvm/trunk/test/Transforms/IndVarSimplify/overflowcheck.ll
llvm/trunk/test/Transforms/IndVarSimplify/phi-uses-value-multiple-times.ll
llvm/trunk/test/Transforms/IndVarSimplify/polynomial-expand.ll
llvm/trunk/test/Transforms/IndVarSimplify/pr18223.ll
llvm/trunk/test/Transforms/IndVarSimplify/pr20680.ll
llvm/trunk/test/Transforms/IndVarSimplify/pr22222.ll
llvm/trunk/test/Transforms/IndVarSimplify/promote-iv-to-eliminate-casts.ll
llvm/trunk/test/Transforms/IndVarSimplify/sharpen-range.ll
llvm/trunk/test/Transforms/IndVarSimplify/single-element-range.ll
llvm/trunk/test/Transforms/IndVarSimplify/sink-alloca.ll
llvm/trunk/test/Transforms/IndVarSimplify/udiv.ll
llvm/trunk/test/Transforms/IndVarSimplify/uglygep.ll
llvm/trunk/test/Transforms/IndVarSimplify/ult-sub-to-eq.ll
llvm/trunk/test/Transforms/IndVarSimplify/use-range-metadata.ll
llvm/trunk/test/Transforms/IndVarSimplify/variable-stride-ivs-0.ll
llvm/trunk/test/Transforms/IndVarSimplify/verify-scev.ll
llvm/trunk/test/Transforms/IndVarSimplify/widen-loop-comp.ll
llvm/trunk/test/Transforms/IndVarSimplify/widen-nsw.ll
llvm/trunk/test/Transforms/Inline/2006-07-12-InlinePruneCGUpdate.ll
llvm/trunk/test/Transforms/Inline/2009-01-08-NoInlineDynamicAlloca.ll
llvm/trunk/test/Transforms/Inline/2009-01-13-RecursiveInlineCrash.ll
llvm/trunk/test/Transforms/Inline/align.ll
llvm/trunk/test/Transforms/Inline/alloca-bonus.ll
llvm/trunk/test/Transforms/Inline/alloca-dbgdeclare.ll
llvm/trunk/test/Transforms/Inline/alloca-merge-align-nodl.ll
llvm/trunk/test/Transforms/Inline/alloca-merge-align.ll
llvm/trunk/test/Transforms/Inline/basictest.ll
llvm/trunk/test/Transforms/Inline/byval-tail-call.ll
llvm/trunk/test/Transforms/Inline/byval.ll
llvm/trunk/test/Transforms/Inline/byval_lifetime.ll
llvm/trunk/test/Transforms/Inline/crash2.ll
llvm/trunk/test/Transforms/Inline/devirtualize-3.ll
llvm/trunk/test/Transforms/Inline/devirtualize.ll
llvm/trunk/test/Transforms/Inline/ephemeral.ll
llvm/trunk/test/Transforms/Inline/gvn-inline-iteration.ll
llvm/trunk/test/Transforms/Inline/inline-byval-bonus.ll
llvm/trunk/test/Transforms/Inline/inline-cold.ll
llvm/trunk/test/Transforms/Inline/inline-fast-math-flags.ll
llvm/trunk/test/Transforms/Inline/inline-fp.ll
llvm/trunk/test/Transforms/Inline/inline-invoke-tail.ll
llvm/trunk/test/Transforms/Inline/inline-optsize.ll
llvm/trunk/test/Transforms/Inline/inline_constprop.ll
llvm/trunk/test/Transforms/Inline/inline_dbg_declare.ll
llvm/trunk/test/Transforms/Inline/inline_minisize.ll
llvm/trunk/test/Transforms/Inline/invoke-combine-clauses.ll
llvm/trunk/test/Transforms/Inline/noalias-cs.ll
llvm/trunk/test/Transforms/Inline/noalias.ll
llvm/trunk/test/Transforms/Inline/noalias2.ll
llvm/trunk/test/Transforms/Inline/optimization-remarks.ll
llvm/trunk/test/Transforms/Inline/ptr-diff.ll
llvm/trunk/test/Transforms/InstCombine/2003-07-21-ExternalConstant.ll
llvm/trunk/test/Transforms/InstCombine/2003-09-09-VolatileLoadElim.ll
llvm/trunk/test/Transforms/InstCombine/2004-01-13-InstCombineInvokePHI.ll
llvm/trunk/test/Transforms/InstCombine/2004-05-07-UnsizedCastLoad.ll
llvm/trunk/test/Transforms/InstCombine/2004-09-20-BadLoadCombine.ll
llvm/trunk/test/Transforms/InstCombine/2004-09-20-BadLoadCombine2.ll
llvm/trunk/test/Transforms/InstCombine/2005-06-16-SetCCOrSetCCMiscompile.ll
llvm/trunk/test/Transforms/InstCombine/2006-09-15-CastToBool.ll
llvm/trunk/test/Transforms/InstCombine/2006-12-08-Phi-ICmp-Op-Fold.ll
llvm/trunk/test/Transforms/InstCombine/2006-12-08-Select-ICmp.ll
llvm/trunk/test/Transforms/InstCombine/2006-12-15-Range-Test.ll
llvm/trunk/test/Transforms/InstCombine/2006-12-23-Select-Cmp-Cmp.ll
llvm/trunk/test/Transforms/InstCombine/2007-02-01-LoadSinkAlloca.ll
llvm/trunk/test/Transforms/InstCombine/2007-02-07-PointerCast.ll
llvm/trunk/test/Transforms/InstCombine/2007-03-25-BadShiftMask.ll
llvm/trunk/test/Transforms/InstCombine/2007-06-06-AshrSignBit.ll
llvm/trunk/test/Transforms/InstCombine/2007-09-10-AliasConstFold.ll
llvm/trunk/test/Transforms/InstCombine/2007-10-10-EliminateMemCpy.ll
llvm/trunk/test/Transforms/InstCombine/2007-10-31-RangeCrash.ll
llvm/trunk/test/Transforms/InstCombine/2007-10-31-StringCrash.ll
llvm/trunk/test/Transforms/InstCombine/2007-11-07-OpaqueAlignCrash.ll
llvm/trunk/test/Transforms/InstCombine/2007-12-28-IcmpSub2.ll
llvm/trunk/test/Transforms/InstCombine/2008-03-13-IntToPtr.ll
llvm/trunk/test/Transforms/InstCombine/2008-04-29-VolatileLoadDontMerge.ll
llvm/trunk/test/Transforms/InstCombine/2008-04-29-VolatileLoadMerge.ll
llvm/trunk/test/Transforms/InstCombine/2008-05-09-SinkOfInvoke.ll
llvm/trunk/test/Transforms/InstCombine/2008-05-17-InfLoop.ll
llvm/trunk/test/Transforms/InstCombine/2008-05-23-CompareFold.ll
llvm/trunk/test/Transforms/InstCombine/2008-06-19-UncondLoad.ll
llvm/trunk/test/Transforms/InstCombine/2008-07-08-VolatileLoadMerge.ll
llvm/trunk/test/Transforms/InstCombine/2008-08-05-And.ll
llvm/trunk/test/Transforms/InstCombine/2009-01-08-AlignAlloca.ll
llvm/trunk/test/Transforms/InstCombine/2009-01-19-fmod-constant-float-specials.ll
llvm/trunk/test/Transforms/InstCombine/2009-01-19-fmod-constant-float.ll
llvm/trunk/test/Transforms/InstCombine/2009-02-20-InstCombine-SROA.ll
llvm/trunk/test/Transforms/InstCombine/2009-02-21-LoadCST.ll
llvm/trunk/test/Transforms/InstCombine/2009-02-25-CrashZeroSizeArray.ll
llvm/trunk/test/Transforms/InstCombine/2009-03-18-vector-ashr-crash.ll
llvm/trunk/test/Transforms/InstCombine/2009-05-23-FCmpToICmp.ll
llvm/trunk/test/Transforms/InstCombine/2010-03-03-ExtElim.ll
llvm/trunk/test/Transforms/InstCombine/2011-05-02-VectorBoolean.ll
llvm/trunk/test/Transforms/InstCombine/2011-05-28-swapmulsub.ll
llvm/trunk/test/Transforms/InstCombine/2011-06-13-nsw-alloca.ll
llvm/trunk/test/Transforms/InstCombine/2011-10-07-AlignPromotion.ll
llvm/trunk/test/Transforms/InstCombine/2012-05-27-Negative-Shift-Crash.ll
llvm/trunk/test/Transforms/InstCombine/2012-05-28-select-hang.ll
llvm/trunk/test/Transforms/InstCombine/2012-06-06-LoadOfPHIs.ll
llvm/trunk/test/Transforms/InstCombine/2012-07-25-LoadPart.ll
llvm/trunk/test/Transforms/InstCombine/2012-10-25-vector-of-pointers.ll
llvm/trunk/test/Transforms/InstCombine/2012-12-14-simp-vgep.ll
llvm/trunk/test/Transforms/InstCombine/2013-03-05-Combine-BitcastTy-Into-Alloca.ll
llvm/trunk/test/Transforms/InstCombine/CPP_min_max.ll
llvm/trunk/test/Transforms/InstCombine/add3.ll
llvm/trunk/test/Transforms/InstCombine/addrspacecast.ll
llvm/trunk/test/Transforms/InstCombine/alias-recursion.ll
llvm/trunk/test/Transforms/InstCombine/align-addr.ll
llvm/trunk/test/Transforms/InstCombine/align-attr.ll
llvm/trunk/test/Transforms/InstCombine/align-external.ll
llvm/trunk/test/Transforms/InstCombine/aligned-altivec.ll
llvm/trunk/test/Transforms/InstCombine/aligned-qpx.ll
llvm/trunk/test/Transforms/InstCombine/alloca.ll
llvm/trunk/test/Transforms/InstCombine/assume-loop-align.ll
llvm/trunk/test/Transforms/InstCombine/assume-redundant.ll
llvm/trunk/test/Transforms/InstCombine/assume.ll
llvm/trunk/test/Transforms/InstCombine/atomic.ll
llvm/trunk/test/Transforms/InstCombine/bitcast-alias-function.ll
llvm/trunk/test/Transforms/InstCombine/bitcast.ll
llvm/trunk/test/Transforms/InstCombine/bittest.ll
llvm/trunk/test/Transforms/InstCombine/call2.ll
llvm/trunk/test/Transforms/InstCombine/cast.ll
llvm/trunk/test/Transforms/InstCombine/cast_ptr.ll
llvm/trunk/test/Transforms/InstCombine/constant-fold-address-space-pointer.ll
llvm/trunk/test/Transforms/InstCombine/crash.ll
llvm/trunk/test/Transforms/InstCombine/debuginfo.ll
llvm/trunk/test/Transforms/InstCombine/descale-zero.ll
llvm/trunk/test/Transforms/InstCombine/div-shift-crash.ll
llvm/trunk/test/Transforms/InstCombine/err-rep-cold.ll
llvm/trunk/test/Transforms/InstCombine/extractvalue.ll
llvm/trunk/test/Transforms/InstCombine/fmul.ll
llvm/trunk/test/Transforms/InstCombine/fold-vector-zero.ll
llvm/trunk/test/Transforms/InstCombine/fp-ret-bitcast.ll
llvm/trunk/test/Transforms/InstCombine/fpextend.ll
llvm/trunk/test/Transforms/InstCombine/gc.relocate.ll
llvm/trunk/test/Transforms/InstCombine/gep-addrspace.ll
llvm/trunk/test/Transforms/InstCombine/gep-sext.ll
llvm/trunk/test/Transforms/InstCombine/gepphigep.ll
llvm/trunk/test/Transforms/InstCombine/getelementptr.ll
llvm/trunk/test/Transforms/InstCombine/icmp-range.ll
llvm/trunk/test/Transforms/InstCombine/invariant.ll
llvm/trunk/test/Transforms/InstCombine/known_align.ll
llvm/trunk/test/Transforms/InstCombine/load-cmp.ll
llvm/trunk/test/Transforms/InstCombine/load-select.ll
llvm/trunk/test/Transforms/InstCombine/load.ll
llvm/trunk/test/Transforms/InstCombine/load3.ll
llvm/trunk/test/Transforms/InstCombine/loadstore-alignment.ll
llvm/trunk/test/Transforms/InstCombine/loadstore-metadata.ll
llvm/trunk/test/Transforms/InstCombine/lshr-phi.ll
llvm/trunk/test/Transforms/InstCombine/malloc-free-delete.ll
llvm/trunk/test/Transforms/InstCombine/mem-gep-zidx.ll
llvm/trunk/test/Transforms/InstCombine/memcmp-1.ll
llvm/trunk/test/Transforms/InstCombine/memcpy-from-global.ll
llvm/trunk/test/Transforms/InstCombine/merge-icmp.ll
llvm/trunk/test/Transforms/InstCombine/mul.ll
llvm/trunk/test/Transforms/InstCombine/multi-size-address-space-pointer.ll
llvm/trunk/test/Transforms/InstCombine/no-negzero.ll
llvm/trunk/test/Transforms/InstCombine/obfuscated_splat.ll
llvm/trunk/test/Transforms/InstCombine/objsize.ll
llvm/trunk/test/Transforms/InstCombine/odr-linkage.ll
llvm/trunk/test/Transforms/InstCombine/or.ll
llvm/trunk/test/Transforms/InstCombine/phi-merge-gep.ll
llvm/trunk/test/Transforms/InstCombine/phi.ll
llvm/trunk/test/Transforms/InstCombine/pr12251.ll
llvm/trunk/test/Transforms/InstCombine/pr2645-0.ll
llvm/trunk/test/Transforms/InstCombine/pr2645-1.ll
llvm/trunk/test/Transforms/InstCombine/select-cmp-br.ll
llvm/trunk/test/Transforms/InstCombine/select-load-call.ll
llvm/trunk/test/Transforms/InstCombine/select.ll
llvm/trunk/test/Transforms/InstCombine/shufflemask-undef.ll
llvm/trunk/test/Transforms/InstCombine/signed-comparison.ll
llvm/trunk/test/Transforms/InstCombine/simplify-demanded-bits-pointer.ll
llvm/trunk/test/Transforms/InstCombine/simplify-libcalls.ll
llvm/trunk/test/Transforms/InstCombine/sincospi.ll
llvm/trunk/test/Transforms/InstCombine/sqrt.ll
llvm/trunk/test/Transforms/InstCombine/store.ll
llvm/trunk/test/Transforms/InstCombine/strcmp-1.ll
llvm/trunk/test/Transforms/InstCombine/strncmp-1.ll
llvm/trunk/test/Transforms/InstCombine/struct-assign-tbaa.ll
llvm/trunk/test/Transforms/InstCombine/vec_demanded_elts.ll
llvm/trunk/test/Transforms/InstCombine/vec_extract_var_elt.ll
llvm/trunk/test/Transforms/InstCombine/vec_shuffle.ll
llvm/trunk/test/Transforms/InstCombine/volatile_store.ll
llvm/trunk/test/Transforms/InstCombine/vsx-unaligned.ll
llvm/trunk/test/Transforms/InstCombine/zext-or-icmp.ll
llvm/trunk/test/Transforms/InstMerge/ld_hoist1.ll
llvm/trunk/test/Transforms/InstMerge/ld_hoist_st_sink.ll
llvm/trunk/test/Transforms/InstMerge/st_sink_barrier_call.ll
llvm/trunk/test/Transforms/InstMerge/st_sink_bugfix_22613.ll
llvm/trunk/test/Transforms/InstMerge/st_sink_no_barrier_call.ll
llvm/trunk/test/Transforms/InstMerge/st_sink_no_barrier_load.ll
llvm/trunk/test/Transforms/InstMerge/st_sink_no_barrier_store.ll
llvm/trunk/test/Transforms/InstMerge/st_sink_two_stores.ll
llvm/trunk/test/Transforms/InstMerge/st_sink_with_barrier.ll
llvm/trunk/test/Transforms/InstSimplify/call-callconv.ll
llvm/trunk/test/Transforms/InstSimplify/compare.ll
llvm/trunk/test/Transforms/InstSimplify/load.ll
llvm/trunk/test/Transforms/InstSimplify/vector_ptr_bitcast.ll
llvm/trunk/test/Transforms/Internalize/2009-01-05-InternalizeAliases.ll
llvm/trunk/test/Transforms/JumpThreading/2010-08-26-and.ll
llvm/trunk/test/Transforms/JumpThreading/2011-04-14-InfLoop.ll
llvm/trunk/test/Transforms/JumpThreading/crash.ll
llvm/trunk/test/Transforms/JumpThreading/landing-pad.ll
llvm/trunk/test/Transforms/JumpThreading/lvi-load.ll
llvm/trunk/test/Transforms/JumpThreading/or-undef.ll
llvm/trunk/test/Transforms/JumpThreading/phi-eq.ll
llvm/trunk/test/Transforms/JumpThreading/select.ll
llvm/trunk/test/Transforms/JumpThreading/thread-loads.ll
llvm/trunk/test/Transforms/LCSSA/2006-06-03-IncorrectIDFPhis.ll
llvm/trunk/test/Transforms/LCSSA/2006-07-09-NoDominator.ll
llvm/trunk/test/Transforms/LCSSA/2007-07-12-LICM-2.ll
llvm/trunk/test/Transforms/LCSSA/2007-07-12-LICM-3.ll
llvm/trunk/test/Transforms/LCSSA/2007-07-12-LICM.ll
llvm/trunk/test/Transforms/LCSSA/unreachable-use.ll
llvm/trunk/test/Transforms/LICM/2003-05-02-LoadHoist.ll
llvm/trunk/test/Transforms/LICM/2004-09-14-AliasAnalysisInvalidate.ll
llvm/trunk/test/Transforms/LICM/2007-05-22-VolatileSink.ll
llvm/trunk/test/Transforms/LICM/2007-07-30-AliasSet.ll
llvm/trunk/test/Transforms/LICM/2008-07-22-LoadGlobalConstant.ll
llvm/trunk/test/Transforms/LICM/2009-12-10-LICM-Indbr-Crash.ll
llvm/trunk/test/Transforms/LICM/2011-04-06-HoistMissedASTUpdate.ll
llvm/trunk/test/Transforms/LICM/2011-04-06-PromoteResultOfPromotion.ll
llvm/trunk/test/Transforms/LICM/2011-04-09-RAUW-AST.ll
llvm/trunk/test/Transforms/LICM/PR21582.ll
llvm/trunk/test/Transforms/LICM/atomics.ll
llvm/trunk/test/Transforms/LICM/constexpr.ll
llvm/trunk/test/Transforms/LICM/crash.ll
llvm/trunk/test/Transforms/LICM/hoist-bitcast-load.ll
llvm/trunk/test/Transforms/LICM/hoist-deref-load.ll
llvm/trunk/test/Transforms/LICM/hoist-invariant-load.ll
llvm/trunk/test/Transforms/LICM/hoisting.ll
llvm/trunk/test/Transforms/LICM/lcssa-ssa-promoter.ll
llvm/trunk/test/Transforms/LICM/scalar-promote-memmodel.ll
llvm/trunk/test/Transforms/LICM/scalar_promote.ll
llvm/trunk/test/Transforms/LICM/sinking.ll
llvm/trunk/test/Transforms/LICM/speculate.ll
llvm/trunk/test/Transforms/LICM/volatile-alias.ll
llvm/trunk/test/Transforms/LoadCombine/load-combine-aa.ll
llvm/trunk/test/Transforms/LoadCombine/load-combine-assume.ll
llvm/trunk/test/Transforms/LoadCombine/load-combine.ll
llvm/trunk/test/Transforms/LoopDeletion/2008-05-06-Phi.ll
llvm/trunk/test/Transforms/LoopIdiom/basic-address-space.ll
llvm/trunk/test/Transforms/LoopIdiom/basic.ll
llvm/trunk/test/Transforms/LoopIdiom/scev-invalidation.ll
llvm/trunk/test/Transforms/LoopReroll/basic.ll
llvm/trunk/test/Transforms/LoopReroll/nonconst_lb.ll
llvm/trunk/test/Transforms/LoopReroll/reduction.ll
llvm/trunk/test/Transforms/LoopRotate/PhiRename-1.ll
llvm/trunk/test/Transforms/LoopRotate/alloca.ll
llvm/trunk/test/Transforms/LoopRotate/dbgvalue.ll
llvm/trunk/test/Transforms/LoopRotate/indirectbr.ll
llvm/trunk/test/Transforms/LoopRotate/multiple-exits.ll
llvm/trunk/test/Transforms/LoopRotate/nosimplifylatch.ll
llvm/trunk/test/Transforms/LoopRotate/phi-duplicate.ll
llvm/trunk/test/Transforms/LoopRotate/simplifylatch.ll
llvm/trunk/test/Transforms/LoopSimplify/2003-08-15-PreheadersFail.ll
llvm/trunk/test/Transforms/LoopSimplify/2003-12-10-ExitBlocksProblem.ll
llvm/trunk/test/Transforms/LoopSimplify/ashr-crash.ll
llvm/trunk/test/Transforms/LoopSimplify/merge-exits.ll
llvm/trunk/test/Transforms/LoopSimplify/phi-node-simplify.ll
llvm/trunk/test/Transforms/LoopSimplify/preserve-scev.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/2005-08-15-AddRecIV.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/2005-08-17-OutOfLoopVariant.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/2008-08-13-CmpStride.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/2008-09-09-Overflow.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/2009-01-13-nonconstant-stride-outside-loop.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/2009-04-28-no-reduce-mul.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/2011-07-19-CritEdgeBreakCrash.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/2011-10-06-ReusePhi.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/2011-12-19-PostincQuadratic.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/2012-03-15-nopreheader.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/2012-03-26-constexpr.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/2012-07-13-ExpandUDiv.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/2012-07-18-LimitReassociate.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/2013-01-14-ReuseCast.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/AArch64/lsr-memcpy.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/AArch64/req-regs.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/ARM/2012-06-15-lsr-noaddrmode.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/ARM/ivchain-ARM.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/X86/2011-12-04-loserreg.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/X86/2012-01-13-phielim.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/X86/ivchain-X86.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/X86/ivchain-stress-X86.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/X86/no_superflous_induction_vars.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/X86/pr17473.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/addrec-gep-address-space.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/addrec-gep.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/address-space-loop.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/dont_reverse.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/post-inc-icmpzero.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/pr12691.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/pr18165.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/pr2570.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/pr3086.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/pr3399.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/pr3571.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/share_code_in_preheader.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/uglygep-address-space.ll
llvm/trunk/test/Transforms/LoopStrengthReduce/uglygep.ll
llvm/trunk/test/Transforms/LoopUnroll/2011-08-08-PhiUpdate.ll
llvm/trunk/test/Transforms/LoopUnroll/2011-08-09-IVSimplify.ll
llvm/trunk/test/Transforms/LoopUnroll/2011-10-01-NoopTrunc.ll
llvm/trunk/test/Transforms/LoopUnroll/PowerPC/a2-unrolling.ll
llvm/trunk/test/Transforms/LoopUnroll/X86/partial.ll
llvm/trunk/test/Transforms/LoopUnroll/ephemeral.ll
llvm/trunk/test/Transforms/LoopUnroll/full-unroll-heuristics.ll
llvm/trunk/test/Transforms/LoopUnroll/runtime-loop.ll
llvm/trunk/test/Transforms/LoopUnroll/runtime-loop1.ll
llvm/trunk/test/Transforms/LoopUnroll/runtime-loop2.ll
llvm/trunk/test/Transforms/LoopUnroll/runtime-loop3.ll
llvm/trunk/test/Transforms/LoopUnroll/scevunroll.ll
llvm/trunk/test/Transforms/LoopUnroll/shifted-tripcount.ll
llvm/trunk/test/Transforms/LoopUnroll/unroll-pragmas-disabled.ll
llvm/trunk/test/Transforms/LoopUnroll/unroll-pragmas.ll
llvm/trunk/test/Transforms/LoopUnswitch/2008-06-17-DomFrontier.ll
llvm/trunk/test/Transforms/LoopUnswitch/2010-11-18-LCSSA.ll
llvm/trunk/test/Transforms/LoopUnswitch/2011-09-26-EHCrash.ll
llvm/trunk/test/Transforms/LoopUnswitch/2011-11-18-SimpleSwitch.ll
llvm/trunk/test/Transforms/LoopUnswitch/2011-11-18-TwoSwitches-Threshold.ll
llvm/trunk/test/Transforms/LoopUnswitch/2011-11-18-TwoSwitches.ll
llvm/trunk/test/Transforms/LoopUnswitch/2012-04-30-LoopUnswitch-LPad-Crash.ll
llvm/trunk/test/Transforms/LoopUnswitch/2012-05-20-Phi.ll
llvm/trunk/test/Transforms/LoopUnswitch/basictest.ll
llvm/trunk/test/Transforms/LoopUnswitch/preserve-analyses.ll
llvm/trunk/test/Transforms/LoopVectorize/12-12-11-if-conv.ll
llvm/trunk/test/Transforms/LoopVectorize/AArch64/aarch64-unroll.ll
llvm/trunk/test/Transforms/LoopVectorize/AArch64/arbitrary-induction-step.ll
llvm/trunk/test/Transforms/LoopVectorize/AArch64/arm64-unroll.ll
llvm/trunk/test/Transforms/LoopVectorize/AArch64/gather-cost.ll
llvm/trunk/test/Transforms/LoopVectorize/AArch64/sdiv-pow2.ll
llvm/trunk/test/Transforms/LoopVectorize/ARM/arm-unroll.ll
llvm/trunk/test/Transforms/LoopVectorize/ARM/gather-cost.ll
llvm/trunk/test/Transforms/LoopVectorize/ARM/gcc-examples.ll
llvm/trunk/test/Transforms/LoopVectorize/ARM/mul-cast-vect.ll
llvm/trunk/test/Transforms/LoopVectorize/ARM/width-detect.ll
llvm/trunk/test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll
llvm/trunk/test/Transforms/LoopVectorize/PowerPC/vsx-tsvc-s173.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/already-vectorized.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/assume.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/avx1.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/constant-vector-operand.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/cost-model.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/fp32_to_uint32-cost-model.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/fp64_to_uint32-cost-model.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/fp_to_sint8-cost-model.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/gather-cost.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/gcc-examples.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/illegal-parallel-loop-uniform-write.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/masked_load_store.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/metadata-enable.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/min-trip-count-switch.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/no-vector.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/parallel-loops-after-reg2mem.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/parallel-loops.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/powof2div.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/reduction-crash.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/small-size.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/tripcount.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/uint64_to_fp64-cost-model.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/unroll-pm.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/unroll-small-loops.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/unroll_selection.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/vect.omp.force.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/vector-scalar-select-cost.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/vector_ptr_load_store.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/vectorization-remarks-missed.ll
llvm/trunk/test/Transforms/LoopVectorize/X86/vectorization-remarks.ll
llvm/trunk/test/Transforms/LoopVectorize/align.ll
llvm/trunk/test/Transforms/LoopVectorize/bzip_reverse_loops.ll
llvm/trunk/test/Transforms/LoopVectorize/calloc.ll
llvm/trunk/test/Transforms/LoopVectorize/conditional-assignment.ll
llvm/trunk/test/Transforms/LoopVectorize/control-flow.ll
llvm/trunk/test/Transforms/LoopVectorize/cpp-new-array.ll
llvm/trunk/test/Transforms/LoopVectorize/dbg.value.ll
llvm/trunk/test/Transforms/LoopVectorize/debugloc.ll
llvm/trunk/test/Transforms/LoopVectorize/duplicated-metadata.ll
llvm/trunk/test/Transforms/LoopVectorize/ee-crash.ll
llvm/trunk/test/Transforms/LoopVectorize/exact.ll
llvm/trunk/test/Transforms/LoopVectorize/flags.ll
llvm/trunk/test/Transforms/LoopVectorize/float-reduction.ll
llvm/trunk/test/Transforms/LoopVectorize/funcall.ll
llvm/trunk/test/Transforms/LoopVectorize/gcc-examples.ll
llvm/trunk/test/Transforms/LoopVectorize/global_alias.ll
llvm/trunk/test/Transforms/LoopVectorize/hoist-loads.ll
llvm/trunk/test/Transforms/LoopVectorize/i8-induction.ll
llvm/trunk/test/Transforms/LoopVectorize/if-conversion-edgemasks.ll
llvm/trunk/test/Transforms/LoopVectorize/if-conversion-nest.ll
llvm/trunk/test/Transforms/LoopVectorize/if-conversion-reduction.ll
llvm/trunk/test/Transforms/LoopVectorize/if-conversion.ll
llvm/trunk/test/Transforms/LoopVectorize/if-pred-stores.ll
llvm/trunk/test/Transforms/LoopVectorize/incorrect-dom-info.ll
llvm/trunk/test/Transforms/LoopVectorize/increment.ll
llvm/trunk/test/Transforms/LoopVectorize/induction.ll
llvm/trunk/test/Transforms/LoopVectorize/intrinsic.ll
llvm/trunk/test/Transforms/LoopVectorize/lifetime.ll
llvm/trunk/test/Transforms/LoopVectorize/loop-vect-memdep.ll
llvm/trunk/test/Transforms/LoopVectorize/memdep.ll
llvm/trunk/test/Transforms/LoopVectorize/metadata-unroll.ll
llvm/trunk/test/Transforms/LoopVectorize/metadata.ll
llvm/trunk/test/Transforms/LoopVectorize/minmax_reduction.ll
llvm/trunk/test/Transforms/LoopVectorize/multi-use-reduction-bug.ll
llvm/trunk/test/Transforms/LoopVectorize/multiple-address-spaces.ll
llvm/trunk/test/Transforms/LoopVectorize/no_array_bounds.ll
llvm/trunk/test/Transforms/LoopVectorize/no_idiv_reduction.ll
llvm/trunk/test/Transforms/LoopVectorize/no_int_induction.ll
llvm/trunk/test/Transforms/LoopVectorize/no_outside_user.ll
llvm/trunk/test/Transforms/LoopVectorize/no_switch.ll
llvm/trunk/test/Transforms/LoopVectorize/non-const-n.ll
llvm/trunk/test/Transforms/LoopVectorize/opt.ll
llvm/trunk/test/Transforms/LoopVectorize/ptr_loops.ll
llvm/trunk/test/Transforms/LoopVectorize/read-only.ll
llvm/trunk/test/Transforms/LoopVectorize/reduction.ll
llvm/trunk/test/Transforms/LoopVectorize/reverse_induction.ll
llvm/trunk/test/Transforms/LoopVectorize/reverse_iter.ll
llvm/trunk/test/Transforms/LoopVectorize/runtime-check-address-space.ll
llvm/trunk/test/Transforms/LoopVectorize/runtime-check-readonly-address-space.ll
llvm/trunk/test/Transforms/LoopVectorize/runtime-check-readonly.ll
llvm/trunk/test/Transforms/LoopVectorize/runtime-check.ll
llvm/trunk/test/Transforms/LoopVectorize/runtime-limit.ll
llvm/trunk/test/Transforms/LoopVectorize/safegep.ll
llvm/trunk/test/Transforms/LoopVectorize/same-base-access.ll
llvm/trunk/test/Transforms/LoopVectorize/scalar-select.ll
llvm/trunk/test/Transforms/LoopVectorize/scev-exitlim-crash.ll
llvm/trunk/test/Transforms/LoopVectorize/simple-unroll.ll
llvm/trunk/test/Transforms/LoopVectorize/small-loop.ll
llvm/trunk/test/Transforms/LoopVectorize/start-non-zero.ll
llvm/trunk/test/Transforms/LoopVectorize/store-shuffle-bug.ll
llvm/trunk/test/Transforms/LoopVectorize/struct_access.ll
llvm/trunk/test/Transforms/LoopVectorize/tbaa-nodep.ll
llvm/trunk/test/Transforms/LoopVectorize/unroll_novec.ll
llvm/trunk/test/Transforms/LoopVectorize/value-ptr-bug.ll
llvm/trunk/test/Transforms/LoopVectorize/vect.omp.persistence.ll
llvm/trunk/test/Transforms/LoopVectorize/vect.stats.ll
llvm/trunk/test/Transforms/LoopVectorize/vectorize-once.ll
llvm/trunk/test/Transforms/LoopVectorize/version-mem-access.ll
llvm/trunk/test/Transforms/LoopVectorize/write-only.ll
llvm/trunk/test/Transforms/LowerAtomic/atomic-swap.ll
llvm/trunk/test/Transforms/LowerBitSets/simple.ll
llvm/trunk/test/Transforms/LowerExpectIntrinsic/basic.ll
llvm/trunk/test/Transforms/LowerSwitch/2014-06-10-SwitchContiguousOpt.ll
llvm/trunk/test/Transforms/LowerSwitch/2014-06-11-SwitchDefaultUnreachableOpt.ll
llvm/trunk/test/Transforms/Mem2Reg/2002-03-28-UninitializedVal.ll
llvm/trunk/test/Transforms/Mem2Reg/2003-04-24-MultipleIdenticalSuccessors.ll
llvm/trunk/test/Transforms/Mem2Reg/2003-06-26-IterativePromote.ll
llvm/trunk/test/Transforms/Mem2Reg/2003-10-05-DeadPHIInsertion.ll
llvm/trunk/test/Transforms/Mem2Reg/2005-06-30-ReadBeforeWrite.ll
llvm/trunk/test/Transforms/Mem2Reg/2005-11-28-Crash.ll
llvm/trunk/test/Transforms/Mem2Reg/2007-08-27-VolatileLoadsStores.ll
llvm/trunk/test/Transforms/Mem2Reg/ConvertDebugInfo.ll
llvm/trunk/test/Transforms/Mem2Reg/ConvertDebugInfo2.ll
llvm/trunk/test/Transforms/Mem2Reg/PromoteMemToRegister.ll
llvm/trunk/test/Transforms/Mem2Reg/UndefValuesMerge.ll
llvm/trunk/test/Transforms/Mem2Reg/atomic.ll
llvm/trunk/test/Transforms/Mem2Reg/crash.ll
llvm/trunk/test/Transforms/MemCpyOpt/2008-03-13-ReturnSlotBitcast.ll
llvm/trunk/test/Transforms/MemCpyOpt/2011-06-02-CallSlotOverwritten.ll
llvm/trunk/test/Transforms/MemCpyOpt/atomic.ll
llvm/trunk/test/Transforms/MemCpyOpt/loadstore-sret.ll
llvm/trunk/test/Transforms/MemCpyOpt/memcpy.ll
llvm/trunk/test/Transforms/MemCpyOpt/sret.ll
llvm/trunk/test/Transforms/MergeFunc/2011-02-08-RemoveEqual.ll
llvm/trunk/test/Transforms/MergeFunc/address-spaces.ll
llvm/trunk/test/Transforms/MergeFunc/crash.ll
llvm/trunk/test/Transforms/MergeFunc/inttoptr-address-space.ll
llvm/trunk/test/Transforms/MergeFunc/inttoptr.ll
llvm/trunk/test/Transforms/MergeFunc/mergefunc-struct-return.ll
llvm/trunk/test/Transforms/MergeFunc/ranges.ll
llvm/trunk/test/Transforms/MergeFunc/vector.ll
llvm/trunk/test/Transforms/MetaRenamer/metarenamer.ll
llvm/trunk/test/Transforms/ObjCARC/allocas.ll
llvm/trunk/test/Transforms/ObjCARC/basic.ll
llvm/trunk/test/Transforms/ObjCARC/cfg-hazards.ll
llvm/trunk/test/Transforms/ObjCARC/contract-storestrong-ivar.ll
llvm/trunk/test/Transforms/ObjCARC/contract-storestrong.ll
llvm/trunk/test/Transforms/ObjCARC/ensure-that-exception-unwind-path-is-visited.ll
llvm/trunk/test/Transforms/ObjCARC/escape.ll
llvm/trunk/test/Transforms/ObjCARC/gvn.ll
llvm/trunk/test/Transforms/ObjCARC/intrinsic-use.ll
llvm/trunk/test/Transforms/ObjCARC/move-and-form-retain-autorelease.ll
llvm/trunk/test/Transforms/ObjCARC/move-and-merge-autorelease.ll
llvm/trunk/test/Transforms/ObjCARC/nested.ll
llvm/trunk/test/Transforms/ObjCARC/provenance.ll
llvm/trunk/test/Transforms/ObjCARC/retain-block-side-effects.ll
llvm/trunk/test/Transforms/PhaseOrdering/2010-03-22-empty-baseclass.ll
llvm/trunk/test/Transforms/PhaseOrdering/PR6627.ll
llvm/trunk/test/Transforms/PhaseOrdering/basic.ll
llvm/trunk/test/Transforms/PhaseOrdering/gdce.ll
llvm/trunk/test/Transforms/Reassociate/2011-01-26-UseAfterFree.ll
llvm/trunk/test/Transforms/Reassociate/basictest.ll
llvm/trunk/test/Transforms/Reassociate/crash.ll
llvm/trunk/test/Transforms/Reassociate/fast-basictest.ll
llvm/trunk/test/Transforms/Reassociate/pr12245.ll
llvm/trunk/test/Transforms/Reassociate/pr21205.ll
llvm/trunk/test/Transforms/RewriteStatepointsForGC/basics.ll
llvm/trunk/test/Transforms/SCCP/2003-06-24-OverdefinedPHIValue.ll
llvm/trunk/test/Transforms/SCCP/2006-10-23-IPSCCP-Crash.ll
llvm/trunk/test/Transforms/SCCP/2006-12-04-PackedType.ll
llvm/trunk/test/Transforms/SCCP/apint-array.ll
llvm/trunk/test/Transforms/SCCP/apint-bigarray.ll
llvm/trunk/test/Transforms/SCCP/apint-bigint2.ll
llvm/trunk/test/Transforms/SCCP/apint-ipsccp3.ll
llvm/trunk/test/Transforms/SCCP/apint-ipsccp4.ll
llvm/trunk/test/Transforms/SCCP/apint-load.ll
llvm/trunk/test/Transforms/SCCP/atomic-load-store.ll
llvm/trunk/test/Transforms/SCCP/ipsccp-basic.ll
llvm/trunk/test/Transforms/SCCP/loadtest.ll
llvm/trunk/test/Transforms/SLPVectorizer/AArch64/commute.ll
llvm/trunk/test/Transforms/SLPVectorizer/AArch64/load-store-q.ll
llvm/trunk/test/Transforms/SLPVectorizer/AArch64/sdiv-pow2.ll
llvm/trunk/test/Transforms/SLPVectorizer/ARM/memory.ll
llvm/trunk/test/Transforms/SLPVectorizer/R600/simplebb.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/addsub.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/align.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/call.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/cast.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/cmp_sel.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/compare-reduce.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/consecutive-access.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/continue_vectorizing.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_binaryop.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_bullet.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_cmpop.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_dequeue.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_gep.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_lencod.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_mandeltext.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_netbsd_decompress.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/crash_vectorizeTree.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/cross_block_slp.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/cse.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/cycle_dup.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/debug_info.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/diamond.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/external_user.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/extract.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/extract_in_tree_user.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/flag.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/gep.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/hoist.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/horizontal.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/implicitfloat.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/in-tree-user.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/intrinsic.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/long_chains.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/loopinvariant.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/metadata.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/multi_block.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/multi_user.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/odd_store.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/operandorder.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/opt.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/ordering.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/phi.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/phi3.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/phi_overalignedtype.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/powof2div.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/pr16628.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/pr16899.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/pr19657.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/propagate_ir_flags.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/reduction.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/reduction2.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/return.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/rgb_phi.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/saxpy.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/scheduling.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/simple-loop.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/simplebb.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/tiny-tree.ll
llvm/trunk/test/Transforms/SLPVectorizer/X86/unreachable.ll
llvm/trunk/test/Transforms/SLPVectorizer/XCore/no-vector-registers.ll
llvm/trunk/test/Transforms/SROA/address-spaces.ll
llvm/trunk/test/Transforms/SROA/alignment.ll
llvm/trunk/test/Transforms/SROA/basictest.ll
llvm/trunk/test/Transforms/SROA/big-endian.ll
llvm/trunk/test/Transforms/SROA/fca.ll
llvm/trunk/test/Transforms/SROA/phi-and-select.ll
llvm/trunk/test/Transforms/SROA/slice-order-independence.ll
llvm/trunk/test/Transforms/SROA/slice-width.ll
llvm/trunk/test/Transforms/SROA/vector-conversion.ll
llvm/trunk/test/Transforms/SROA/vector-lifetime-intrinsic.ll
llvm/trunk/test/Transforms/SROA/vector-promotion.ll
llvm/trunk/test/Transforms/SROA/vectors-of-pointers.ll
llvm/trunk/test/Transforms/SampleProfile/branch.ll
llvm/trunk/test/Transforms/SampleProfile/calls.ll
llvm/trunk/test/Transforms/SampleProfile/discriminator.ll
llvm/trunk/test/Transforms/SampleProfile/propagate.ll
llvm/trunk/test/Transforms/ScalarRepl/2003-05-29-ArrayFail.ll
llvm/trunk/test/Transforms/ScalarRepl/2006-11-07-InvalidArrayPromote.ll
llvm/trunk/test/Transforms/ScalarRepl/2007-05-29-MemcpyPreserve.ll
llvm/trunk/test/Transforms/ScalarRepl/2007-11-03-bigendian_apint.ll
llvm/trunk/test/Transforms/ScalarRepl/2008-01-29-PromoteBug.ll
llvm/trunk/test/Transforms/ScalarRepl/2008-02-28-SubElementExtractCrash.ll
llvm/trunk/test/Transforms/ScalarRepl/2008-06-05-loadstore-agg.ll
llvm/trunk/test/Transforms/ScalarRepl/2008-08-22-out-of-range-array-promote.ll
llvm/trunk/test/Transforms/ScalarRepl/2009-02-02-ScalarPromoteOutOfRange.ll
llvm/trunk/test/Transforms/ScalarRepl/2009-02-05-LoadFCA.ll
llvm/trunk/test/Transforms/ScalarRepl/2009-12-11-NeonTypes.ll
llvm/trunk/test/Transforms/ScalarRepl/2011-06-08-VectorExtractValue.ll
llvm/trunk/test/Transforms/ScalarRepl/2011-06-17-VectorPartialMemset.ll
llvm/trunk/test/Transforms/ScalarRepl/2011-09-22-PHISpeculateInvoke.ll
llvm/trunk/test/Transforms/ScalarRepl/2011-11-11-EmptyStruct.ll
llvm/trunk/test/Transforms/ScalarRepl/AggregatePromote.ll
llvm/trunk/test/Transforms/ScalarRepl/DifferingTypes.ll
llvm/trunk/test/Transforms/ScalarRepl/address-space.ll
llvm/trunk/test/Transforms/ScalarRepl/arraytest.ll
llvm/trunk/test/Transforms/ScalarRepl/badarray.ll
llvm/trunk/test/Transforms/ScalarRepl/basictest.ll
llvm/trunk/test/Transforms/ScalarRepl/bitfield-sroa.ll
llvm/trunk/test/Transforms/ScalarRepl/copy-aggregate.ll
llvm/trunk/test/Transforms/ScalarRepl/crash.ll
llvm/trunk/test/Transforms/ScalarRepl/debuginfo-preserved.ll
llvm/trunk/test/Transforms/ScalarRepl/inline-vector.ll
llvm/trunk/test/Transforms/ScalarRepl/lifetime.ll
llvm/trunk/test/Transforms/ScalarRepl/load-store-aggregate.ll
llvm/trunk/test/Transforms/ScalarRepl/memset-aggregate-byte-leader.ll
llvm/trunk/test/Transforms/ScalarRepl/memset-aggregate.ll
llvm/trunk/test/Transforms/ScalarRepl/nonzero-first-index.ll
llvm/trunk/test/Transforms/ScalarRepl/not-a-vector.ll
llvm/trunk/test/Transforms/ScalarRepl/phi-cycle.ll
llvm/trunk/test/Transforms/ScalarRepl/phi-select.ll
llvm/trunk/test/Transforms/ScalarRepl/phinodepromote.ll
llvm/trunk/test/Transforms/ScalarRepl/select_promote.ll
llvm/trunk/test/Transforms/ScalarRepl/sroa-fca.ll
llvm/trunk/test/Transforms/ScalarRepl/sroa_two.ll
llvm/trunk/test/Transforms/ScalarRepl/union-fp-int.ll
llvm/trunk/test/Transforms/ScalarRepl/union-packed.ll
llvm/trunk/test/Transforms/ScalarRepl/union-pointer.ll
llvm/trunk/test/Transforms/ScalarRepl/vector_memcpy.ll
llvm/trunk/test/Transforms/ScalarRepl/vector_promote.ll
llvm/trunk/test/Transforms/ScalarRepl/vectors-with-mismatched-elements.ll
llvm/trunk/test/Transforms/ScalarRepl/volatile.ll
llvm/trunk/test/Transforms/Scalarizer/basic.ll
llvm/trunk/test/Transforms/Scalarizer/dbginfo.ll
llvm/trunk/test/Transforms/Scalarizer/no-data-layout.ll
llvm/trunk/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
llvm/trunk/test/Transforms/SimplifyCFG/2005-06-16-PHICrash.ll
llvm/trunk/test/Transforms/SimplifyCFG/2005-12-03-IncorrectPHIFold.ll
llvm/trunk/test/Transforms/SimplifyCFG/2006-08-03-Crash.ll
llvm/trunk/test/Transforms/SimplifyCFG/2006-12-08-Ptr-ICmp-Branch.ll
llvm/trunk/test/Transforms/SimplifyCFG/2008-01-02-hoist-fp-add.ll
llvm/trunk/test/Transforms/SimplifyCFG/2008-07-13-InfLoopMiscompile.ll
llvm/trunk/test/Transforms/SimplifyCFG/2008-09-08-MultiplePred.ll
llvm/trunk/test/Transforms/SimplifyCFG/2009-05-12-externweak.ll
llvm/trunk/test/Transforms/SimplifyCFG/AArch64/prefer-fma.ll
llvm/trunk/test/Transforms/SimplifyCFG/EmptyBlockMerge.ll
llvm/trunk/test/Transforms/SimplifyCFG/PR17073.ll
llvm/trunk/test/Transforms/SimplifyCFG/SpeculativeExec.ll
llvm/trunk/test/Transforms/SimplifyCFG/X86/switch-covered-bug.ll
llvm/trunk/test/Transforms/SimplifyCFG/X86/switch-table-bug.ll
llvm/trunk/test/Transforms/SimplifyCFG/X86/switch_to_lookup_table.ll
llvm/trunk/test/Transforms/SimplifyCFG/basictest.ll
llvm/trunk/test/Transforms/SimplifyCFG/branch-fold-threshold.ll
llvm/trunk/test/Transforms/SimplifyCFG/branch-phi-thread.ll
llvm/trunk/test/Transforms/SimplifyCFG/dbginfo.ll
llvm/trunk/test/Transforms/SimplifyCFG/hoist-common-code.ll
llvm/trunk/test/Transforms/SimplifyCFG/hoist-with-range.ll
llvm/trunk/test/Transforms/SimplifyCFG/indirectbr.ll
llvm/trunk/test/Transforms/SimplifyCFG/iterative-simplify.ll
llvm/trunk/test/Transforms/SimplifyCFG/multiple-phis.ll
llvm/trunk/test/Transforms/SimplifyCFG/no_speculative_loads_with_tsan.ll
llvm/trunk/test/Transforms/SimplifyCFG/phi-undef-loadstore.ll
llvm/trunk/test/Transforms/SimplifyCFG/preserve-branchweights.ll
llvm/trunk/test/Transforms/SimplifyCFG/speculate-store.ll
llvm/trunk/test/Transforms/SimplifyCFG/speculate-with-offset.ll
llvm/trunk/test/Transforms/SimplifyCFG/switch-to-select-multiple-edge-per-block-phi.ll
llvm/trunk/test/Transforms/SimplifyCFG/switch_create.ll
llvm/trunk/test/Transforms/SimplifyCFG/trapping-load-unreachable.ll
llvm/trunk/test/Transforms/SimplifyCFG/unreachable-blocks.ll
llvm/trunk/test/Transforms/Sink/basic.ll
llvm/trunk/test/Transforms/StripSymbols/strip-dead-debug-info.ll
llvm/trunk/test/Transforms/StructurizeCFG/post-order-traversal-bug.ll
llvm/trunk/test/Transforms/TailCallElim/basic.ll
llvm/trunk/test/Transforms/TailCallElim/dont_reorder_load.ll
llvm/trunk/test/Transforms/TailCallElim/reorder_load.ll
llvm/trunk/test/Verifier/2006-10-15-AddrLabel.ll
llvm/trunk/test/Verifier/range-1.ll
llvm/trunk/test/Verifier/range-2.ll
llvm/trunk/test/tools/gold/slp-vectorize.ll
llvm/trunk/test/tools/gold/vectorize.ll
Modified: llvm/trunk/lib/AsmParser/LLParser.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/AsmParser/LLParser.cpp?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/lib/AsmParser/LLParser.cpp (original)
+++ llvm/trunk/lib/AsmParser/LLParser.cpp Fri Feb 27 15:17:42 2015
@@ -5241,7 +5241,11 @@ int LLParser::ParseLoad(Instruction *&In
Lex.Lex();
}
- if (ParseTypeAndValue(Val, Loc, PFS) ||
+ Type *Ty = nullptr;
+ LocTy ExplicitTypeLoc = Lex.getLoc();
+ if (ParseType(Ty) ||
+ ParseToken(lltok::comma, "expected comma after load's type") ||
+ ParseTypeAndValue(Val, Loc, PFS) ||
ParseScopeAndOrdering(isAtomic, Scope, Ordering) ||
ParseOptionalCommaAlign(Alignment, AteExtraComma))
return true;
@@ -5254,6 +5258,10 @@ int LLParser::ParseLoad(Instruction *&In
if (Ordering == Release || Ordering == AcquireRelease)
return Error(Loc, "atomic load cannot use Release ordering");
+ if (Ty != cast<PointerType>(Val->getType())->getElementType())
+ return Error(ExplicitTypeLoc,
+ "explicit pointee type doesn't match operand's pointee type");
+
Inst = new LoadInst(Val, "", isVolatile, Alignment, Ordering, Scope);
return AteExtraComma ? InstExtraComma : InstNormal;
}
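
(To make the parser change above concrete, here is a minimal sketch of the syntax it now enforces. The example is illustrative only and not part of the patch; the function and value names are invented.)

define i32 @example(i32* %p) {
  ; The load's result type is now spelled out before the pointer operand.
  %v = load i32, i32* %p
  ret i32 %v
}

; Omitting the comma, as in the old "load i32* %p" form, is rejected with
; "expected comma after load's type". Writing a type that disagrees with the
; operand, e.g. "load i64, i32* %p", is rejected with "explicit pointee type
; doesn't match operand's pointee type". Both error strings come straight
; from the hunk above.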
Modified: llvm/trunk/lib/IR/AsmWriter.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/IR/AsmWriter.cpp?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/lib/IR/AsmWriter.cpp (original)
+++ llvm/trunk/lib/IR/AsmWriter.cpp Fri Feb 27 15:17:42 2015
@@ -2898,10 +2898,14 @@ void AssemblyWriter::printInstruction(co
Out << ", ";
TypePrinter.print(I.getType(), Out);
} else if (Operand) { // Print the normal way.
- if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
+ if (const auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
Out << ' ';
TypePrinter.print(GEP->getSourceElementType(), Out);
Out << ',';
+ } else if (const auto *LI = dyn_cast<LoadInst>(&I)) {
+ Out << ' ';
+ TypePrinter.print(LI->getType(), Out);
+ Out << ", ";
}
// PrintAllTypes - Instructions who have operands of all the same type
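
(On the printing side, the AsmWriter change emits the same explicit result type ahead of the pointer operand, so printed IR round-trips through the updated parser. A hedged sketch of the printed forms, with invented names; the volatile and atomic variants follow from the ordering checks in the parser hunk above.)

  %v = load i32, i32* %p
  %w = load volatile i32, i32* %p
  %a = load atomic i32, i32* %p seq_cst, align 4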
Modified: llvm/trunk/test/Analysis/BasicAA/2003-02-26-AccessSizeTest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/2003-02-26-AccessSizeTest.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/2003-02-26-AccessSizeTest.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/2003-02-26-AccessSizeTest.ll Fri Feb 27 15:17:42 2015
@@ -5,15 +5,15 @@
; RUN: opt < %s -basicaa -gvn -instcombine -S | FileCheck %s
define i32 @test() {
-; CHECK: %Y.DONOTREMOVE = load i32* %A
+; CHECK: %Y.DONOTREMOVE = load i32, i32* %A
; CHECK: %Z = sub i32 0, %Y.DONOTREMOVE
%A = alloca i32
store i32 0, i32* %A
- %X = load i32* %A
+ %X = load i32, i32* %A
%B = bitcast i32* %A to i8*
%C = getelementptr i8, i8* %B, i64 1
store i8 1, i8* %C ; Aliases %A
- %Y.DONOTREMOVE = load i32* %A
+ %Y.DONOTREMOVE = load i32, i32* %A
%Z = sub i32 %X, %Y.DONOTREMOVE
ret i32 %Z
}
Modified: llvm/trunk/test/Analysis/BasicAA/2003-04-22-GEPProblem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/2003-04-22-GEPProblem.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/2003-04-22-GEPProblem.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/2003-04-22-GEPProblem.ll Fri Feb 27 15:17:42 2015
@@ -6,9 +6,9 @@ define i32 @test(i32 *%Ptr, i64 %V) {
; CHECK: sub i32 %X, %Y
%P2 = getelementptr i32, i32* %Ptr, i64 1
%P1 = getelementptr i32, i32* %Ptr, i64 %V
- %X = load i32* %P1
+ %X = load i32, i32* %P1
store i32 5, i32* %P2
- %Y = load i32* %P1
+ %Y = load i32, i32* %P1
%Z = sub i32 %X, %Y
ret i32 %Z
}
Modified: llvm/trunk/test/Analysis/BasicAA/2003-05-21-GEP-Problem.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/2003-05-21-GEP-Problem.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/2003-05-21-GEP-Problem.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/2003-05-21-GEP-Problem.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@ define void @table_reindex(%struct..apr_
loopentry: ; preds = %0, %no_exit
%tmp.101 = getelementptr %struct..apr_table_t, %struct..apr_table_t* %t.1, i64 0, i32 0, i32 2
- %tmp.11 = load i32* %tmp.101 ; <i32> [#uses=0]
+ %tmp.11 = load i32, i32* %tmp.101 ; <i32> [#uses=0]
br i1 false, label %no_exit, label %UnifiedExitNode
no_exit: ; preds = %loopentry
Modified: llvm/trunk/test/Analysis/BasicAA/2003-06-01-AliasCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/2003-06-01-AliasCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/2003-06-01-AliasCrash.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/2003-06-01-AliasCrash.ll Fri Feb 27 15:17:42 2015
@@ -2,10 +2,10 @@
define i32 @MTConcat([3 x i32]* %a.1) {
%tmp.961 = getelementptr [3 x i32], [3 x i32]* %a.1, i64 0, i64 4
- %tmp.97 = load i32* %tmp.961
+ %tmp.97 = load i32, i32* %tmp.961
%tmp.119 = getelementptr [3 x i32], [3 x i32]* %a.1, i64 1, i64 0
- %tmp.120 = load i32* %tmp.119
+ %tmp.120 = load i32, i32* %tmp.119
%tmp.1541 = getelementptr [3 x i32], [3 x i32]* %a.1, i64 0, i64 4
- %tmp.155 = load i32* %tmp.1541
+ %tmp.155 = load i32, i32* %tmp.1541
ret i32 0
}
Modified: llvm/trunk/test/Analysis/BasicAA/2003-09-19-LocalArgument.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/2003-09-19-LocalArgument.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/2003-09-19-LocalArgument.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/2003-09-19-LocalArgument.ll Fri Feb 27 15:17:42 2015
@@ -7,9 +7,9 @@
define i32 @test(i32* %P) {
%X = alloca i32
- %V1 = load i32* %P
+ %V1 = load i32, i32* %P
store i32 0, i32* %X
- %V2 = load i32* %P
+ %V2 = load i32, i32* %P
%Diff = sub i32 %V1, %V2
ret i32 %Diff
}
Modified: llvm/trunk/test/Analysis/BasicAA/2006-03-03-BadArraySubscript.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/2006-03-03-BadArraySubscript.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/2006-03-03-BadArraySubscript.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/2006-03-03-BadArraySubscript.ll Fri Feb 27 15:17:42 2015
@@ -15,9 +15,9 @@ no_exit: ; preds = %no_exit, %entry
%tmp.6 = getelementptr [3 x [3 x i32]], [3 x [3 x i32]]* %X, i32 0, i32 0, i32 %i.0.0 ; <i32*> [#uses=1]
store i32 1, i32* %tmp.6
%tmp.8 = getelementptr [3 x [3 x i32]], [3 x [3 x i32]]* %X, i32 0, i32 0, i32 0 ; <i32*> [#uses=1]
- %tmp.9 = load i32* %tmp.8 ; <i32> [#uses=1]
+ %tmp.9 = load i32, i32* %tmp.8 ; <i32> [#uses=1]
%tmp.11 = getelementptr [3 x [3 x i32]], [3 x [3 x i32]]* %X, i32 0, i32 1, i32 0 ; <i32*> [#uses=1]
- %tmp.12 = load i32* %tmp.11 ; <i32> [#uses=1]
+ %tmp.12 = load i32, i32* %tmp.11 ; <i32> [#uses=1]
%tmp.13 = add i32 %tmp.12, %tmp.9 ; <i32> [#uses=1]
%inc = add i32 %i.0.0, 1 ; <i32> [#uses=2]
%tmp.2 = icmp slt i32 %inc, %N ; <i1> [#uses=1]
Modified: llvm/trunk/test/Analysis/BasicAA/2007-01-13-BasePointerBadNoAlias.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/2007-01-13-BasePointerBadNoAlias.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/2007-01-13-BasePointerBadNoAlias.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/2007-01-13-BasePointerBadNoAlias.ll Fri Feb 27 15:17:42 2015
@@ -23,12 +23,12 @@ target triple = "i686-apple-darwin8"
define i32 @test(%struct.closure_type* %tmp18169) {
%tmp18174 = getelementptr %struct.closure_type, %struct.closure_type* %tmp18169, i32 0, i32 4, i32 0, i32 0 ; <i32*> [#uses=2]
%tmp18269 = bitcast i32* %tmp18174 to %struct.STYLE* ; <%struct.STYLE*> [#uses=1]
- %A = load i32* %tmp18174 ; <i32> [#uses=1]
+ %A = load i32, i32* %tmp18174 ; <i32> [#uses=1]
%tmp18272 = getelementptr %struct.STYLE, %struct.STYLE* %tmp18269, i32 0, i32 0, i32 0, i32 2 ; <i16*> [#uses=1]
store i16 123, i16* %tmp18272
- %Q = load i32* %tmp18174 ; <i32> [#uses=1]
+ %Q = load i32, i32* %tmp18174 ; <i32> [#uses=1]
%Z = sub i32 %A, %Q ; <i32> [#uses=1]
ret i32 %Z
}
Modified: llvm/trunk/test/Analysis/BasicAA/2007-08-05-GetOverloadedModRef.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/2007-08-05-GetOverloadedModRef.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/2007-08-05-GetOverloadedModRef.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/2007-08-05-GetOverloadedModRef.ll Fri Feb 27 15:17:42 2015
@@ -5,10 +5,10 @@ declare i16 @llvm.cttz.i16(i16, i1)
define i32 @test(i32* %P, i16* %Q) {
; CHECK: ret i32 0
- %A = load i16* %Q ; <i16> [#uses=1]
- %x = load i32* %P ; <i32> [#uses=1]
+ %A = load i16, i16* %Q ; <i16> [#uses=1]
+ %x = load i32, i32* %P ; <i32> [#uses=1]
%B = call i16 @llvm.cttz.i16( i16 %A, i1 true ) ; <i16> [#uses=1]
- %y = load i32* %P ; <i32> [#uses=1]
+ %y = load i32, i32* %P ; <i32> [#uses=1]
store i16 %B, i16* %Q
%z = sub i32 %x, %y ; <i32> [#uses=1]
ret i32 %z
Modified: llvm/trunk/test/Analysis/BasicAA/2007-10-24-ArgumentsGlobals.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/2007-10-24-ArgumentsGlobals.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/2007-10-24-ArgumentsGlobals.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/2007-10-24-ArgumentsGlobals.ll Fri Feb 27 15:17:42 2015
@@ -11,6 +11,6 @@ entry:
store i32 1, i32* getelementptr (%struct.B* @a, i32 0, i32 0, i32 0), align 8
%tmp4 = getelementptr %struct.A, %struct.A* %b, i32 0, i32 0 ;<i32*> [#uses=1]
store i32 0, i32* %tmp4, align 4
- %tmp7 = load i32* getelementptr (%struct.B* @a, i32 0, i32 0, i32 0), align 8 ; <i32> [#uses=1]
+ %tmp7 = load i32, i32* getelementptr (%struct.B* @a, i32 0, i32 0, i32 0), align 8 ; <i32> [#uses=1]
ret i32 %tmp7
}
Modified: llvm/trunk/test/Analysis/BasicAA/2007-11-05-SizeCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/2007-11-05-SizeCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/2007-11-05-SizeCrash.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/2007-11-05-SizeCrash.ll Fri Feb 27 15:17:42 2015
@@ -17,7 +17,7 @@ entry:
%tmp17 = getelementptr %struct.usb_hcd, %struct.usb_hcd* %hcd, i32 0, i32 2, i64 1
; <i64*> [#uses=1]
%tmp1718 = bitcast i64* %tmp17 to i32* ; <i32*> [#uses=1]
- %tmp19 = load i32* %tmp1718, align 4 ; <i32> [#uses=0]
+ %tmp19 = load i32, i32* %tmp1718, align 4 ; <i32> [#uses=0]
br i1 false, label %cond_true34, label %done_okay
cond_true34: ; preds = %entry
@@ -25,7 +25,7 @@ cond_true34: ; preds = %entry
2305843009213693950 ; <i64*> [#uses=1]
%tmp70 = bitcast i64* %tmp631 to %struct.device**
- %tmp71 = load %struct.device** %tmp70, align 8
+ %tmp71 = load %struct.device*, %struct.device** %tmp70, align 8
ret i32 undef
Modified: llvm/trunk/test/Analysis/BasicAA/2007-12-08-OutOfBoundsCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/2007-12-08-OutOfBoundsCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/2007-12-08-OutOfBoundsCrash.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/2007-12-08-OutOfBoundsCrash.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@ target triple = "x86_64-unknown-linux-gn
define i32 @ehci_pci_setup(%struct.usb_hcd* %hcd) {
entry:
%tmp14 = getelementptr %struct.usb_hcd, %struct.usb_hcd* %hcd, i32 0, i32 0, i32 0 ; <%struct.device**> [#uses=1]
- %tmp15 = load %struct.device** %tmp14, align 8 ; <%struct.device*> [#uses=0]
+ %tmp15 = load %struct.device*, %struct.device** %tmp14, align 8 ; <%struct.device*> [#uses=0]
br i1 false, label %bb25, label %return
bb25: ; preds = %entry
@@ -23,7 +23,7 @@ bb25: ; preds = %entry
cond_true: ; preds = %bb25
%tmp601 = getelementptr %struct.usb_hcd, %struct.usb_hcd* %hcd, i32 0, i32 1, i64 2305843009213693951 ; <i64*> [#uses=1]
%tmp67 = bitcast i64* %tmp601 to %struct.device** ; <%struct.device**> [#uses=1]
- %tmp68 = load %struct.device** %tmp67, align 8 ; <%struct.device*> [#uses=0]
+ %tmp68 = load %struct.device*, %struct.device** %tmp67, align 8 ; <%struct.device*> [#uses=0]
ret i32 undef
return: ; preds = %bb25, %entry
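
(As the two hunks above show, loads of pointer values follow the same rule: the explicit type is the loaded pointer type itself, one level less indirect than the operand. Sketch with invented names:)

  %d = load %struct.device*, %struct.device** %pp, align 8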
Modified: llvm/trunk/test/Analysis/BasicAA/2008-06-02-GEPTailCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/2008-06-02-GEPTailCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/2008-06-02-GEPTailCrash.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/2008-06-02-GEPTailCrash.ll Fri Feb 27 15:17:42 2015
@@ -10,6 +10,6 @@ target triple = "i686-pc-linux-gnu"
define void @test291() nounwind {
entry:
store i32 1138410269, i32* getelementptr ([5 x %struct.S291]* @a291, i32 0, i32 2, i32 1)
- %tmp54 = load i32* bitcast (%struct.S291* getelementptr ([5 x %struct.S291]* @a291, i32 0, i32 2) to i32*), align 4 ; <i32> [#uses=0]
+ %tmp54 = load i32, i32* bitcast (%struct.S291* getelementptr ([5 x %struct.S291]* @a291, i32 0, i32 2) to i32*), align 4 ; <i32> [#uses=0]
unreachable
}
Modified: llvm/trunk/test/Analysis/BasicAA/2008-11-23-NoaliasRet.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/2008-11-23-NoaliasRet.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/2008-11-23-NoaliasRet.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/2008-11-23-NoaliasRet.ll Fri Feb 27 15:17:42 2015
@@ -9,6 +9,6 @@ define i32 @foo() {
%B = call i32* @_Znwj(i32 4)
store i32 1, i32* %A
store i32 2, i32* %B
- %C = load i32* %A
+ %C = load i32, i32* %A
ret i32 %C
}
Modified: llvm/trunk/test/Analysis/BasicAA/2009-03-04-GEPNoalias.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/2009-03-04-GEPNoalias.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/2009-03-04-GEPNoalias.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/2009-03-04-GEPNoalias.ll Fri Feb 27 15:17:42 2015
@@ -3,12 +3,12 @@
declare noalias i32* @noalias()
define i32 @test(i32 %x) {
-; CHECK: load i32* %a
+; CHECK: load i32, i32* %a
%a = call i32* @noalias()
store i32 1, i32* %a
%b = getelementptr i32, i32* %a, i32 %x
store i32 2, i32* %b
- %c = load i32* %a
+ %c = load i32, i32* %a
ret i32 %c
}
Modified: llvm/trunk/test/Analysis/BasicAA/2009-10-13-AtomicModRef.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/2009-10-13-AtomicModRef.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/2009-10-13-AtomicModRef.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/2009-10-13-AtomicModRef.ll Fri Feb 27 15:17:42 2015
@@ -5,9 +5,9 @@ define i8 @foo(i8* %ptr) {
%P = getelementptr i8, i8* %ptr, i32 0
%Q = getelementptr i8, i8* %ptr, i32 1
; CHECK: getelementptr
- %X = load i8* %P
+ %X = load i8, i8* %P
%Y = atomicrmw add i8* %Q, i8 1 monotonic
- %Z = load i8* %P
+ %Z = load i8, i8* %P
; CHECK-NOT: = load
%A = sub i8 %X, %Z
ret i8 %A
Modified: llvm/trunk/test/Analysis/BasicAA/2009-10-13-GEP-BaseNoAlias.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/2009-10-13-GEP-BaseNoAlias.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/2009-10-13-GEP-BaseNoAlias.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/2009-10-13-GEP-BaseNoAlias.ll Fri Feb 27 15:17:42 2015
@@ -23,9 +23,9 @@ bb1:
bb2:
%P = phi i32* [ %b, %bb ], [ @Y, %bb1 ]
- %tmp1 = load i32* @Z, align 4
+ %tmp1 = load i32, i32* @Z, align 4
store i32 123, i32* %P, align 4
- %tmp2 = load i32* @Z, align 4
+ %tmp2 = load i32, i32* @Z, align 4
br label %return
return:
Modified: llvm/trunk/test/Analysis/BasicAA/2010-09-15-GEP-SignedArithmetic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/2010-09-15-GEP-SignedArithmetic.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/2010-09-15-GEP-SignedArithmetic.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/2010-09-15-GEP-SignedArithmetic.ll Fri Feb 27 15:17:42 2015
@@ -9,9 +9,9 @@ define i32 @test(i32* %tab, i32 %indvar)
%tmp31 = mul i32 %indvar, -2
%tmp32 = add i32 %tmp31, 30
%t.5 = getelementptr i32, i32* %tab, i32 %tmp32
- %loada = load i32* %tab
+ %loada = load i32, i32* %tab
store i32 0, i32* %t.5
- %loadb = load i32* %tab
+ %loadb = load i32, i32* %tab
%rval = add i32 %loada, %loadb
ret i32 %rval
}
Modified: llvm/trunk/test/Analysis/BasicAA/2014-03-18-Maxlookup-reached.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/2014-03-18-Maxlookup-reached.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/2014-03-18-Maxlookup-reached.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/2014-03-18-Maxlookup-reached.ll Fri Feb 27 15:17:42 2015
@@ -30,7 +30,7 @@ define i32 @main() {
store i8 0, i8* %10
%11 = getelementptr inbounds i8, i8* %10, i32 -1
store i8 0, i8* %11
- %12 = load i32* %1, align 4
+ %12 = load i32, i32* %1, align 4
ret i32 %12
; CHECK: ret i32 %12
}
Modified: llvm/trunk/test/Analysis/BasicAA/aligned-overread.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/aligned-overread.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/aligned-overread.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/aligned-overread.ll Fri Feb 27 15:17:42 2015
@@ -9,10 +9,10 @@ target triple = "x86_64-apple-macosx10.8
define i32 @main() nounwind uwtable ssp {
entry:
- %tmp = load i8* getelementptr inbounds ({ i8, i8, i8, i8, i8 }* @a, i64 0, i32 4), align 4
+ %tmp = load i8, i8* getelementptr inbounds ({ i8, i8, i8, i8, i8 }* @a, i64 0, i32 4), align 4
%tmp1 = or i8 %tmp, -128
store i8 %tmp1, i8* getelementptr inbounds ({ i8, i8, i8, i8, i8 }* @a, i64 0, i32 4), align 4
- %tmp2 = load i64* bitcast ({ i8, i8, i8, i8, i8 }* @a to i64*), align 8
+ %tmp2 = load i64, i64* bitcast ({ i8, i8, i8, i8, i8 }* @a to i64*), align 8
store i8 11, i8* getelementptr inbounds ({ i8, i8, i8, i8, i8 }* @a, i64 0, i32 4), align 4
%tmp3 = trunc i64 %tmp2 to i32
ret i32 %tmp3
Modified: llvm/trunk/test/Analysis/BasicAA/args-rets-allocas-loads.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/args-rets-allocas-loads.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/args-rets-allocas-loads.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/args-rets-allocas-loads.ll Fri Feb 27 15:17:42 2015
@@ -22,8 +22,8 @@ define void @caller_a(double* %arg_a0,
%noalias_ret_a0 = call double* @noalias_returner()
%noalias_ret_a1 = call double* @noalias_returner()
- %loaded_a0 = load double** %indirect_a0
- %loaded_a1 = load double** %indirect_a1
+ %loaded_a0 = load double*, double** %indirect_a0
+ %loaded_a1 = load double*, double** %indirect_a1
call void @callee(double* %escape_alloca_a0)
call void @callee(double* %escape_alloca_a1)
Modified: llvm/trunk/test/Analysis/BasicAA/byval.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/byval.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/byval.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/byval.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ define i32 @foo(%struct.x* byval %a) no
%tmp2 = getelementptr %struct.x, %struct.x* %a, i32 0, i32 0 ; <i32*> [#uses=2]
store i32 1, i32* %tmp2, align 4
store i32 2, i32* @g, align 4
- %tmp4 = load i32* %tmp2, align 4 ; <i32> [#uses=1]
+ %tmp4 = load i32, i32* %tmp2, align 4 ; <i32> [#uses=1]
ret i32 %tmp4
}
Modified: llvm/trunk/test/Analysis/BasicAA/cas.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/cas.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/cas.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/cas.ll Fri Feb 27 15:17:42 2015
@@ -6,9 +6,9 @@
; CHECK: ret i32 0
define i32 @main() {
- %a = load i32* @flag0
+ %a = load i32, i32* @flag0
%b = atomicrmw xchg i32* @turn, i32 1 monotonic
- %c = load i32* @flag0
+ %c = load i32, i32* @flag0
%d = sub i32 %a, %c
ret i32 %d
}
Modified: llvm/trunk/test/Analysis/BasicAA/dag.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/dag.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/dag.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/dag.ll Fri Feb 27 15:17:42 2015
@@ -36,6 +36,6 @@ xc:
%bigbase = bitcast i8* %base to i16*
store i16 -1, i16* %bigbase
- %loaded = load i8* %phi
+ %loaded = load i8, i8* %phi
ret i8 %loaded
}
Modified: llvm/trunk/test/Analysis/BasicAA/featuretest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/featuretest.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/featuretest.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/featuretest.ll Fri Feb 27 15:17:42 2015
@@ -19,12 +19,12 @@ define i32 @different_array_test(i64 %A,
call void @external(i32* %Array2)
%pointer = getelementptr i32, i32* %Array1, i64 %A
- %val = load i32* %pointer
+ %val = load i32, i32* %pointer
%pointer2 = getelementptr i32, i32* %Array2, i64 %B
store i32 7, i32* %pointer2
- %REMOVE = load i32* %pointer ; redundant with above load
+ %REMOVE = load i32, i32* %pointer ; redundant with above load
%retval = sub i32 %REMOVE, %val
ret i32 %retval
; CHECK: @different_array_test
@@ -41,9 +41,9 @@ define i32 @constant_array_index_test()
%P1 = getelementptr i32, i32* %Array, i64 7
%P2 = getelementptr i32, i32* %Array, i64 6
- %A = load i32* %P1
+ %A = load i32, i32* %P1
store i32 1, i32* %P2 ; Should not invalidate load
- %BREMOVE = load i32* %P1
+ %BREMOVE = load i32, i32* %P1
%Val = sub i32 %A, %BREMOVE
ret i32 %Val
; CHECK: @constant_array_index_test
@@ -53,10 +53,10 @@ define i32 @constant_array_index_test()
; Test that if two pointers are spaced out by a constant getelementptr, that
; they cannot alias.
define i32 @gep_distance_test(i32* %A) {
- %REMOVEu = load i32* %A
+ %REMOVEu = load i32, i32* %A
%B = getelementptr i32, i32* %A, i64 2 ; Cannot alias A
store i32 7, i32* %B
- %REMOVEv = load i32* %A
+ %REMOVEv = load i32, i32* %A
%r = sub i32 %REMOVEu, %REMOVEv
ret i32 %r
; CHECK: @gep_distance_test
@@ -67,10 +67,10 @@ define i32 @gep_distance_test(i32* %A) {
; cannot alias, even if there is a variable offset between them...
define i32 @gep_distance_test2({i32,i32}* %A, i64 %distance) {
%A1 = getelementptr {i32,i32}, {i32,i32}* %A, i64 0, i32 0
- %REMOVEu = load i32* %A1
+ %REMOVEu = load i32, i32* %A1
%B = getelementptr {i32,i32}, {i32,i32}* %A, i64 %distance, i32 1
store i32 7, i32* %B ; B cannot alias A, it's at least 4 bytes away
- %REMOVEv = load i32* %A1
+ %REMOVEv = load i32, i32* %A1
%r = sub i32 %REMOVEu, %REMOVEv
ret i32 %r
; CHECK: @gep_distance_test2
@@ -80,11 +80,11 @@ define i32 @gep_distance_test2({i32,i32}
; Test that we can do funny pointer things and that distance calc will still
; work.
define i32 @gep_distance_test3(i32 * %A) {
- %X = load i32* %A
+ %X = load i32, i32* %A
%B = bitcast i32* %A to i8*
%C = getelementptr i8, i8* %B, i64 4
store i8 42, i8* %C
- %Y = load i32* %A
+ %Y = load i32, i32* %A
%R = sub i32 %X, %Y
ret i32 %R
; CHECK: @gep_distance_test3
@@ -96,9 +96,9 @@ define i32 @constexpr_test() {
%X = alloca i32
call void @external(i32* %X)
- %Y = load i32* %X
+ %Y = load i32, i32* %X
store i32 5, i32* getelementptr ({ i32 }* @Global, i64 0, i32 0)
- %REMOVE = load i32* %X
+ %REMOVE = load i32, i32* %X
%retval = sub i32 %Y, %REMOVE
ret i32 %retval
; CHECK: @constexpr_test
@@ -113,12 +113,12 @@ define i16 @zext_sext_confusion(i16* %ro
entry:
%sum5.cast = zext i5 %j to i64 ; <i64> [#uses=1]
%P1 = getelementptr i16, i16* %row2col, i64 %sum5.cast
- %row2col.load.1.2 = load i16* %P1, align 1 ; <i16> [#uses=1]
+ %row2col.load.1.2 = load i16, i16* %P1, align 1 ; <i16> [#uses=1]
%sum13.cast31 = sext i5 %j to i6 ; <i6> [#uses=1]
%sum13.cast = zext i6 %sum13.cast31 to i64 ; <i64> [#uses=1]
%P2 = getelementptr i16, i16* %row2col, i64 %sum13.cast
- %row2col.load.1.6 = load i16* %P2, align 1 ; <i16> [#uses=1]
+ %row2col.load.1.6 = load i16, i16* %P2, align 1 ; <i16> [#uses=1]
%.ret = sub i16 %row2col.load.1.6, %row2col.load.1.2 ; <i16> [#uses=1]
ret i16 %.ret
Modified: llvm/trunk/test/Analysis/BasicAA/full-store-partial-alias.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/full-store-partial-alias.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/full-store-partial-alias.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/full-store-partial-alias.ll Fri Feb 27 15:17:42 2015
@@ -20,11 +20,11 @@ entry:
%u = alloca %union.anon, align 8
%tmp9 = getelementptr inbounds %union.anon, %union.anon* %u, i64 0, i32 0
store double %x, double* %tmp9, align 8, !tbaa !0
- %tmp2 = load i32* bitcast (i64* @endianness_test to i32*), align 8, !tbaa !3
+ %tmp2 = load i32, i32* bitcast (i64* @endianness_test to i32*), align 8, !tbaa !3
%idxprom = sext i32 %tmp2 to i64
%tmp4 = bitcast %union.anon* %u to [2 x i32]*
%arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %tmp4, i64 0, i64 %idxprom
- %tmp5 = load i32* %arrayidx, align 4, !tbaa !3
+ %tmp5 = load i32, i32* %arrayidx, align 4, !tbaa !3
%tmp5.lobit = lshr i32 %tmp5, 31
ret i32 %tmp5.lobit
}
Modified: llvm/trunk/test/Analysis/BasicAA/gcsetest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/gcsetest.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/gcsetest.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/gcsetest.ll Fri Feb 27 15:17:42 2015
@@ -12,11 +12,11 @@
; CHECK-NEXT: ret i32 0
define i32 @test() {
- %A1 = load i32* @A
+ %A1 = load i32, i32* @A
store i32 123, i32* @B ; Store cannot alias @A
- %A2 = load i32* @A
+ %A2 = load i32, i32* @A
%X = sub i32 %A1, %A2
ret i32 %X
}
@@ -30,13 +30,13 @@ define i32 @test() {
; CHECK-NEXT: ret i32 0
define i32 @test2() {
- %A1 = load i32* @A
+ %A1 = load i32, i32* @A
br label %Loop
Loop:
%AP = phi i32 [0, %0], [%X, %Loop]
store i32 %AP, i32* @B ; Store cannot alias @A
- %A2 = load i32* @A
+ %A2 = load i32, i32* @A
%X = sub i32 %A1, %A2
%c = icmp eq i32 %X, 0
br i1 %c, label %out, label %Loop
@@ -55,7 +55,7 @@ define i32 @test3() {
%X = alloca i32
store i32 7, i32* %X
call void @external()
- %V = load i32* %X
+ %V = load i32, i32* %X
ret i32 %V
}
Modified: llvm/trunk/test/Analysis/BasicAA/gep-alias.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/gep-alias.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/gep-alias.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/gep-alias.ll Fri Feb 27 15:17:42 2015
@@ -7,11 +7,11 @@ define i32 @test1(i8 * %P) {
entry:
%Q = bitcast i8* %P to {i32, i32}*
%R = getelementptr {i32, i32}, {i32, i32}* %Q, i32 0, i32 1
- %S = load i32* %R
+ %S = load i32, i32* %R
%q = bitcast i8* %P to {i32, i32}*
%r = getelementptr {i32, i32}, {i32, i32}* %q, i32 0, i32 1
- %s = load i32* %r
+ %s = load i32, i32* %r
%t = sub i32 %S, %s
ret i32 %t
@@ -23,12 +23,12 @@ define i32 @test2(i8 * %P) {
entry:
%Q = bitcast i8* %P to {i32, i32, i32}*
%R = getelementptr {i32, i32, i32}, {i32, i32, i32}* %Q, i32 0, i32 1
- %S = load i32* %R
+ %S = load i32, i32* %R
%r = getelementptr {i32, i32, i32}, {i32, i32, i32}* %Q, i32 0, i32 2
store i32 42, i32* %r
- %s = load i32* %R
+ %s = load i32, i32* %R
%t = sub i32 %S, %s
ret i32 %t
@@ -42,12 +42,12 @@ define i32 @test3({float, {i32, i32, i32
entry:
%P2 = getelementptr {float, {i32, i32, i32}}, {float, {i32, i32, i32}}* %P, i32 0, i32 1
%R = getelementptr {i32, i32, i32}, {i32, i32, i32}* %P2, i32 0, i32 1
- %S = load i32* %R
+ %S = load i32, i32* %R
%r = getelementptr {i32, i32, i32}, {i32, i32, i32}* %P2, i32 0, i32 2
store i32 42, i32* %r
- %s = load i32* %R
+ %s = load i32, i32* %R
%t = sub i32 %S, %s
ret i32 %t
@@ -66,7 +66,7 @@ entry:
store i32 64, i32* %tmp2, align 8
%tmp3 = getelementptr inbounds %SmallPtrSet64, %SmallPtrSet64* %P, i64 0, i32 0, i32 4, i64 64
store i8* null, i8** %tmp3, align 8
- %tmp4 = load i32* %tmp2, align 8
+ %tmp4 = load i32, i32* %tmp2, align 8
ret i32 %tmp4
; CHECK-LABEL: @test4(
; CHECK: ret i32 64
@@ -77,9 +77,9 @@ define i32 @test5(i32* %p, i64 %i) {
%pi = getelementptr i32, i32* %p, i64 %i
%i.next = add i64 %i, 1
%pi.next = getelementptr i32, i32* %p, i64 %i.next
- %x = load i32* %pi
+ %x = load i32, i32* %pi
store i32 42, i32* %pi.next
- %y = load i32* %pi
+ %y = load i32, i32* %pi
%z = sub i32 %x, %y
ret i32 %z
; CHECK-LABEL: @test5(
@@ -90,9 +90,9 @@ define i32 @test5_as1_smaller_size(i32 a
%pi = getelementptr i32, i32 addrspace(1)* %p, i8 %i
%i.next = add i8 %i, 1
%pi.next = getelementptr i32, i32 addrspace(1)* %p, i8 %i.next
- %x = load i32 addrspace(1)* %pi
+ %x = load i32, i32 addrspace(1)* %pi
store i32 42, i32 addrspace(1)* %pi.next
- %y = load i32 addrspace(1)* %pi
+ %y = load i32, i32 addrspace(1)* %pi
%z = sub i32 %x, %y
ret i32 %z
; CHECK-LABEL: @test5_as1_smaller_size(
@@ -104,9 +104,9 @@ define i32 @test5_as1_same_size(i32 addr
%pi = getelementptr i32, i32 addrspace(1)* %p, i16 %i
%i.next = add i16 %i, 1
%pi.next = getelementptr i32, i32 addrspace(1)* %p, i16 %i.next
- %x = load i32 addrspace(1)* %pi
+ %x = load i32, i32 addrspace(1)* %pi
store i32 42, i32 addrspace(1)* %pi.next
- %y = load i32 addrspace(1)* %pi
+ %y = load i32, i32 addrspace(1)* %pi
%z = sub i32 %x, %y
ret i32 %z
; CHECK-LABEL: @test5_as1_same_size(
@@ -119,9 +119,9 @@ define i32 @test6(i32* %p, i64 %i1) {
%pi = getelementptr i32, i32* %p, i64 %i
%i.next = or i64 %i, 1
%pi.next = getelementptr i32, i32* %p, i64 %i.next
- %x = load i32* %pi
+ %x = load i32, i32* %pi
store i32 42, i32* %pi.next
- %y = load i32* %pi
+ %y = load i32, i32* %pi
%z = sub i32 %x, %y
ret i32 %z
; CHECK-LABEL: @test6(
@@ -133,9 +133,9 @@ define i32 @test7(i32* %p, i64 %i) {
%pi = getelementptr i32, i32* %p, i64 1
%i.next = shl i64 %i, 2
%pi.next = getelementptr i32, i32* %p, i64 %i.next
- %x = load i32* %pi
+ %x = load i32, i32* %pi
store i32 42, i32* %pi.next
- %y = load i32* %pi
+ %y = load i32, i32* %pi
%z = sub i32 %x, %y
ret i32 %z
; CHECK-LABEL: @test7(
@@ -150,9 +150,9 @@ define i32 @test8(i32* %p, i16 %i) {
%i.next = add i16 %i, 1
%i.next2 = zext i16 %i.next to i32
%pi.next = getelementptr i32, i32* %p, i32 %i.next2
- %x = load i32* %pi
+ %x = load i32, i32* %pi
store i32 42, i32* %pi.next
- %y = load i32* %pi
+ %y = load i32, i32* %pi
%z = sub i32 %x, %y
ret i32 %z
; CHECK-LABEL: @test8(
@@ -170,9 +170,9 @@ define i8 @test9([4 x i8] *%P, i32 %i, i
; P4 = P + 4*j
%P4 = getelementptr [4 x i8], [4 x i8]* %P, i32 0, i32 %j2
- %x = load i8* %P2
+ %x = load i8, i8* %P2
store i8 42, i8* %P4
- %y = load i8* %P2
+ %y = load i8, i8* %P2
%z = sub i8 %x, %y
ret i8 %z
; CHECK-LABEL: @test9(
@@ -188,9 +188,9 @@ define i8 @test10([4 x i8] *%P, i32 %i)
; P4 = P + 4*i
%P4 = getelementptr [4 x i8], [4 x i8]* %P, i32 0, i32 %i2
- %x = load i8* %P2
+ %x = load i8, i8* %P2
store i8 42, i8* %P4
- %y = load i8* %P2
+ %y = load i8, i8* %P2
%z = sub i8 %x, %y
ret i8 %z
; CHECK-LABEL: @test10(
@@ -207,7 +207,7 @@ define float @test11(i32 %indvar, [4 x [
%y29 = getelementptr inbounds [2 x float], [2 x float]* %arrayidx28, i32 0, i32 1
store float 1.0, float* %y29, align 4
store i64 0, i64* %scevgep35, align 4
- %tmp30 = load float* %y29, align 4
+ %tmp30 = load float, float* %y29, align 4
ret float %tmp30
; CHECK-LABEL: @test11(
; CHECK: ret float %tmp30
@@ -223,7 +223,7 @@ define i32 @test12(i32 %x, i32 %y, i8* %
%castp = bitcast i8* %p to i32*
store i32 1, i32* %castp
store i32 0, i32* %castd
- %r = load i32* %castp
+ %r = load i32, i32* %castp
ret i32 %r
; CHECK-LABEL: @test12(
; CHECK: ret i32 %r
Modified: llvm/trunk/test/Analysis/BasicAA/global-size.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/global-size.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/global-size.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/global-size.ll Fri Feb 27 15:17:42 2015
@@ -8,9 +8,9 @@ target datalayout = "E-p:64:64:64-p1:16:
; CHECK-LABEL: @test1(
define i16 @test1(i32* %P) {
- %X = load i16* @B
+ %X = load i16, i16* @B
store i32 7, i32* %P
- %Y = load i16* @B
+ %Y = load i16, i16* @B
%Z = sub i16 %Y, %X
ret i16 %Z
; CHECK: ret i16 0
@@ -21,9 +21,9 @@ define i16 @test1(i32* %P) {
define i16 @test1_as1(i32 addrspace(1)* %P) {
; CHECK-LABEL: @test1_as1(
; CHECK: ret i16 0
- %X = load i16 addrspace(1)* @B_as1
+ %X = load i16, i16 addrspace(1)* @B_as1
store i32 7, i32 addrspace(1)* %P
- %Y = load i16 addrspace(1)* @B_as1
+ %Y = load i16, i16 addrspace(1)* @B_as1
%Z = sub i16 %Y, %X
ret i16 %Z
}
@@ -39,10 +39,10 @@ define i8 @test2(i32 %tmp79, i32 %w.2, i
%tmp93 = add i32 %w.2, %indvar89
%arrayidx416 = getelementptr [0 x i8], [0 x i8]* @window, i32 0, i32 %tmp93
- %A = load i8* %arrayidx412, align 1
+ %A = load i8, i8* %arrayidx412, align 1
store i8 4, i8* %arrayidx416, align 1
- %B = load i8* %arrayidx412, align 1
+ %B = load i8, i8* %arrayidx412, align 1
%C = sub i8 %A, %B
ret i8 %C
Modified: llvm/trunk/test/Analysis/BasicAA/invariant_load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/invariant_load.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/invariant_load.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/invariant_load.ll Fri Feb 27 15:17:42 2015
@@ -10,15 +10,15 @@
define i32 @foo(i32* nocapture %p, i8* nocapture %q) {
entry:
- %0 = load i32* %p, align 4, !invariant.load !3
+ %0 = load i32, i32* %p, align 4, !invariant.load !3
%conv = trunc i32 %0 to i8
store i8 %conv, i8* %q, align 1
- %1 = load i32* %p, align 4, !invariant.load !3
+ %1 = load i32, i32* %p, align 4, !invariant.load !3
%add = add nsw i32 %1, 1
ret i32 %add
; CHECK: foo
-; CHECK: %0 = load i32* %p
+; CHECK: %0 = load i32, i32* %p
; CHECK: store i8 %conv, i8* %q,
; CHECK: %add = add nsw i32 %0, 1
}
Modified: llvm/trunk/test/Analysis/BasicAA/memset_pattern.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/memset_pattern.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/memset_pattern.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/memset_pattern.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@ entry:
store i32 1, i32* @z
tail call void @memset_pattern16(i8* bitcast (i32* @y to i8*), i8* bitcast (i32* @x to i8*), i64 4) nounwind
; CHECK-NOT: load
- %l = load i32* @z
+ %l = load i32, i32* @z
; CHECK: ret i32 1
ret i32 %l
}
Modified: llvm/trunk/test/Analysis/BasicAA/modref.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/modref.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/modref.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/modref.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@ define i32 @test0(i8* %P) {
call void @llvm.memset.p0i8.i32(i8* %P, i8 0, i32 42, i32 1, i1 false)
- %B = load i32* %A
+ %B = load i32, i32* %A
ret i32 %B
; CHECK-LABEL: @test0
@@ -29,7 +29,7 @@ define i8 @test1() {
call void @llvm.memcpy.p0i8.p0i8.i8(i8* %A, i8* %B, i8 -1, i32 0, i1 false)
- %C = load i8* %B
+ %C = load i8, i8* %B
ret i8 %C
; CHECK: ret i8 2
}
@@ -39,7 +39,7 @@ define i8 @test2(i8* %P) {
%P2 = getelementptr i8, i8* %P, i32 127
store i8 1, i8* %P2 ;; Not dead across memset
call void @llvm.memset.p0i8.i8(i8* %P, i8 2, i8 127, i32 0, i1 false)
- %A = load i8* %P2
+ %A = load i8, i8* %P2
ret i8 %A
; CHECK: ret i8 1
}
@@ -52,7 +52,7 @@ define i8 @test2a(i8* %P) {
store i8 1, i8* %P2 ;; Dead, clobbered by memset.
call void @llvm.memset.p0i8.i8(i8* %P, i8 2, i8 127, i32 0, i1 false)
- %A = load i8* %P2
+ %A = load i8, i8* %P2
ret i8 %A
; CHECK-NOT: load
; CHECK: ret i8 2
@@ -90,9 +90,9 @@ define void @test3a(i8* %P, i8 %X) {
@G2 = external global [4000 x i32]
define i32 @test4(i8* %P) {
- %tmp = load i32* @G1
+ %tmp = load i32, i32* @G1
call void @llvm.memset.p0i8.i32(i8* bitcast ([4000 x i32]* @G2 to i8*), i8 0, i32 4000, i32 1, i1 false)
- %tmp2 = load i32* @G1
+ %tmp2 = load i32, i32* @G1
%sub = sub i32 %tmp2, %tmp
ret i32 %sub
; CHECK-LABEL: @test4
@@ -105,9 +105,9 @@ define i32 @test4(i8* %P) {
; Verify that basicaa is handling variable length memcpy, knowing it doesn't
; write to G1.
define i32 @test5(i8* %P, i32 %Len) {
- %tmp = load i32* @G1
+ %tmp = load i32, i32* @G1
call void @llvm.memcpy.p0i8.p0i8.i32(i8* bitcast ([4000 x i32]* @G2 to i8*), i8* bitcast (i32* @G1 to i8*), i32 %Len, i32 1, i1 false)
- %tmp2 = load i32* @G1
+ %tmp2 = load i32, i32* @G1
%sub = sub i32 %tmp2, %tmp
ret i32 %sub
; CHECK: @test5
@@ -118,13 +118,13 @@ define i32 @test5(i8* %P, i32 %Len) {
}
define i8 @test6(i8* %p, i8* noalias %a) {
- %x = load i8* %a
+ %x = load i8, i8* %a
%t = va_arg i8* %p, float
- %y = load i8* %a
+ %y = load i8, i8* %a
%z = add i8 %x, %y
ret i8 %z
; CHECK-LABEL: @test6
-; CHECK: load i8* %a
+; CHECK: load i8, i8* %a
; CHECK-NOT: load
; CHECK: ret
}
@@ -137,12 +137,12 @@ entry:
store i32 0, i32* %x, align 4
%add.ptr = getelementptr inbounds i32, i32* %x, i64 1
call void @test7decl(i32* %add.ptr)
- %tmp = load i32* %x, align 4
+ %tmp = load i32, i32* %x, align 4
ret i32 %tmp
; CHECK-LABEL: @test7(
; CHECK: store i32 0
; CHECK: call void @test7decl
-; CHECK: load i32*
+; CHECK: load i32, i32*
}
declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind
Modified: llvm/trunk/test/Analysis/BasicAA/must-and-partial.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/must-and-partial.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/must-and-partial.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/must-and-partial.ll Fri Feb 27 15:17:42 2015
@@ -20,7 +20,7 @@ green:
%bigbase0 = bitcast i8* %base to i16*
store i16 -1, i16* %bigbase0
- %loaded = load i8* %phi
+ %loaded = load i8, i8* %phi
ret i8 %loaded
}
@@ -34,6 +34,6 @@ entry:
%bigbase1 = bitcast i8* %base to i16*
store i16 -1, i16* %bigbase1
- %loaded = load i8* %sel
+ %loaded = load i8, i8* %sel
ret i8 %loaded
}
Modified: llvm/trunk/test/Analysis/BasicAA/no-escape-call.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/no-escape-call.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/no-escape-call.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/no-escape-call.ll Fri Feb 27 15:17:42 2015
@@ -12,9 +12,9 @@ entry:
store i8* %tmp2, i8** %tmp4, align 4
%tmp10 = getelementptr i8, i8* %tmp2, i32 10 ; <i8*> [#uses=1]
store i8 42, i8* %tmp10, align 1
- %tmp14 = load i8** %tmp4, align 4 ; <i8*> [#uses=1]
+ %tmp14 = load i8*, i8** %tmp4, align 4 ; <i8*> [#uses=1]
%tmp16 = getelementptr i8, i8* %tmp14, i32 10 ; <i8*> [#uses=1]
- %tmp17 = load i8* %tmp16, align 1 ; <i8> [#uses=1]
+ %tmp17 = load i8, i8* %tmp16, align 1 ; <i8> [#uses=1]
%tmp19 = icmp eq i8 %tmp17, 42 ; <i1> [#uses=1]
ret i1 %tmp19
}
Modified: llvm/trunk/test/Analysis/BasicAA/noalias-bugs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/noalias-bugs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/noalias-bugs.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/noalias-bugs.ll Fri Feb 27 15:17:42 2015
@@ -27,7 +27,7 @@ define i64 @testcase(%nested * noalias %
; CHECK: store i64 1
store i64 2, i64* %ptr.64, align 8
- %r = load i64* %either_ptr.64, align 8
+ %r = load i64, i64* %either_ptr.64, align 8
store i64 1, i64* %ptr.64, align 8
ret i64 %r
}
Modified: llvm/trunk/test/Analysis/BasicAA/noalias-param.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/noalias-param.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/noalias-param.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/noalias-param.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ define void @no(i32* noalias %a, i32* %b
entry:
store i32 1, i32* %a
%cap = call i32* @captures(i32* %a) nounwind readonly
- %l = load i32* %b
+ %l = load i32, i32* %b
ret void
}
@@ -16,7 +16,7 @@ define void @yes(i32* %c, i32* %d) nounw
entry:
store i32 1, i32* %c
%cap = call i32* @captures(i32* %c) nounwind readonly
- %l = load i32* %d
+ %l = load i32, i32* %d
ret void
}
Modified: llvm/trunk/test/Analysis/BasicAA/nocapture.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/nocapture.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/nocapture.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/nocapture.ll Fri Feb 27 15:17:42 2015
@@ -6,9 +6,9 @@ define i32 @test2() {
; CHECK: ret i32 0
%P = alloca i32
%Q = call i32* @test(i32* %P)
- %a = load i32* %P
+ %a = load i32, i32* %P
store i32 4, i32* %Q ;; cannot clobber P since it is nocapture.
- %b = load i32* %P
+ %b = load i32, i32* %P
%c = sub i32 %a, %b
ret i32 %c
}
@@ -19,7 +19,7 @@ define i32 @test4(i32* noalias nocapture
; CHECK: call void @test3
; CHECK: store i32 0, i32* %p
; CHECK: store i32 1, i32* %x
-; CHECK: %y = load i32* %p
+; CHECK: %y = load i32, i32* %p
; CHECK: ret i32 %y
entry:
%q = alloca i32*
@@ -27,10 +27,10 @@ entry:
; attribute since the copy doesn't outlive the function.
call void @test3(i32** %q, i32* %p) nounwind
store i32 0, i32* %p
- %x = load i32** %q
+ %x = load i32*, i32** %q
; This store might write to %p and so we can't eliminate the subsequent
; load
store i32 1, i32* %x
- %y = load i32* %p
+ %y = load i32, i32* %p
ret i32 %y
}
Modified: llvm/trunk/test/Analysis/BasicAA/phi-aa.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/phi-aa.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/phi-aa.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/phi-aa.ll Fri Feb 27 15:17:42 2015
@@ -25,9 +25,9 @@ bb1:
bb2:
%P = phi i32* [ @X, %bb ], [ @Y, %bb1 ]
- %tmp1 = load i32* @Z, align 4
+ %tmp1 = load i32, i32* @Z, align 4
store i32 123, i32* %P, align 4
- %tmp2 = load i32* @Z, align 4
+ %tmp2 = load i32, i32* @Z, align 4
br label %return
return:
@@ -52,14 +52,14 @@ codeRepl:
br i1 %targetBlock, label %for.body, label %bye
for.body:
- %1 = load i32* %jj7, align 4
+ %1 = load i32, i32* %jj7, align 4
%idxprom4 = zext i32 %1 to i64
%arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* %oa5, i64 0, i64 %idxprom4
- %2 = load i32* %arrayidx5, align 4
+ %2 = load i32, i32* %arrayidx5, align 4
%sub6 = sub i32 %2, 6
store i32 %sub6, i32* %arrayidx5, align 4
; %0 and %arrayidx5 can alias! It is not safe to DSE the above store.
- %3 = load i32* %0, align 4
+ %3 = load i32, i32* %0, align 4
store i32 %3, i32* %arrayidx5, align 4
%sub11 = add i32 %1, -1
%idxprom12 = zext i32 %sub11 to i64
@@ -68,7 +68,7 @@ for.body:
br label %codeRepl
bye:
- %.reload = load i32* %jj7, align 4
+ %.reload = load i32, i32* %jj7, align 4
ret i32 %.reload
}
Modified: llvm/trunk/test/Analysis/BasicAA/phi-spec-order.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/phi-spec-order.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/phi-spec-order.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/phi-spec-order.ll Fri Feb 27 15:17:42 2015
@@ -24,20 +24,20 @@ for.body4:
%lsr.iv46 = bitcast [16000 x double]* %lsr.iv4 to <4 x double>*
%lsr.iv12 = bitcast [16000 x double]* %lsr.iv1 to <4 x double>*
%scevgep11 = getelementptr <4 x double>, <4 x double>* %lsr.iv46, i64 -2
- %i6 = load <4 x double>* %scevgep11, align 32
+ %i6 = load <4 x double>, <4 x double>* %scevgep11, align 32
%add = fadd <4 x double> %i6, <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00>
store <4 x double> %add, <4 x double>* %lsr.iv12, align 32
%scevgep10 = getelementptr <4 x double>, <4 x double>* %lsr.iv46, i64 -1
- %i7 = load <4 x double>* %scevgep10, align 32
+ %i7 = load <4 x double>, <4 x double>* %scevgep10, align 32
%add.4 = fadd <4 x double> %i7, <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00>
%scevgep9 = getelementptr <4 x double>, <4 x double>* %lsr.iv12, i64 1
store <4 x double> %add.4, <4 x double>* %scevgep9, align 32
- %i8 = load <4 x double>* %lsr.iv46, align 32
+ %i8 = load <4 x double>, <4 x double>* %lsr.iv46, align 32
%add.8 = fadd <4 x double> %i8, <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00>
%scevgep8 = getelementptr <4 x double>, <4 x double>* %lsr.iv12, i64 2
store <4 x double> %add.8, <4 x double>* %scevgep8, align 32
%scevgep7 = getelementptr <4 x double>, <4 x double>* %lsr.iv46, i64 1
- %i9 = load <4 x double>* %scevgep7, align 32
+ %i9 = load <4 x double>, <4 x double>* %scevgep7, align 32
%add.12 = fadd <4 x double> %i9, <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00>
%scevgep3 = getelementptr <4 x double>, <4 x double>* %lsr.iv12, i64 3
store <4 x double> %add.12, <4 x double>* %scevgep3, align 32
Modified: llvm/trunk/test/Analysis/BasicAA/phi-speculation.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/phi-speculation.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/phi-speculation.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/phi-speculation.ll Fri Feb 27 15:17:42 2015
@@ -17,10 +17,10 @@ while.body:
%ptr2_phi = phi i32* [ %ptr2, %entry ], [ %ptr2_inc, %while.body ]
%result.09 = phi i32 [ 0 , %entry ], [ %add, %while.body ]
%dec = add nsw i32 %num, -1
- %0 = load i32* %ptr_phi, align 4
+ %0 = load i32, i32* %ptr_phi, align 4
store i32 %0, i32* %ptr2_phi, align 4
- %1 = load i32* %coeff, align 4
- %2 = load i32* %ptr_phi, align 4
+ %1 = load i32, i32* %coeff, align 4
+ %2 = load i32, i32* %ptr_phi, align 4
%mul = mul nsw i32 %1, %2
%add = add nsw i32 %mul, %result.09
%tobool = icmp eq i32 %dec, 0
@@ -52,10 +52,10 @@ while.body:
%ptr2_phi = phi i32* [ %ptr_outer_phi2, %outer.while.header ], [ %ptr2_inc, %while.body ]
%result.09 = phi i32 [ 0 , %outer.while.header ], [ %add, %while.body ]
%dec = add nsw i32 %num, -1
- %0 = load i32* %ptr_phi, align 4
+ %0 = load i32, i32* %ptr_phi, align 4
store i32 %0, i32* %ptr2_phi, align 4
- %1 = load i32* %coeff, align 4
- %2 = load i32* %ptr_phi, align 4
+ %1 = load i32, i32* %coeff, align 4
+ %2 = load i32, i32* %ptr_phi, align 4
%mul = mul nsw i32 %1, %2
%add = add nsw i32 %mul, %result.09
%tobool = icmp eq i32 %dec, 0
Modified: llvm/trunk/test/Analysis/BasicAA/pr18573.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/pr18573.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/pr18573.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/pr18573.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ declare <8 x float> @llvm.x86.avx2.gathe
; Function Attrs: nounwind
define <8 x float> @foo1(i8* noalias readonly %arr.ptr, <8 x i32>* noalias readonly %vix.ptr, i8* noalias %t2.ptr) #1 {
allocas:
- %vix = load <8 x i32>* %vix.ptr, align 4
+ %vix = load <8 x i32>, <8 x i32>* %vix.ptr, align 4
%t1.ptr = getelementptr i8, i8* %arr.ptr, i8 4
%v1 = tail call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8* %arr.ptr, <8 x i32> %vix, <8 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>, i8 1) #2
@@ -31,7 +31,7 @@ allocas:
; Function Attrs: nounwind
define <8 x float> @foo2(i8* noalias readonly %arr.ptr, <8 x i32>* noalias readonly %vix.ptr, i8* noalias %t2.ptr) #1 {
allocas:
- %vix = load <8 x i32>* %vix.ptr, align 4
+ %vix = load <8 x i32>, <8 x i32>* %vix.ptr, align 4
%t1.ptr = getelementptr i8, i8* %arr.ptr, i8 4
%v1 = tail call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef, i8* %arr.ptr, <8 x i32> %vix, <8 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>, i8 1) #2
Modified: llvm/trunk/test/Analysis/BasicAA/store-promote.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/store-promote.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/store-promote.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/store-promote.ll Fri Feb 27 15:17:42 2015
@@ -10,11 +10,11 @@ target datalayout = "E-p:64:64:64-a0:0:8
@C = global [2 x i32] [ i32 4, i32 8 ] ; <[2 x i32]*> [#uses=2]
define i32 @test1(i1 %c) {
- %Atmp = load i32* @A ; <i32> [#uses=2]
+ %Atmp = load i32, i32* @A ; <i32> [#uses=2]
br label %Loop
Loop: ; preds = %Loop, %0
- %ToRemove = load i32* @A ; <i32> [#uses=1]
+ %ToRemove = load i32, i32* @A ; <i32> [#uses=1]
store i32 %Atmp, i32* @B
br i1 %c, label %Out, label %Loop
@@ -24,7 +24,7 @@ Out: ; preds = %Loop
; The Loop block should be empty after the load/store are promoted.
; CHECK: @test1
-; CHECK: load i32* @A
+; CHECK: load i32, i32* @A
; CHECK: Loop:
; CHECK-NEXT: br i1 %c, label %Out, label %Loop
; CHECK: Out:
@@ -35,10 +35,10 @@ define i32 @test2(i1 %c) {
br label %Loop
Loop: ; preds = %Loop, %0
- %AVal = load i32* @A ; <i32> [#uses=2]
+ %AVal = load i32, i32* @A ; <i32> [#uses=2]
%C0 = getelementptr [2 x i32], [2 x i32]* @C, i64 0, i64 0 ; <i32*> [#uses=1]
store i32 %AVal, i32* %C0
- %BVal = load i32* @B ; <i32> [#uses=2]
+ %BVal = load i32, i32* @B ; <i32> [#uses=2]
%C1 = getelementptr [2 x i32], [2 x i32]* @C, i64 0, i64 1 ; <i32*> [#uses=1]
store i32 %BVal, i32* %C1
br i1 %c, label %Out, label %Loop
Modified: llvm/trunk/test/Analysis/BasicAA/tailcall-modref.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/tailcall-modref.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/tailcall-modref.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/tailcall-modref.ll Fri Feb 27 15:17:42 2015
@@ -4,9 +4,9 @@ define i32 @test() {
; CHECK: ret i32 0
%A = alloca i32 ; <i32*> [#uses=3]
call void @foo( i32* %A )
- %X = load i32* %A ; <i32> [#uses=1]
+ %X = load i32, i32* %A ; <i32> [#uses=1]
tail call void @bar( )
- %Y = load i32* %A ; <i32> [#uses=1]
+ %Y = load i32, i32* %A ; <i32> [#uses=1]
%Z = sub i32 %X, %Y ; <i32> [#uses=1]
ret i32 %Z
}
Modified: llvm/trunk/test/Analysis/BasicAA/underlying-value.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/underlying-value.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/underlying-value.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/underlying-value.ll Fri Feb 27 15:17:42 2015
@@ -15,9 +15,9 @@ for.cond2:
for.body5: ; preds = %for.cond2
%arrayidx = getelementptr inbounds [2 x i64], [2 x i64]* undef, i32 0, i64 0
- %tmp7 = load i64* %arrayidx, align 8
+ %tmp7 = load i64, i64* %arrayidx, align 8
%arrayidx9 = getelementptr inbounds [2 x i64], [2 x i64]* undef, i32 0, i64 undef
- %tmp10 = load i64* %arrayidx9, align 8
+ %tmp10 = load i64, i64* %arrayidx9, align 8
br label %for.cond2
for.end22: ; preds = %for.cond
Modified: llvm/trunk/test/Analysis/BasicAA/zext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BasicAA/zext.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BasicAA/zext.ll (original)
+++ llvm/trunk/test/Analysis/BasicAA/zext.ll Fri Feb 27 15:17:42 2015
@@ -112,7 +112,7 @@ for.loop.exit:
define void @test_spec2006() {
%h = alloca [1 x [2 x i32*]], align 16
- %d.val = load i32* @d, align 4
+ %d.val = load i32, i32* @d, align 4
%d.promoted = sext i32 %d.val to i64
%1 = icmp slt i32 %d.val, 2
br i1 %1, label %.lr.ph, label %3
@@ -168,7 +168,7 @@ for.loop.exit:
define void @test_modulo_analysis_with_global() {
%h = alloca [1 x [2 x i32*]], align 16
- %b = load i32* @b, align 4
+ %b = load i32, i32* @b, align 4
%b.promoted = sext i32 %b to i64
br label %for.loop
Modified: llvm/trunk/test/Analysis/BlockFrequencyInfo/basic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BlockFrequencyInfo/basic.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BlockFrequencyInfo/basic.ll (original)
+++ llvm/trunk/test/Analysis/BlockFrequencyInfo/basic.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@ body:
%iv = phi i32 [ 0, %entry ], [ %next, %body ]
%base = phi i32 [ 0, %entry ], [ %sum, %body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
- %0 = load i32* %arrayidx
+ %0 = load i32, i32* %arrayidx
%sum = add nsw i32 %0, %base
%next = add i32 %iv, 1
%exitcond = icmp eq i32 %next, %i
Modified: llvm/trunk/test/Analysis/BranchProbabilityInfo/basic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BranchProbabilityInfo/basic.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BranchProbabilityInfo/basic.ll (original)
+++ llvm/trunk/test/Analysis/BranchProbabilityInfo/basic.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ body:
%iv = phi i32 [ 0, %entry ], [ %next, %body ]
%base = phi i32 [ 0, %entry ], [ %sum, %body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i32 %iv
- %0 = load i32* %arrayidx
+ %0 = load i32, i32* %arrayidx
%sum = add nsw i32 %0, %base
%next = add i32 %iv, 1
%exitcond = icmp eq i32 %next, %i
@@ -154,7 +154,7 @@ define i32 @test_cold_call_sites(i32* %a
entry:
%gep1 = getelementptr i32, i32* %a, i32 1
- %val1 = load i32* %gep1
+ %val1 = load i32, i32* %gep1
%cond1 = icmp ugt i32 %val1, 1
br i1 %cond1, label %then, label %else
@@ -165,7 +165,7 @@ then:
else:
%gep2 = getelementptr i32, i32* %a, i32 2
- %val2 = load i32* %gep2
+ %val2 = load i32, i32* %gep2
%val3 = call i32 @regular_function(i32 %val2)
br label %exit
Modified: llvm/trunk/test/Analysis/BranchProbabilityInfo/loop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BranchProbabilityInfo/loop.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BranchProbabilityInfo/loop.ll (original)
+++ llvm/trunk/test/Analysis/BranchProbabilityInfo/loop.ll Fri Feb 27 15:17:42 2015
@@ -88,7 +88,7 @@ entry:
do.body:
%i.0 = phi i32 [ 0, %entry ], [ %inc4, %if.end ]
call void @g1()
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%cmp = icmp slt i32 %0, 42
br i1 %cmp, label %do.body1, label %if.end
; CHECK: edge do.body -> do.body1 probability is 16 / 32 = 50%
@@ -124,7 +124,7 @@ entry:
do.body:
%i.0 = phi i32 [ 0, %entry ], [ %inc4, %do.end ]
call void @g1()
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%cmp = icmp slt i32 %0, 42
br i1 %cmp, label %return, label %do.body1
; CHECK: edge do.body -> return probability is 4 / 128
@@ -169,7 +169,7 @@ do.body:
do.body1:
%j.0 = phi i32 [ 0, %do.body ], [ %inc, %if.end ]
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%cmp = icmp slt i32 %0, 42
br i1 %cmp, label %return, label %if.end
; CHECK: edge do.body1 -> return probability is 4 / 128
@@ -214,7 +214,7 @@ do.body:
do.body1:
%j.0 = phi i32 [ 0, %do.body ], [ %inc, %do.cond ]
call void @g2()
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%cmp = icmp slt i32 %0, 42
br i1 %cmp, label %return, label %do.cond
; CHECK: edge do.body1 -> return probability is 4 / 128
@@ -258,7 +258,7 @@ for.body.lr.ph:
for.body:
%i.011 = phi i32 [ 0, %for.body.lr.ph ], [ %inc6, %for.inc5 ]
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%cmp1 = icmp eq i32 %0, %i.011
br i1 %cmp1, label %for.inc5, label %if.end
; CHECK: edge for.body -> for.inc5 probability is 16 / 32 = 50%
@@ -319,21 +319,21 @@ for.body:
for.body3:
%j.017 = phi i32 [ 0, %for.body ], [ %inc, %for.inc ]
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%cmp4 = icmp eq i32 %0, %j.017
br i1 %cmp4, label %for.inc, label %if.end
; CHECK: edge for.body3 -> for.inc probability is 16 / 32 = 50%
; CHECK: edge for.body3 -> if.end probability is 16 / 32 = 50%
if.end:
- %1 = load i32* %arrayidx5, align 4
+ %1 = load i32, i32* %arrayidx5, align 4
%cmp6 = icmp eq i32 %1, %j.017
br i1 %cmp6, label %for.inc, label %if.end8
; CHECK: edge if.end -> for.inc probability is 16 / 32 = 50%
; CHECK: edge if.end -> if.end8 probability is 16 / 32 = 50%
if.end8:
- %2 = load i32* %arrayidx9, align 4
+ %2 = load i32, i32* %arrayidx9, align 4
%cmp10 = icmp eq i32 %2, %j.017
br i1 %cmp10, label %for.inc, label %if.end12
; CHECK: edge if.end8 -> for.inc probability is 16 / 32 = 50%
Modified: llvm/trunk/test/Analysis/BranchProbabilityInfo/pr18705.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/BranchProbabilityInfo/pr18705.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/BranchProbabilityInfo/pr18705.ll (original)
+++ llvm/trunk/test/Analysis/BranchProbabilityInfo/pr18705.ll Fri Feb 27 15:17:42 2015
@@ -23,22 +23,22 @@ while.body:
%c.addr.09 = phi i32* [ %c, %while.body.lr.ph ], [ %c.addr.1, %if.end ]
%indvars.iv.next = add nsw i64 %indvars.iv, -1
%arrayidx = getelementptr inbounds float, float* %f0, i64 %indvars.iv.next
- %1 = load float* %arrayidx, align 4
+ %1 = load float, float* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds float, float* %f1, i64 %indvars.iv.next
- %2 = load float* %arrayidx2, align 4
+ %2 = load float, float* %arrayidx2, align 4
%cmp = fcmp une float %1, %2
br i1 %cmp, label %if.then, label %if.else
if.then:
%incdec.ptr = getelementptr inbounds i32, i32* %b.addr.011, i64 1
- %3 = load i32* %b.addr.011, align 4
+ %3 = load i32, i32* %b.addr.011, align 4
%add = add nsw i32 %3, 12
store i32 %add, i32* %b.addr.011, align 4
br label %if.end
if.else:
%incdec.ptr3 = getelementptr inbounds i32, i32* %c.addr.09, i64 1
- %4 = load i32* %c.addr.09, align 4
+ %4 = load i32, i32* %c.addr.09, align 4
%sub = add nsw i32 %4, -13
store i32 %sub, i32* %c.addr.09, align 4
br label %if.end
Modified: llvm/trunk/test/Analysis/CFLAliasAnalysis/full-store-partial-alias.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CFLAliasAnalysis/full-store-partial-alias.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/CFLAliasAnalysis/full-store-partial-alias.ll (original)
+++ llvm/trunk/test/Analysis/CFLAliasAnalysis/full-store-partial-alias.ll Fri Feb 27 15:17:42 2015
@@ -22,11 +22,11 @@ entry:
%u = alloca %union.anon, align 8
%tmp9 = getelementptr inbounds %union.anon, %union.anon* %u, i64 0, i32 0
store double %x, double* %tmp9, align 8, !tbaa !0
- %tmp2 = load i32* bitcast (i64* @endianness_test to i32*), align 8, !tbaa !3
+ %tmp2 = load i32, i32* bitcast (i64* @endianness_test to i32*), align 8, !tbaa !3
%idxprom = sext i32 %tmp2 to i64
%tmp4 = bitcast %union.anon* %u to [2 x i32]*
%arrayidx = getelementptr inbounds [2 x i32], [2 x i32]* %tmp4, i64 0, i64 %idxprom
- %tmp5 = load i32* %arrayidx, align 4, !tbaa !3
+ %tmp5 = load i32, i32* %arrayidx, align 4, !tbaa !3
%tmp5.lobit = lshr i32 %tmp5, 31
ret i32 %tmp5.lobit
}
Modified: llvm/trunk/test/Analysis/CFLAliasAnalysis/gep-signed-arithmetic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CFLAliasAnalysis/gep-signed-arithmetic.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/CFLAliasAnalysis/gep-signed-arithmetic.ll (original)
+++ llvm/trunk/test/Analysis/CFLAliasAnalysis/gep-signed-arithmetic.ll Fri Feb 27 15:17:42 2015
@@ -11,9 +11,9 @@ define i32 @test(i32 %indvar) nounwind {
%tmp31 = mul i32 %indvar, -2
%tmp32 = add i32 %tmp31, 30
%t.5 = getelementptr i32, i32* %tab, i32 %tmp32
- %loada = load i32* %tab
+ %loada = load i32, i32* %tab
store i32 0, i32* %t.5
- %loadb = load i32* %tab
+ %loadb = load i32, i32* %tab
%rval = add i32 %loada, %loadb
ret i32 %rval
}
Modified: llvm/trunk/test/Analysis/CFLAliasAnalysis/multilevel-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CFLAliasAnalysis/multilevel-combine.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/CFLAliasAnalysis/multilevel-combine.ll (original)
+++ llvm/trunk/test/Analysis/CFLAliasAnalysis/multilevel-combine.ll Fri Feb 27 15:17:42 2015
@@ -25,7 +25,7 @@ define void @test(i1 %C) {
store %T* %MS, %T** %M
- %AP = load %T** %M ; PartialAlias with %A, %B
+ %AP = load %T*, %T** %M ; PartialAlias with %A, %B
ret void
}
Modified: llvm/trunk/test/Analysis/CFLAliasAnalysis/multilevel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CFLAliasAnalysis/multilevel.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/CFLAliasAnalysis/multilevel.ll (original)
+++ llvm/trunk/test/Analysis/CFLAliasAnalysis/multilevel.ll Fri Feb 27 15:17:42 2015
@@ -23,8 +23,8 @@ define void @test() {
store %T* %A, %T** %M
store %T* %B, %T** %N
- %AP = load %T** %M ; PartialAlias with %A
- %BP = load %T** %N ; PartialAlias with %B
+ %AP = load %T*, %T** %M ; PartialAlias with %A
+ %BP = load %T*, %T** %N ; PartialAlias with %B
ret void
}
Modified: llvm/trunk/test/Analysis/CFLAliasAnalysis/must-and-partial.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CFLAliasAnalysis/must-and-partial.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/CFLAliasAnalysis/must-and-partial.ll (original)
+++ llvm/trunk/test/Analysis/CFLAliasAnalysis/must-and-partial.ll Fri Feb 27 15:17:42 2015
@@ -21,7 +21,7 @@ green:
%bigbase0 = bitcast i8* %base to i16*
store i16 -1, i16* %bigbase0
- %loaded = load i8* %phi
+ %loaded = load i8, i8* %phi
ret i8 %loaded
}
@@ -37,7 +37,7 @@ entry:
%bigbase1 = bitcast i8* %base to i16*
store i16 -1, i16* %bigbase1
- %loaded = load i8* %sel
+ %loaded = load i8, i8* %sel
ret i8 %loaded
}
@@ -46,9 +46,9 @@ entry:
; CHECK: MayAlias: double* %A, double* %Index
define void @testr2(double* nocapture readonly %A, double* nocapture readonly %Index) {
%arrayidx22 = getelementptr inbounds double, double* %Index, i64 2
- %1 = load double* %arrayidx22
+ %1 = load double, double* %arrayidx22
%arrayidx25 = getelementptr inbounds double, double* %A, i64 2
- %2 = load double* %arrayidx25
+ %2 = load double, double* %arrayidx25
%mul26 = fmul double %1, %2
ret void
}
Modified: llvm/trunk/test/Analysis/CostModel/AArch64/store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CostModel/AArch64/store.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/CostModel/AArch64/store.ll (original)
+++ llvm/trunk/test/Analysis/CostModel/AArch64/store.ll Fri Feb 27 15:17:42 2015
@@ -14,9 +14,9 @@ define void @store() {
; CHECK: cost of 64 {{.*}} store
store <4 x i8> undef, <4 x i8> * undef
; CHECK: cost of 16 {{.*}} load
- load <2 x i8> * undef
+ load <2 x i8> , <2 x i8> * undef
; CHECK: cost of 64 {{.*}} load
- load <4 x i8> * undef
+ load <4 x i8> , <4 x i8> * undef
ret void
}
Modified: llvm/trunk/test/Analysis/CostModel/ARM/insertelement.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CostModel/ARM/insertelement.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/CostModel/ARM/insertelement.ll (original)
+++ llvm/trunk/test/Analysis/CostModel/ARM/insertelement.ll Fri Feb 27 15:17:42 2015
@@ -10,8 +10,8 @@ target triple = "thumbv7-apple-ios6.0.0"
; CHECK: insertelement_i8
define void @insertelement_i8(%T_i8* %saddr,
%T_i8v* %vaddr) {
- %v0 = load %T_i8v* %vaddr
- %v1 = load %T_i8* %saddr
+ %v0 = load %T_i8v, %T_i8v* %vaddr
+ %v1 = load %T_i8, %T_i8* %saddr
;CHECK: estimated cost of 3 for {{.*}} insertelement <8 x i8>
%v2 = insertelement %T_i8v %v0, %T_i8 %v1, i32 1
store %T_i8v %v2, %T_i8v* %vaddr
@@ -24,8 +24,8 @@ define void @insertelement_i8(%T_i8* %sa
; CHECK: insertelement_i16
define void @insertelement_i16(%T_i16* %saddr,
%T_i16v* %vaddr) {
- %v0 = load %T_i16v* %vaddr
- %v1 = load %T_i16* %saddr
+ %v0 = load %T_i16v, %T_i16v* %vaddr
+ %v1 = load %T_i16, %T_i16* %saddr
;CHECK: estimated cost of 3 for {{.*}} insertelement <4 x i16>
%v2 = insertelement %T_i16v %v0, %T_i16 %v1, i32 1
store %T_i16v %v2, %T_i16v* %vaddr
@@ -37,8 +37,8 @@ define void @insertelement_i16(%T_i16* %
; CHECK: insertelement_i32
define void @insertelement_i32(%T_i32* %saddr,
%T_i32v* %vaddr) {
- %v0 = load %T_i32v* %vaddr
- %v1 = load %T_i32* %saddr
+ %v0 = load %T_i32v, %T_i32v* %vaddr
+ %v1 = load %T_i32, %T_i32* %saddr
;CHECK: estimated cost of 3 for {{.*}} insertelement <2 x i32>
%v2 = insertelement %T_i32v %v0, %T_i32 %v1, i32 1
store %T_i32v %v2, %T_i32v* %vaddr
Modified: llvm/trunk/test/Analysis/CostModel/PowerPC/load_store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CostModel/PowerPC/load_store.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/CostModel/PowerPC/load_store.ll (original)
+++ llvm/trunk/test/Analysis/CostModel/PowerPC/load_store.ll Fri Feb 27 15:17:42 2015
@@ -19,26 +19,26 @@ define i32 @stores(i32 %arg) {
}
define i32 @loads(i32 %arg) {
; CHECK: cost of 1 {{.*}} load
- load i8* undef, align 4
+ load i8, i8* undef, align 4
; CHECK: cost of 1 {{.*}} load
- load i16* undef, align 4
+ load i16, i16* undef, align 4
; CHECK: cost of 1 {{.*}} load
- load i32* undef, align 4
+ load i32, i32* undef, align 4
; CHECK: cost of 2 {{.*}} load
- load i64* undef, align 4
+ load i64, i64* undef, align 4
; CHECK: cost of 4 {{.*}} load
- load i128* undef, align 4
+ load i128, i128* undef, align 4
; FIXME: There actually are sub-vector Altivec loads, and so we could handle
; this with a small expense, but we don't currently.
; CHECK: cost of 48 {{.*}} load
- load <4 x i16>* undef, align 2
+ load <4 x i16>, <4 x i16>* undef, align 2
; CHECK: cost of 1 {{.*}} load
- load <4 x i32>* undef, align 4
+ load <4 x i32>, <4 x i32>* undef, align 4
; CHECK: cost of 46 {{.*}} load
- load <3 x float>* undef, align 1
+ load <3 x float>, <3 x float>* undef, align 1
ret i32 undef
}
Modified: llvm/trunk/test/Analysis/CostModel/X86/intrinsic-cost.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CostModel/X86/intrinsic-cost.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/CostModel/X86/intrinsic-cost.ll (original)
+++ llvm/trunk/test/Analysis/CostModel/X86/intrinsic-cost.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ vector.body:
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
%0 = getelementptr inbounds float, float* %f, i64 %index
%1 = bitcast float* %0 to <4 x float>*
- %wide.load = load <4 x float>* %1, align 4
+ %wide.load = load <4 x float>, <4 x float>* %1, align 4
%2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.load)
store <4 x float> %2, <4 x float>* %1, align 4
%index.next = add i64 %index, 4
@@ -39,7 +39,7 @@ vector.body:
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
%0 = getelementptr inbounds float, float* %f, i64 %index
%1 = bitcast float* %0 to <4 x float>*
- %wide.load = load <4 x float>* %1, align 4
+ %wide.load = load <4 x float>, <4 x float>* %1, align 4
%2 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %wide.load)
store <4 x float> %2, <4 x float>* %1, align 4
%index.next = add i64 %index, 4
@@ -67,7 +67,7 @@ vector.body:
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
%0 = getelementptr inbounds float, float* %f, i64 %index
%1 = bitcast float* %0 to <4 x float>*
- %wide.load = load <4 x float>* %1, align 4
+ %wide.load = load <4 x float>, <4 x float>* %1, align 4
%2 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %wide.load, <4 x float> %b, <4 x float> %c)
store <4 x float> %2, <4 x float>* %1, align 4
%index.next = add i64 %index, 4
Modified: llvm/trunk/test/Analysis/CostModel/X86/load_store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CostModel/X86/load_store.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/CostModel/X86/load_store.ll (original)
+++ llvm/trunk/test/Analysis/CostModel/X86/load_store.ll Fri Feb 27 15:17:42 2015
@@ -34,49 +34,49 @@ define i32 @stores(i32 %arg) {
}
define i32 @loads(i32 %arg) {
;CHECK: cost of 1 {{.*}} load
- load i8* undef, align 4
+ load i8, i8* undef, align 4
;CHECK: cost of 1 {{.*}} load
- load i16* undef, align 4
+ load i16, i16* undef, align 4
;CHECK: cost of 1 {{.*}} load
- load i32* undef, align 4
+ load i32, i32* undef, align 4
;CHECK: cost of 1 {{.*}} load
- load i64* undef, align 4
+ load i64, i64* undef, align 4
;CHECK: cost of 2 {{.*}} load
- load i128* undef, align 4
+ load i128, i128* undef, align 4
;CHECK: cost of 1 {{.*}} load
- load <2 x i32>* undef, align 4
+ load <2 x i32>, <2 x i32>* undef, align 4
;CHECK: cost of 1 {{.*}} load
- load <4 x i32>* undef, align 4
+ load <4 x i32>, <4 x i32>* undef, align 4
;CHECK: cost of 2 {{.*}} load
- load <8 x i32>* undef, align 4
+ load <8 x i32>, <8 x i32>* undef, align 4
;CHECK: cost of 1 {{.*}} load
- load <2 x i64>* undef, align 4
+ load <2 x i64>, <2 x i64>* undef, align 4
;CHECK: cost of 2 {{.*}} load
- load <4 x i64>* undef, align 4
+ load <4 x i64>, <4 x i64>* undef, align 4
;CHECK: cost of 4 {{.*}} load
- load <8 x i64>* undef, align 4
+ load <8 x i64>, <8 x i64>* undef, align 4
;CHECK: cost of 3 {{.*}} load
- load <3 x float>* undef, align 4
+ load <3 x float>, <3 x float>* undef, align 4
;CHECK: cost of 3 {{.*}} load
- load <3 x double>* undef, align 4
+ load <3 x double>, <3 x double>* undef, align 4
;CHECK: cost of 3 {{.*}} load
- load <3 x i32>* undef, align 4
+ load <3 x i32>, <3 x i32>* undef, align 4
;CHECK: cost of 3 {{.*}} load
- load <3 x i64>* undef, align 4
+ load <3 x i64>, <3 x i64>* undef, align 4
;CHECK: cost of 10 {{.*}} load
- load <5 x i32>* undef, align 4
+ load <5 x i32>, <5 x i32>* undef, align 4
;CHECK: cost of 10 {{.*}} load
- load <5 x i64>* undef, align 4
+ load <5 x i64>, <5 x i64>* undef, align 4
ret i32 undef
}
Modified: llvm/trunk/test/Analysis/CostModel/X86/loop_v2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CostModel/X86/loop_v2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/CostModel/X86/loop_v2.ll (original)
+++ llvm/trunk/test/Analysis/CostModel/X86/loop_v2.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ vector.body:
%vec.phi = phi <2 x i32> [ zeroinitializer, %vector.ph ], [ %12, %vector.body ]
%0 = getelementptr inbounds i32, i32* %A, i64 %index
%1 = bitcast i32* %0 to <2 x i32>*
- %2 = load <2 x i32>* %1, align 4
+ %2 = load <2 x i32>, <2 x i32>* %1, align 4
%3 = sext <2 x i32> %2 to <2 x i64>
;CHECK: cost of 1 {{.*}} extract
%4 = extractelement <2 x i64> %3, i32 0
@@ -20,10 +20,10 @@ vector.body:
;CHECK: cost of 1 {{.*}} extract
%6 = extractelement <2 x i64> %3, i32 1
%7 = getelementptr inbounds i32, i32* %A, i64 %6
- %8 = load i32* %5, align 4
+ %8 = load i32, i32* %5, align 4
;CHECK: cost of 1 {{.*}} insert
%9 = insertelement <2 x i32> undef, i32 %8, i32 0
- %10 = load i32* %7, align 4
+ %10 = load i32, i32* %7, align 4
;CHECK: cost of 1 {{.*}} insert
%11 = insertelement <2 x i32> %9, i32 %10, i32 1
%12 = add nsw <2 x i32> %11, %vec.phi
Modified: llvm/trunk/test/Analysis/CostModel/X86/vectorized-loop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/CostModel/X86/vectorized-loop.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/CostModel/X86/vectorized-loop.ll (original)
+++ llvm/trunk/test/Analysis/CostModel/X86/vectorized-loop.ll Fri Feb 27 15:17:42 2015
@@ -29,13 +29,13 @@ vector.body:
;CHECK: cost of 0 {{.*}} bitcast
%5 = bitcast i32* %4 to <8 x i32>*
;CHECK: cost of 2 {{.*}} load
- %6 = load <8 x i32>* %5, align 4
+ %6 = load <8 x i32>, <8 x i32>* %5, align 4
;CHECK: cost of 4 {{.*}} mul
%7 = mul nsw <8 x i32> %6, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
%8 = getelementptr inbounds i32, i32* %A, i64 %index
%9 = bitcast i32* %8 to <8 x i32>*
;CHECK: cost of 2 {{.*}} load
- %10 = load <8 x i32>* %9, align 4
+ %10 = load <8 x i32>, <8 x i32>* %9, align 4
;CHECK: cost of 4 {{.*}} add
%11 = add nsw <8 x i32> %10, %7
;CHECK: cost of 2 {{.*}} store
@@ -54,12 +54,12 @@ for.body:
%13 = add nsw i64 %indvars.iv, 2
%arrayidx = getelementptr inbounds i32, i32* %B, i64 %13
;CHECK: cost of 1 {{.*}} load
- %14 = load i32* %arrayidx, align 4
+ %14 = load i32, i32* %arrayidx, align 4
;CHECK: cost of 1 {{.*}} mul
%mul = mul nsw i32 %14, 5
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
;CHECK: cost of 1 {{.*}} load
- %15 = load i32* %arrayidx2, align 4
+ %15 = load i32, i32* %arrayidx2, align 4
%add3 = add nsw i32 %15, %mul
store i32 %add3, i32* %arrayidx2, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
Modified: llvm/trunk/test/Analysis/Delinearization/gcd_multiply_expr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/Delinearization/gcd_multiply_expr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/Delinearization/gcd_multiply_expr.ll (original)
+++ llvm/trunk/test/Analysis/Delinearization/gcd_multiply_expr.ll Fri Feb 27 15:17:42 2015
@@ -27,7 +27,7 @@
define i32 @fn2() {
entry:
- %.pr = load i32* @d, align 4
+ %.pr = load i32, i32* @d, align 4
%phitmp = icmp eq i32 %.pr, 0
br label %for.cond
@@ -36,11 +36,11 @@ for.cond:
br i1 %0, label %for.cond, label %for.cond2thread-pre-split.preheader.i
for.cond2thread-pre-split.preheader.i:
- %1 = load i32* @g, align 4
- %2 = load i32* @h, align 4
+ %1 = load i32, i32* @g, align 4
+ %2 = load i32, i32* @h, align 4
%mul = mul nsw i32 %2, %1
- %3 = load i8** @f, align 4
- %.pr.pre.i = load i32* @b, align 4
+ %3 = load i8*, i8** @f, align 4
+ %.pr.pre.i = load i32, i32* @b, align 4
br label %for.cond2thread-pre-split.i
for.cond2thread-pre-split.i:
@@ -65,56 +65,56 @@ for.body4.i:
%8 = phi i32 [ %inc.7.i, %for.body4.i ], [ %.pr.i, %for.body4.i.preheader ]
%arrayidx.sum1 = add i32 %add.i, %8
%arrayidx.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum1
- %9 = load i8* %arrayidx.i, align 1
+ %9 = load i8, i8* %arrayidx.i, align 1
%conv.i = sext i8 %9 to i32
store i32 %conv.i, i32* @c, align 4
%inc.i = add nsw i32 %8, 1
store i32 %inc.i, i32* @b, align 4
%arrayidx.sum2 = add i32 %add.i, %inc.i
%arrayidx.1.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum2
- %10 = load i8* %arrayidx.1.i, align 1
+ %10 = load i8, i8* %arrayidx.1.i, align 1
%conv.1.i = sext i8 %10 to i32
store i32 %conv.1.i, i32* @c, align 4
%inc.1.i = add nsw i32 %8, 2
store i32 %inc.1.i, i32* @b, align 4
%arrayidx.sum3 = add i32 %add.i, %inc.1.i
%arrayidx.2.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum3
- %11 = load i8* %arrayidx.2.i, align 1
+ %11 = load i8, i8* %arrayidx.2.i, align 1
%conv.2.i = sext i8 %11 to i32
store i32 %conv.2.i, i32* @c, align 4
%inc.2.i = add nsw i32 %8, 3
store i32 %inc.2.i, i32* @b, align 4
%arrayidx.sum4 = add i32 %add.i, %inc.2.i
%arrayidx.3.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum4
- %12 = load i8* %arrayidx.3.i, align 1
+ %12 = load i8, i8* %arrayidx.3.i, align 1
%conv.3.i = sext i8 %12 to i32
store i32 %conv.3.i, i32* @c, align 4
%inc.3.i = add nsw i32 %8, 4
store i32 %inc.3.i, i32* @b, align 4
%arrayidx.sum5 = add i32 %add.i, %inc.3.i
%arrayidx.4.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum5
- %13 = load i8* %arrayidx.4.i, align 1
+ %13 = load i8, i8* %arrayidx.4.i, align 1
%conv.4.i = sext i8 %13 to i32
store i32 %conv.4.i, i32* @c, align 4
%inc.4.i = add nsw i32 %8, 5
store i32 %inc.4.i, i32* @b, align 4
%arrayidx.sum6 = add i32 %add.i, %inc.4.i
%arrayidx.5.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum6
- %14 = load i8* %arrayidx.5.i, align 1
+ %14 = load i8, i8* %arrayidx.5.i, align 1
%conv.5.i = sext i8 %14 to i32
store i32 %conv.5.i, i32* @c, align 4
%inc.5.i = add nsw i32 %8, 6
store i32 %inc.5.i, i32* @b, align 4
%arrayidx.sum7 = add i32 %add.i, %inc.5.i
%arrayidx.6.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum7
- %15 = load i8* %arrayidx.6.i, align 1
+ %15 = load i8, i8* %arrayidx.6.i, align 1
%conv.6.i = sext i8 %15 to i32
store i32 %conv.6.i, i32* @c, align 4
%inc.6.i = add nsw i32 %8, 7
store i32 %inc.6.i, i32* @b, align 4
%arrayidx.sum8 = add i32 %add.i, %inc.6.i
%arrayidx.7.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum8
- %16 = load i8* %arrayidx.7.i, align 1
+ %16 = load i8, i8* %arrayidx.7.i, align 1
%conv.7.i = sext i8 %16 to i32
store i32 %conv.7.i, i32* @c, align 4
%inc.7.i = add nsw i32 %8, 8
@@ -136,7 +136,7 @@ for.body4.ur.i:
%20 = phi i32 [ %inc.ur.i, %for.body4.ur.i ], [ %.ph, %for.body4.ur.i.preheader ]
%arrayidx.sum = add i32 %add.i, %20
%arrayidx.ur.i = getelementptr inbounds i8, i8* %3, i32 %arrayidx.sum
- %21 = load i8* %arrayidx.ur.i, align 1
+ %21 = load i8, i8* %arrayidx.ur.i, align 1
%conv.ur.i = sext i8 %21 to i32
store i32 %conv.ur.i, i32* @c, align 4
%inc.ur.i = add nsw i32 %20, 1
Modified: llvm/trunk/test/Analysis/Delinearization/himeno_1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/Delinearization/himeno_1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/Delinearization/himeno_1.ll (original)
+++ llvm/trunk/test/Analysis/Delinearization/himeno_1.ll Fri Feb 27 15:17:42 2015
@@ -36,23 +36,23 @@
define void @jacobi(i32 %nn, %struct.Mat* nocapture %a, %struct.Mat* nocapture %p) nounwind uwtable {
entry:
%p.rows.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %p, i64 0, i32 2
- %p.rows = load i32* %p.rows.ptr
+ %p.rows = load i32, i32* %p.rows.ptr
%p.rows.sub = add i32 %p.rows, -1
%p.rows.sext = sext i32 %p.rows.sub to i64
%p.cols.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %p, i64 0, i32 3
- %p.cols = load i32* %p.cols.ptr
+ %p.cols = load i32, i32* %p.cols.ptr
%p.cols.sub = add i32 %p.cols, -1
%p.cols.sext = sext i32 %p.cols.sub to i64
%p.deps.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %p, i64 0, i32 4
- %p.deps = load i32* %p.deps.ptr
+ %p.deps = load i32, i32* %p.deps.ptr
%p.deps.sub = add i32 %p.deps, -1
%p.deps.sext = sext i32 %p.deps.sub to i64
%a.cols.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %a, i64 0, i32 3
- %a.cols = load i32* %a.cols.ptr
+ %a.cols = load i32, i32* %a.cols.ptr
%a.deps.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %a, i64 0, i32 4
- %a.deps = load i32* %a.deps.ptr
+ %a.deps = load i32, i32* %a.deps.ptr
%a.base.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %a, i64 0, i32 0
- %a.base = load float** %a.base.ptr, align 8
+ %a.base = load float*, float** %a.base.ptr, align 8
br label %for.i
for.i: ; preds = %for.i.inc, %entry
Modified: llvm/trunk/test/Analysis/Delinearization/himeno_2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/Delinearization/himeno_2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/Delinearization/himeno_2.ll (original)
+++ llvm/trunk/test/Analysis/Delinearization/himeno_2.ll Fri Feb 27 15:17:42 2015
@@ -36,25 +36,25 @@
define void @jacobi(i32 %nn, %struct.Mat* nocapture %a, %struct.Mat* nocapture %p) nounwind uwtable {
entry:
%p.rows.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %p, i64 0, i32 2
- %p.rows = load i32* %p.rows.ptr
+ %p.rows = load i32, i32* %p.rows.ptr
%p.rows.sub = add i32 %p.rows, -1
%p.rows.sext = sext i32 %p.rows.sub to i64
%p.cols.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %p, i64 0, i32 3
- %p.cols = load i32* %p.cols.ptr
+ %p.cols = load i32, i32* %p.cols.ptr
%p.cols.sub = add i32 %p.cols, -1
%p.cols.sext = sext i32 %p.cols.sub to i64
%p.deps.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %p, i64 0, i32 4
- %p.deps = load i32* %p.deps.ptr
+ %p.deps = load i32, i32* %p.deps.ptr
%p.deps.sub = add i32 %p.deps, -1
%p.deps.sext = sext i32 %p.deps.sub to i64
%a.cols.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %a, i64 0, i32 3
- %a.cols = load i32* %a.cols.ptr
+ %a.cols = load i32, i32* %a.cols.ptr
%a.cols.sext = sext i32 %a.cols to i64
%a.deps.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %a, i64 0, i32 4
- %a.deps = load i32* %a.deps.ptr
+ %a.deps = load i32, i32* %a.deps.ptr
%a.deps.sext = sext i32 %a.deps to i64
%a.base.ptr = getelementptr inbounds %struct.Mat, %struct.Mat* %a, i64 0, i32 0
- %a.base = load float** %a.base.ptr, align 8
+ %a.base = load float*, float** %a.base.ptr, align 8
br label %for.i
for.i: ; preds = %for.i.inc, %entry
Modified: llvm/trunk/test/Analysis/Delinearization/multidim_only_ivs_2d.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/Delinearization/multidim_only_ivs_2d.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/Delinearization/multidim_only_ivs_2d.ll (original)
+++ llvm/trunk/test/Analysis/Delinearization/multidim_only_ivs_2d.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@
; A[i][j] = 1.0;
; }
-; Inst: %val = load double* %arrayidx
+; Inst: %val = load double, double* %arrayidx
; In Loop with Header: for.j
; AddRec: {{0,+,(%m * sizeof(double))}<%for.i>,+,sizeof(double)}<%for.j>
; Base offset: %A
@@ -35,7 +35,7 @@ for.j:
%j = phi i64 [ 0, %for.i ], [ %j.inc, %for.j ]
%vlaarrayidx.sum = add i64 %j, %tmp
%arrayidx = getelementptr inbounds double, double* %A, i64 %vlaarrayidx.sum
- %val = load double* %arrayidx
+ %val = load double, double* %arrayidx
store double %val, double* %arrayidx
%j.inc = add nsw i64 %j, 1
%j.exitcond = icmp eq i64 %j.inc, %m
Modified: llvm/trunk/test/Analysis/Delinearization/undef.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/Delinearization/undef.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/Delinearization/undef.ll (original)
+++ llvm/trunk/test/Analysis/Delinearization/undef.ll Fri Feb 27 15:17:42 2015
@@ -21,7 +21,7 @@ for.body60:
%tmp6 = mul i64 %tmp5, undef
%arrayidx69.sum = add i64 undef, %tmp6
%arrayidx70 = getelementptr inbounds double, double* %Ey, i64 %arrayidx69.sum
- %1 = load double* %arrayidx70, align 8
+ %1 = load double, double* %arrayidx70, align 8
%inc = add nsw i64 %ix.062, 1
br i1 false, label %for.body60, label %for.end
Modified: llvm/trunk/test/Analysis/DependenceAnalysis/Banerjee.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/DependenceAnalysis/Banerjee.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/DependenceAnalysis/Banerjee.ll (original)
+++ llvm/trunk/test/Analysis/DependenceAnalysis/Banerjee.ll Fri Feb 27 15:17:42 2015
@@ -46,7 +46,7 @@ for.body3:
%add5 = add nsw i64 %mul4, %j.02
%sub = add nsw i64 %add5, -1
%arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %sub
- %0 = load i64* %arrayidx6, align 8
+ %0 = load i64, i64* %arrayidx6, align 8
%incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
@@ -115,7 +115,7 @@ for.body3:
%add5 = add nsw i64 %mul4, %j.03
%sub = add nsw i64 %add5, -1
%arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %sub
- %2 = load i64* %arrayidx6, align 8
+ %2 = load i64, i64* %arrayidx6, align 8
%incdec.ptr = getelementptr inbounds i64, i64* %B.addr.12, i64 1
store i64 %2, i64* %B.addr.12, align 8
%inc = add nsw i64 %j.03, 1
@@ -181,7 +181,7 @@ for.body3:
%add5 = add nsw i64 %mul4, %j.02
%add6 = add nsw i64 %add5, 100
%arrayidx7 = getelementptr inbounds i64, i64* %A, i64 %add6
- %0 = load i64* %arrayidx7, align 8
+ %0 = load i64, i64* %arrayidx7, align 8
%incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
@@ -240,7 +240,7 @@ for.body3:
%add5 = add nsw i64 %mul4, %j.02
%add6 = add nsw i64 %add5, 99
%arrayidx7 = getelementptr inbounds i64, i64* %A, i64 %add6
- %0 = load i64* %arrayidx7, align 8
+ %0 = load i64, i64* %arrayidx7, align 8
%incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
@@ -299,7 +299,7 @@ for.body3:
%add5 = add nsw i64 %mul4, %j.02
%sub = add nsw i64 %add5, -100
%arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %sub
- %0 = load i64* %arrayidx6, align 8
+ %0 = load i64, i64* %arrayidx6, align 8
%incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
@@ -358,7 +358,7 @@ for.body3:
%add5 = add nsw i64 %mul4, %j.02
%sub = add nsw i64 %add5, -99
%arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %sub
- %0 = load i64* %arrayidx6, align 8
+ %0 = load i64, i64* %arrayidx6, align 8
%incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
@@ -417,7 +417,7 @@ for.body3:
%add5 = add nsw i64 %mul4, %j.02
%add6 = add nsw i64 %add5, 9
%arrayidx7 = getelementptr inbounds i64, i64* %A, i64 %add6
- %0 = load i64* %arrayidx7, align 8
+ %0 = load i64, i64* %arrayidx7, align 8
%incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
@@ -476,7 +476,7 @@ for.body3:
%add5 = add nsw i64 %mul4, %j.02
%add6 = add nsw i64 %add5, 10
%arrayidx7 = getelementptr inbounds i64, i64* %A, i64 %add6
- %0 = load i64* %arrayidx7, align 8
+ %0 = load i64, i64* %arrayidx7, align 8
%incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
@@ -535,7 +535,7 @@ for.body3:
%add5 = add nsw i64 %mul4, %j.02
%add6 = add nsw i64 %add5, 11
%arrayidx7 = getelementptr inbounds i64, i64* %A, i64 %add6
- %0 = load i64* %arrayidx7, align 8
+ %0 = load i64, i64* %arrayidx7, align 8
%incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
@@ -595,7 +595,7 @@ for.body3:
%sub = add i64 %i.03, %0
%add6 = add nsw i64 %sub, 11
%arrayidx7 = getelementptr inbounds i64, i64* %A, i64 %add6
- %1 = load i64* %arrayidx7, align 8
+ %1 = load i64, i64* %arrayidx7, align 8
%incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %1, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
@@ -654,7 +654,7 @@ for.body3:
%sub = add i64 %i.03, %0
%add5 = add nsw i64 %sub, 11
%arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %add5
- %1 = load i64* %arrayidx6, align 8
+ %1 = load i64, i64* %arrayidx6, align 8
%incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %1, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
@@ -713,7 +713,7 @@ for.body3:
%sub = sub nsw i64 %mul4, %j.02
%add5 = add nsw i64 %sub, 11
%arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %add5
- %0 = load i64* %arrayidx6, align 8
+ %0 = load i64, i64* %arrayidx6, align 8
%incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
@@ -772,7 +772,7 @@ for.body3:
%sub = sub nsw i64 %mul4, %j.02
%add5 = add nsw i64 %sub, 11
%arrayidx6 = getelementptr inbounds i64, i64* %A, i64 %add5
- %0 = load i64* %arrayidx6, align 8
+ %0 = load i64, i64* %arrayidx6, align 8
%incdec.ptr = getelementptr inbounds i64, i64* %B.addr.11, i64 1
store i64 %0, i64* %B.addr.11, align 8
%inc = add nsw i64 %j.02, 1
Modified: llvm/trunk/test/Analysis/DependenceAnalysis/Coupled.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/DependenceAnalysis/Coupled.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/DependenceAnalysis/Coupled.ll (original)
+++ llvm/trunk/test/Analysis/DependenceAnalysis/Coupled.ll Fri Feb 27 15:17:42 2015
@@ -29,7 +29,7 @@ for.body:
%add = add nsw i64 %i.02, 9
%add2 = add nsw i64 %i.02, 10
%arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add2, i64 %add
- %0 = load i32* %arrayidx4, align 4
+ %0 = load i32, i32* %arrayidx4, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
@@ -65,7 +65,7 @@ for.body:
%add = add nsw i64 %i.02, 9
%add2 = add nsw i64 %i.02, 9
%arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add2, i64 %add
- %0 = load i32* %arrayidx4, align 4
+ %0 = load i32, i32* %arrayidx4, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
@@ -103,7 +103,7 @@ for.body:
%arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub2, i64 %sub
store i32 %conv, i32* %arrayidx3, align 4
%arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx5, align 4
+ %0 = load i32, i32* %arrayidx5, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
@@ -141,7 +141,7 @@ for.body:
%arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub2, i64 %sub
store i32 %conv, i32* %arrayidx3, align 4
%arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx5, align 4
+ %0 = load i32, i32* %arrayidx5, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
@@ -180,7 +180,7 @@ for.body:
%arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub3, i64 %sub
store i32 %conv, i32* %arrayidx4, align 4
%arrayidx6 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx6, align 4
+ %0 = load i32, i32* %arrayidx6, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
@@ -221,7 +221,7 @@ for.body:
%arrayidx5 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add, i64 %sub
store i32 %conv, i32* %arrayidx5, align 4
%arrayidx7 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx7, align 4
+ %0 = load i32, i32* %arrayidx7, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
@@ -257,7 +257,7 @@ for.body:
%arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %sub
store i32 %conv, i32* %arrayidx1, align 4
%arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx3, align 4
+ %0 = load i32, i32* %arrayidx3, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
@@ -293,7 +293,7 @@ for.body:
%arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %sub
store i32 %conv, i32* %arrayidx1, align 4
%arrayidx3 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx3, align 4
+ %0 = load i32, i32* %arrayidx3, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
@@ -330,7 +330,7 @@ for.body:
%arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub1, i64 %sub
store i32 %conv, i32* %arrayidx2, align 4
%arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx4, align 4
+ %0 = load i32, i32* %arrayidx4, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
@@ -367,7 +367,7 @@ for.body:
%arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub1, i64 %sub
store i32 %conv, i32* %arrayidx2, align 4
%arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx4, align 4
+ %0 = load i32, i32* %arrayidx4, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
@@ -405,7 +405,7 @@ for.body:
%arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub1, i64 %sub
store i32 %conv, i32* %arrayidx2, align 4
%arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx4, align 4
+ %0 = load i32, i32* %arrayidx4, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
@@ -443,7 +443,7 @@ for.body:
%arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub1, i64 %sub
store i32 %conv, i32* %arrayidx2, align 4
%arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx4, align 4
+ %0 = load i32, i32* %arrayidx4, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
@@ -481,7 +481,7 @@ for.body:
%arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub1, i64 %sub
store i32 %conv, i32* %arrayidx2, align 4
%arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx4, align 4
+ %0 = load i32, i32* %arrayidx4, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
@@ -518,7 +518,7 @@ for.body:
%arrayidx2 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %sub1, i64 %sub
store i32 %conv, i32* %arrayidx2, align 4
%arrayidx4 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx4, align 4
+ %0 = load i32, i32* %arrayidx4, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
@@ -555,7 +555,7 @@ for.body:
%arrayidx3 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %sub1, i64 %sub, i64 %i.02
store i32 %conv, i32* %arrayidx3, align 4
%arrayidx6 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %i.02, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx6, align 4
+ %0 = load i32, i32* %arrayidx6, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
@@ -592,7 +592,7 @@ for.body:
%arrayidx3 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %sub1, i64 %sub, i64 %i.02
store i32 %conv, i32* %arrayidx3, align 4
%arrayidx6 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %i.02, i64 %i.02, i64 %i.02
- %0 = load i32* %arrayidx6, align 4
+ %0 = load i32, i32* %arrayidx6, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add nsw i64 %i.02, 1
Modified: llvm/trunk/test/Analysis/DependenceAnalysis/ExactRDIV.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/DependenceAnalysis/ExactRDIV.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/DependenceAnalysis/ExactRDIV.ll (original)
+++ llvm/trunk/test/Analysis/DependenceAnalysis/ExactRDIV.ll Fri Feb 27 15:17:42 2015
@@ -41,7 +41,7 @@ for.body4:
%mul5 = shl nsw i64 %j.02, 1
%add64 = or i64 %mul5, 1
%arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add64
- %0 = load i32* %arrayidx7, align 4
+ %0 = load i32, i32* %arrayidx7, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc9 = add nsw i64 %j.02, 1
@@ -87,7 +87,7 @@ for.body4:
%j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ]
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %j.02
- %0 = load i32* %arrayidx5, align 4
+ %0 = load i32, i32* %arrayidx5, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc7 = add nsw i64 %j.02, 1
@@ -133,7 +133,7 @@ for.body4:
%j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ]
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %j.02
- %0 = load i32* %arrayidx5, align 4
+ %0 = load i32, i32* %arrayidx5, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc7 = add nsw i64 %j.02, 1
@@ -179,7 +179,7 @@ for.body4:
%j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ]
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %j.02
- %0 = load i32* %arrayidx5, align 4
+ %0 = load i32, i32* %arrayidx5, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc7 = add nsw i64 %j.02, 1
@@ -225,7 +225,7 @@ for.body4:
%j.02 = phi i64 [ %inc7, %for.body4 ], [ 0, %for.body4.preheader ]
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %j.02
- %0 = load i32* %arrayidx5, align 4
+ %0 = load i32, i32* %arrayidx5, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc7 = add nsw i64 %j.02, 1
@@ -272,7 +272,7 @@ for.body4:
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%sub = sub nsw i64 0, %j.02
%arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %sub
- %0 = load i32* %arrayidx5, align 4
+ %0 = load i32, i32* %arrayidx5, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc7 = add nsw i64 %j.02, 1
@@ -319,7 +319,7 @@ for.body4:
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%sub = sub nsw i64 0, %j.02
%arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %sub
- %0 = load i32* %arrayidx5, align 4
+ %0 = load i32, i32* %arrayidx5, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc7 = add nsw i64 %j.02, 1
@@ -366,7 +366,7 @@ for.body4:
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%sub = sub nsw i64 0, %j.02
%arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %sub
- %0 = load i32* %arrayidx5, align 4
+ %0 = load i32, i32* %arrayidx5, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc7 = add nsw i64 %j.02, 1
@@ -413,7 +413,7 @@ for.body4:
%B.addr.01 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%sub = sub nsw i64 0, %j.02
%arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %sub
- %0 = load i32* %arrayidx5, align 4
+ %0 = load i32, i32* %arrayidx5, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc7 = add nsw i64 %j.02, 1
@@ -455,7 +455,7 @@ for.body3:
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
%arrayidx4 = getelementptr inbounds i32, i32* %A, i64 45
- %0 = load i32* %arrayidx4, align 4
+ %0 = load i32, i32* %arrayidx4, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
@@ -504,7 +504,7 @@ for.body3:
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
%arrayidx4 = getelementptr inbounds i32, i32* %A, i64 45
- %0 = load i32* %arrayidx4, align 4
+ %0 = load i32, i32* %arrayidx4, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
@@ -552,7 +552,7 @@ for.body3:
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
%arrayidx4 = getelementptr inbounds i32, i32* %A, i64 45
- %0 = load i32* %arrayidx4, align 4
+ %0 = load i32, i32* %arrayidx4, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
@@ -600,7 +600,7 @@ for.body3:
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %sub
store i32 %conv, i32* %arrayidx, align 4
%arrayidx4 = getelementptr inbounds i32, i32* %A, i64 45
- %0 = load i32* %arrayidx4, align 4
+ %0 = load i32, i32* %arrayidx4, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
Modified: llvm/trunk/test/Analysis/DependenceAnalysis/ExactSIV.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/DependenceAnalysis/ExactSIV.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/DependenceAnalysis/ExactSIV.ll (original)
+++ llvm/trunk/test/Analysis/DependenceAnalysis/ExactSIV.ll Fri Feb 27 15:17:42 2015
@@ -30,7 +30,7 @@ for.body:
%mul = shl i64 %i.02, 1
%add13 = or i64 %mul, 1
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %add13
- %0 = load i32* %arrayidx2, align 4
+ %0 = load i32, i32* %arrayidx2, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -68,7 +68,7 @@ for.body:
%mul1 = shl i64 %i.02, 1
%add23 = or i64 %mul1, 1
%arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %add23
- %0 = load i32* %arrayidx3, align 4
+ %0 = load i32, i32* %arrayidx3, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -104,7 +104,7 @@ for.body:
store i32 %conv, i32* %arrayidx, align 4
%add = add i64 %i.02, 60
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -140,7 +140,7 @@ for.body:
store i32 %conv, i32* %arrayidx, align 4
%add = add i64 %i.02, 60
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -176,7 +176,7 @@ for.body:
store i32 %conv, i32* %arrayidx, align 4
%add = add i64 %i.02, 60
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -212,7 +212,7 @@ for.body:
store i32 %conv, i32* %arrayidx, align 4
%add = add i64 %i.02, 60
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -248,7 +248,7 @@ for.body:
store i32 %conv, i32* %arrayidx, align 4
%add = add i64 %i.02, 60
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -284,7 +284,7 @@ for.body:
store i32 %conv, i32* %arrayidx, align 4
%add = add i64 %i.02, 60
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -320,7 +320,7 @@ for.body:
store i32 %conv, i32* %arrayidx, align 4
%sub1 = sub i64 -60, %i.02
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub1
- %0 = load i32* %arrayidx2, align 4
+ %0 = load i32, i32* %arrayidx2, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -356,7 +356,7 @@ for.body:
store i32 %conv, i32* %arrayidx, align 4
%sub1 = sub i64 -60, %i.02
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub1
- %0 = load i32* %arrayidx2, align 4
+ %0 = load i32, i32* %arrayidx2, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -392,7 +392,7 @@ for.body:
store i32 %conv, i32* %arrayidx, align 4
%sub1 = sub i64 -60, %i.02
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub1
- %0 = load i32* %arrayidx2, align 4
+ %0 = load i32, i32* %arrayidx2, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -428,7 +428,7 @@ for.body:
store i32 %conv, i32* %arrayidx, align 4
%sub1 = sub i64 -60, %i.02
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub1
- %0 = load i32* %arrayidx2, align 4
+ %0 = load i32, i32* %arrayidx2, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -464,7 +464,7 @@ for.body:
store i32 %conv, i32* %arrayidx, align 4
%sub1 = sub i64 -60, %i.02
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub1
- %0 = load i32* %arrayidx2, align 4
+ %0 = load i32, i32* %arrayidx2, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -500,7 +500,7 @@ for.body:
store i32 %conv, i32* %arrayidx, align 4
%sub1 = sub i64 -60, %i.02
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub1
- %0 = load i32* %arrayidx2, align 4
+ %0 = load i32, i32* %arrayidx2, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
Modified: llvm/trunk/test/Analysis/DependenceAnalysis/GCD.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/DependenceAnalysis/GCD.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/DependenceAnalysis/GCD.ll (original)
+++ llvm/trunk/test/Analysis/DependenceAnalysis/GCD.ll Fri Feb 27 15:17:42 2015
@@ -49,7 +49,7 @@ for.body3:
%mul6 = shl nsw i64 %j.02, 3
%add = add nsw i64 %mul5, %mul6
%arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add
- %0 = load i32* %arrayidx7, align 4
+ %0 = load i32, i32* %arrayidx7, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
@@ -111,7 +111,7 @@ for.body3:
%add = add nsw i64 %mul5, %mul6
%add7 = or i64 %add, 1
%arrayidx8 = getelementptr inbounds i32, i32* %A, i64 %add7
- %0 = load i32* %arrayidx8, align 4
+ %0 = load i32, i32* %arrayidx8, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
@@ -173,7 +173,7 @@ for.body3:
%mul6 = shl nsw i64 %j.02, 3
%add7 = add nsw i64 %mul5, %mul6
%arrayidx8 = getelementptr inbounds i32, i32* %A, i64 %add7
- %0 = load i32* %arrayidx8, align 4
+ %0 = load i32, i32* %arrayidx8, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
@@ -233,7 +233,7 @@ for.body3:
%add5 = add nsw i64 %i.03, %mul4
%sub = add nsw i64 %add5, -1
%arrayidx6 = getelementptr inbounds i32, i32* %A, i64 %sub
- %0 = load i32* %arrayidx6, align 4
+ %0 = load i32, i32* %arrayidx6, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
@@ -303,7 +303,7 @@ for.body3:
%sub = sub nsw i64 %add12, %mul14
%add15 = add nsw i64 %sub, 4
%arrayidx16 = getelementptr inbounds i32, i32* %A, i64 %add15
- %0 = load i32* %arrayidx16, align 4
+ %0 = load i32, i32* %arrayidx16, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
@@ -373,7 +373,7 @@ for.body3:
%sub = sub nsw i64 %add12, %mul14
%add15 = add nsw i64 %sub, 5
%arrayidx16 = getelementptr inbounds i32, i32* %A, i64 %add15
- %0 = load i32* %arrayidx16, align 4
+ %0 = load i32, i32* %arrayidx16, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
@@ -445,7 +445,7 @@ for.body3:
%1 = mul nsw i64 %mul7, %n
%arrayidx8.sum = add i64 %1, %add7
%arrayidx9 = getelementptr inbounds i32, i32* %A, i64 %arrayidx8.sum
- %2 = load i32* %arrayidx9, align 4
+ %2 = load i32, i32* %arrayidx9, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.12, i64 1
store i32 %2, i32* %B.addr.12, align 4
%inc = add nsw i64 %j.03, 1
@@ -536,7 +536,7 @@ for.body3:
%10 = mul nsw i64 %idxprom10, %0
%arrayidx11.sum = add i64 %10, %idxprom8
%arrayidx12 = getelementptr inbounds i32, i32* %A, i64 %arrayidx11.sum
- %11 = load i32* %arrayidx12, align 4
+ %11 = load i32, i32* %arrayidx12, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.12, i64 1
store i32 %11, i32* %B.addr.12, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -623,7 +623,7 @@ for.body3:
%add10 = or i32 %add9, 1
%idxprom11 = sext i32 %add10 to i64
%arrayidx12 = getelementptr inbounds i32, i32* %A, i64 %idxprom11
- %5 = load i32* %arrayidx12, align 4
+ %5 = load i32, i32* %arrayidx12, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.12, i64 1
store i32 %5, i32* %B.addr.12, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -715,7 +715,7 @@ for.body3:
%10 = mul nsw i64 %idxprom10, %0
%arrayidx11.sum = add i64 %10, %idxprom8
%arrayidx12 = getelementptr inbounds i32, i32* %A, i64 %arrayidx11.sum
- %11 = load i32* %arrayidx12, align 4
+ %11 = load i32, i32* %arrayidx12, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.12, i64 1
store i32 %11, i32* %B.addr.12, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
Modified: llvm/trunk/test/Analysis/DependenceAnalysis/Invariant.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/DependenceAnalysis/Invariant.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/DependenceAnalysis/Invariant.ll (original)
+++ llvm/trunk/test/Analysis/DependenceAnalysis/Invariant.ll Fri Feb 27 15:17:42 2015
@@ -20,9 +20,9 @@ for.body3:
%j.02 = phi i32 [ 0, %for.cond1.preheader ], [ %add8, %for.body3 ]
%res.11 = phi float [ %res.03, %for.cond1.preheader ], [ %add.res.1, %for.body3 ]
%arrayidx4 = getelementptr inbounds [40 x float], [40 x float]* %rr, i32 %j.02, i32 %j.02
- %0 = load float* %arrayidx4, align 4
+ %0 = load float, float* %arrayidx4, align 4
%arrayidx6 = getelementptr inbounds [40 x float], [40 x float]* %rr, i32 %i.04, i32 %j.02
- %1 = load float* %arrayidx6, align 4
+ %1 = load float, float* %arrayidx6, align 4
%add = fadd float %0, %1
%cmp7 = fcmp ogt float %add, %g
%add.res.1 = select i1 %cmp7, float %add, float %res.11
Modified: llvm/trunk/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll (original)
+++ llvm/trunk/test/Analysis/DependenceAnalysis/NonCanonicalizedSubscript.ll Fri Feb 27 15:17:42 2015
@@ -28,7 +28,7 @@ for.body:
%i = phi i64 [ 0, %entry ], [ %i.inc, %for.body ]
%a.addr = getelementptr [100 x [100 x i32]], [100 x [100 x i32]]* %a, i64 0, i64 %i, i64 %i
%a.addr.2 = getelementptr [100 x [100 x i32]], [100 x [100 x i32]]* %a, i64 0, i64 %i, i32 5
- %0 = load i32* %a.addr, align 4
+ %0 = load i32, i32* %a.addr, align 4
%1 = add i32 %0, 1
store i32 %1, i32* %a.addr.2, align 4
%i.inc = add nsw i64 %i, 1
Modified: llvm/trunk/test/Analysis/DependenceAnalysis/Preliminary.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/DependenceAnalysis/Preliminary.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/DependenceAnalysis/Preliminary.ll (original)
+++ llvm/trunk/test/Analysis/DependenceAnalysis/Preliminary.ll Fri Feb 27 15:17:42 2015
@@ -18,7 +18,7 @@ entry:
; CHECK: da analyze - none!
%arrayidx1 = getelementptr inbounds i32, i32* %B, i64 1
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
ret i32 %0
}
@@ -36,7 +36,7 @@ entry:
; CHECK: da analyze - none!
%arrayidx1 = getelementptr inbounds i32, i32* %B, i64 1
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
ret i32 %0
}
@@ -107,7 +107,7 @@ for.body12:
%add13 = add nsw i64 %j.07, 2
%add14 = add nsw i64 %i.011, 3
%arrayidx17 = getelementptr inbounds [100 x [100 x i64]], [100 x [100 x i64]]* %A, i64 %add14, i64 %add13, i64 %add
- %0 = load i64* %arrayidx17, align 8
+ %0 = load i64, i64* %arrayidx17, align 8
%incdec.ptr = getelementptr inbounds i64, i64* %B.addr.24, i64 1
store i64 %0, i64* %B.addr.24, align 8
%inc19 = add nsw i64 %k9.05, 1
@@ -290,7 +290,7 @@ for.body33:
%sub48 = sub nsw i64 1, %k.037
%add49 = add nsw i64 %i.045, 3
%arrayidx57 = getelementptr inbounds [100 x [100 x [100 x [100 x [100 x [100 x [100 x i64]]]]]]], [100 x [100 x [100 x [100 x [100 x [100 x [100 x i64]]]]]]]* %A, i64 %add49, i64 2, i64 %u.06, i64 %sub48, i64 %sub47, i64 %o.025, i64 %add45, i64 %add44
- %0 = load i64* %arrayidx57, align 8
+ %0 = load i64, i64* %arrayidx57, align 8
%incdec.ptr = getelementptr inbounds i64, i64* %B.addr.112, i64 1
store i64 %0, i64* %B.addr.112, align 8
%inc = add nsw i64 %t.03, 1
@@ -445,7 +445,7 @@ for.body:
store i32 %conv2, i32* %arrayidx, align 4
%idxprom4 = sext i8 %i.03 to i64
%arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %idxprom4
- %0 = load i32* %arrayidx5, align 4
+ %0 = load i32, i32* %arrayidx5, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i8 %i.03, 1
@@ -491,7 +491,7 @@ for.body:
store i32 %conv2, i32* %arrayidx, align 4
%idxprom4 = sext i16 %i.03 to i64
%arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %idxprom4
- %0 = load i32* %arrayidx5, align 4
+ %0 = load i32, i32* %arrayidx5, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i16 %i.03, 1
@@ -535,7 +535,7 @@ for.body:
%1 = trunc i64 %indvars.iv to i32
store i32 %1, i32* %arrayidx, align 4
%arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %2 = load i32* %arrayidx3, align 4
+ %2 = load i32, i32* %arrayidx3, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %2, i32* %B.addr.02, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -570,7 +570,7 @@ entry:
%conv = sext i8 %n to i64
%add = add i64 %conv, 1
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %add
- %0 = load i32* %arrayidx2, align 4
+ %0 = load i32, i32* %arrayidx2, align 4
store i32 %0, i32* %B, align 4
ret void
}
@@ -596,7 +596,7 @@ entry:
%conv = sext i16 %n to i64
%add = add i64 %conv, 1
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %add
- %0 = load i32* %arrayidx2, align 4
+ %0 = load i32, i32* %arrayidx2, align 4
store i32 %0, i32* %B, align 4
ret void
}
@@ -622,7 +622,7 @@ entry:
%add = add nsw i32 %n, 1
%idxprom1 = sext i32 %add to i64
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %idxprom1
- %0 = load i32* %arrayidx2, align 4
+ %0 = load i32, i32* %arrayidx2, align 4
store i32 %0, i32* %B, align 4
ret void
}
@@ -648,7 +648,7 @@ entry:
%add = add i32 %n, 1
%idxprom1 = zext i32 %add to i64
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %idxprom1
- %0 = load i32* %arrayidx2, align 4
+ %0 = load i32, i32* %arrayidx2, align 4
store i32 %0, i32* %B, align 4
ret void
}
@@ -682,7 +682,7 @@ while.body.preheader:
while.body: ; preds = %while.body.preheader, %while.body
%i.02 = phi %struct.S* [ %incdec.ptr, %while.body ], [ %s, %while.body.preheader ]
%0 = getelementptr inbounds %struct.S, %struct.S* %i.02, i64 1, i32 0
- %1 = load i32* %0, align 4
+ %1 = load i32, i32* %0, align 4
%2 = getelementptr inbounds %struct.S, %struct.S* %i.02, i64 0, i32 0
store i32 %1, i32* %2, align 4
%incdec.ptr = getelementptr inbounds %struct.S, %struct.S* %i.02, i64 1
Modified: llvm/trunk/test/Analysis/DependenceAnalysis/Propagating.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/DependenceAnalysis/Propagating.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/DependenceAnalysis/Propagating.ll (original)
+++ llvm/trunk/test/Analysis/DependenceAnalysis/Propagating.ll Fri Feb 27 15:17:42 2015
@@ -36,7 +36,7 @@ for.body3:
store i32 %conv, i32* %arrayidx5, align 4
%add6 = add nsw i64 %i.03, %j.02
%arrayidx8 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.03, i64 %add6
- %0 = load i32* %arrayidx8, align 4
+ %0 = load i32, i32* %arrayidx8, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
@@ -93,7 +93,7 @@ for.body6:
%add10 = add nsw i64 %j.03, %k.02
%sub11 = sub nsw i64 %j.03, %i.05
%arrayidx14 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %sub11, i64 %i.05, i64 %add10
- %0 = load i32* %arrayidx14, align 4
+ %0 = load i32, i32* %arrayidx14, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.21, i64 1
store i32 %0, i32* %B.addr.21, align 4
%inc = add nsw i64 %k.02, 1
@@ -149,7 +149,7 @@ for.body3:
%add = add nsw i64 %i.03, %j.02
%add5 = add nsw i64 %add, 110
%arrayidx7 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.03, i64 %add5
- %0 = load i32* %arrayidx7, align 4
+ %0 = load i32, i32* %arrayidx7, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
@@ -200,7 +200,7 @@ for.body3:
%sub = sub nsw i64 %mul5, %i.03
%add6 = add nsw i64 %sub, 5
%arrayidx8 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.03, i64 %add6
- %0 = load i32* %arrayidx8, align 4
+ %0 = load i32, i32* %arrayidx8, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
@@ -252,7 +252,7 @@ for.body3:
%mul7 = shl nsw i64 %i.03, 1
%add8 = add nsw i64 %mul7, %j.02
%arrayidx10 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %i.03, i64 %add8
- %0 = load i32* %arrayidx10, align 4
+ %0 = load i32, i32* %arrayidx10, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
@@ -306,7 +306,7 @@ for.body3:
%mul8 = mul nsw i64 %i.03, 3
%add9 = add nsw i64 %mul8, %j.02
%arrayidx12 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 %i.03, i64 %i.03, i64 %add9
- %0 = load i32* %arrayidx12, align 4
+ %0 = load i32, i32* %arrayidx12, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
@@ -359,7 +359,7 @@ for.body3:
%add8 = add nsw i64 %mul7, %j.02
%mul9 = shl nsw i64 %i.03, 1
%arrayidx11 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %mul9, i64 %add8
- %0 = load i32* %arrayidx11, align 4
+ %0 = load i32, i32* %arrayidx11, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
@@ -415,7 +415,7 @@ for.body3:
%mul10 = mul nsw i64 %i.03, -2
%add11 = add nsw i64 %mul10, 20
%arrayidx13 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add11, i64 %add9
- %0 = load i32* %arrayidx13, align 4
+ %0 = load i32, i32* %arrayidx13, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
@@ -466,7 +466,7 @@ for.body3:
%mul6 = mul nsw i64 %i.03, -2
%add7 = add nsw i64 %mul6, 4
%arrayidx9 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add7, i64 %add5
- %0 = load i32* %arrayidx9, align 4
+ %0 = load i32, i32* %arrayidx9, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
@@ -517,7 +517,7 @@ for.body3:
%arrayidx7 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 %add6, i64 %add4
store i32 %conv, i32* %arrayidx7, align 4
%arrayidx9 = getelementptr inbounds [100 x i32], [100 x i32]* %A, i64 4, i64 %j.02
- %0 = load i32* %arrayidx9, align 4
+ %0 = load i32, i32* %arrayidx9, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.11, i64 1
store i32 %0, i32* %B.addr.11, align 4
%inc = add nsw i64 %j.02, 1
Modified: llvm/trunk/test/Analysis/DependenceAnalysis/Separability.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/DependenceAnalysis/Separability.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/DependenceAnalysis/Separability.ll (original)
+++ llvm/trunk/test/Analysis/DependenceAnalysis/Separability.ll Fri Feb 27 15:17:42 2015
@@ -50,7 +50,7 @@ for.body9:
%sub = sub nsw i64 %mul, %l.02
%add12 = add nsw i64 %i.07, 10
%arrayidx15 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 10, i64 %add12, i64 %sub
- %0 = load i32* %arrayidx15, align 4
+ %0 = load i32, i32* %arrayidx15, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.31, i64 1
store i32 %0, i32* %B.addr.31, align 4
%inc = add nsw i64 %l.02, 1
@@ -124,7 +124,7 @@ for.body9:
%sub = sub nsw i64 %mul, %l.02
%add12 = add nsw i64 %i.07, 10
%arrayidx15 = getelementptr inbounds [100 x [100 x i32]], [100 x [100 x i32]]* %A, i64 10, i64 %add12, i64 %sub
- %0 = load i32* %arrayidx15, align 4
+ %0 = load i32, i32* %arrayidx15, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.31, i64 1
store i32 %0, i32* %B.addr.31, align 4
%inc = add nsw i64 %l.02, 1
@@ -198,7 +198,7 @@ for.body9:
%add14 = add nsw i64 %j.05, %k.03
%add15 = add nsw i64 %i.07, 10
%arrayidx19 = getelementptr inbounds [100 x [100 x [100 x i32]]], [100 x [100 x [100 x i32]]]* %A, i64 10, i64 %add15, i64 %add14, i64 %add13
- %0 = load i32* %arrayidx19, align 4
+ %0 = load i32, i32* %arrayidx19, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.31, i64 1
store i32 %0, i32* %B.addr.31, align 4
%inc = add nsw i64 %l.02, 1
@@ -273,7 +273,7 @@ for.body9:
%add15 = add nsw i64 %j.05, %k.03
%add16 = add nsw i64 %i.07, 10
%arrayidx20 = getelementptr inbounds [100 x [100 x [100 x i32]]], [100 x [100 x [100 x i32]]]* %A, i64 10, i64 %add16, i64 %add15, i64 %add14
- %0 = load i32* %arrayidx20, align 4
+ %0 = load i32, i32* %arrayidx20, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.31, i64 1
store i32 %0, i32* %B.addr.31, align 4
%inc = add nsw i64 %l.02, 1
Modified: llvm/trunk/test/Analysis/DependenceAnalysis/StrongSIV.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/DependenceAnalysis/StrongSIV.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/DependenceAnalysis/StrongSIV.ll (original)
+++ llvm/trunk/test/Analysis/DependenceAnalysis/StrongSIV.ll Fri Feb 27 15:17:42 2015
@@ -32,7 +32,7 @@ for.body:
%1 = trunc i64 %indvars.iv to i32
store i32 %1, i32* %arrayidx, align 4
%arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %2 = load i32* %arrayidx3, align 4
+ %2 = load i32, i32* %arrayidx3, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %2, i32* %B.addr.02, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -75,7 +75,7 @@ for.body:
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv2, i32* %arrayidx, align 4
%arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %i.03
- %1 = load i32* %arrayidx3, align 4
+ %1 = load i32, i32* %arrayidx3, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %1, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
@@ -117,7 +117,7 @@ for.body:
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %i.03
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
@@ -159,7 +159,7 @@ for.body:
%1 = trunc i64 %indvars.iv to i32
store i32 %1, i32* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %indvars.iv
- %2 = load i32* %arrayidx2, align 4
+ %2 = load i32, i32* %arrayidx2, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %2, i32* %B.addr.02, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
@@ -198,7 +198,7 @@ for.body:
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %i.02
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -233,7 +233,7 @@ for.body:
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %i.02
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -270,7 +270,7 @@ for.body:
store i32 %conv, i32* %arrayidx, align 4
%mul1 = shl i64 %i.02, 1
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %mul1
- %0 = load i32* %arrayidx2, align 4
+ %0 = load i32, i32* %arrayidx2, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -307,7 +307,7 @@ for.body:
store i32 %conv, i32* %arrayidx, align 4
%mul1 = shl i64 %i.02, 1
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %mul1
- %0 = load i32* %arrayidx2, align 4
+ %0 = load i32, i32* %arrayidx2, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -342,7 +342,7 @@ for.body:
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %i.02
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -383,7 +383,7 @@ for.body:
%mul = shl i64 %n, 1
%add1 = add i64 %i.03, %mul
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %add1
- %0 = load i32* %arrayidx2, align 4
+ %0 = load i32, i32* %arrayidx2, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
@@ -424,7 +424,7 @@ for.body:
%mul1 = mul i64 %i.02, %n
%add2 = add i64 %mul1, 5
%arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %add2
- %0 = load i32* %arrayidx3, align 4
+ %0 = load i32, i32* %arrayidx3, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
Modified: llvm/trunk/test/Analysis/DependenceAnalysis/SymbolicRDIV.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/DependenceAnalysis/SymbolicRDIV.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/DependenceAnalysis/SymbolicRDIV.ll (original)
+++ llvm/trunk/test/Analysis/DependenceAnalysis/SymbolicRDIV.ll Fri Feb 27 15:17:42 2015
@@ -53,7 +53,7 @@ for.body4:
%mul56 = add i64 %j.03, %n1
%add7 = mul i64 %mul56, 3
%arrayidx8 = getelementptr inbounds i32, i32* %A, i64 %add7
- %0 = load i32* %arrayidx8, align 4
+ %0 = load i32, i32* %arrayidx8, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc10 = add nsw i64 %j.03, 1
@@ -118,7 +118,7 @@ for.body5:
%mul7 = shl i64 %n2, 1
%add8 = add i64 %mul6, %mul7
%arrayidx9 = getelementptr inbounds i32, i32* %A, i64 %add8
- %0 = load i32* %arrayidx9, align 4
+ %0 = load i32, i32* %arrayidx9, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc11 = add nsw i64 %j.03, 1
@@ -181,7 +181,7 @@ for.body4:
%mul6 = shl i64 %n1, 1
%add = sub i64 %mul6, %j.03
%arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add
- %0 = load i32* %arrayidx7, align 4
+ %0 = load i32, i32* %arrayidx7, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc9 = add nsw i64 %j.03, 1
@@ -242,7 +242,7 @@ for.body4:
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%sub5 = sub i64 %j.03, %n1
%arrayidx6 = getelementptr inbounds i32, i32* %A, i64 %sub5
- %0 = load i32* %arrayidx6, align 4
+ %0 = load i32, i32* %arrayidx6, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc8 = add nsw i64 %j.03, 1
@@ -304,7 +304,7 @@ for.body4:
%B.addr.02 = phi i32* [ %incdec.ptr, %for.body4 ], [ %B, %for.body4.preheader ]
%add6 = sub i64 %n1, %j.03
%arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add6
- %0 = load i32* %arrayidx7, align 4
+ %0 = load i32, i32* %arrayidx7, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc9 = add nsw i64 %j.03, 1
@@ -366,7 +366,7 @@ for.body4:
%mul = shl i64 %n2, 1
%add6 = sub i64 %mul, %j.03
%arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add6
- %0 = load i32* %arrayidx7, align 4
+ %0 = load i32, i32* %arrayidx7, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc9 = add nsw i64 %j.03, 1
@@ -421,7 +421,7 @@ for.body3:
store i32 %conv, i32* %arrayidx, align 4
%mul = shl i64 %n2, 1
%arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %mul
- %0 = load i32* %arrayidx4, align 4
+ %0 = load i32, i32* %arrayidx4, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.12, i64 1
store i32 %0, i32* %B.addr.12, align 4
%inc = add nsw i64 %j.03, 1
Modified: llvm/trunk/test/Analysis/DependenceAnalysis/SymbolicSIV.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/DependenceAnalysis/SymbolicSIV.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/DependenceAnalysis/SymbolicSIV.ll (original)
+++ llvm/trunk/test/Analysis/DependenceAnalysis/SymbolicSIV.ll Fri Feb 27 15:17:42 2015
@@ -35,7 +35,7 @@ for.body:
%mul14 = add i64 %i.03, %n
%add3 = mul i64 %mul14, 3
%arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %add3
- %0 = load i32* %arrayidx4, align 4
+ %0 = load i32, i32* %arrayidx4, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
@@ -82,7 +82,7 @@ for.body:
%mul3 = shl i64 %n, 1
%add4 = add i64 %mul2, %mul3
%arrayidx5 = getelementptr inbounds i32, i32* %A, i64 %add4
- %0 = load i32* %arrayidx5, align 4
+ %0 = load i32, i32* %arrayidx5, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
@@ -127,7 +127,7 @@ for.body:
%mul2 = shl i64 %n, 1
%add = sub i64 %mul2, %i.03
%arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %add
- %0 = load i32* %arrayidx3, align 4
+ %0 = load i32, i32* %arrayidx3, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
@@ -173,7 +173,7 @@ for.body:
%mul2 = shl i64 %n, 1
%sub = sub i64 %i.03, %mul2
%arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %sub
- %0 = load i32* %arrayidx3, align 4
+ %0 = load i32, i32* %arrayidx3, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
@@ -218,7 +218,7 @@ for.body:
store i32 %conv, i32* %arrayidx, align 4
%add2 = sub i64 %n, %i.03
%arrayidx3 = getelementptr inbounds i32, i32* %A, i64 %add2
- %0 = load i32* %arrayidx3, align 4
+ %0 = load i32, i32* %arrayidx3, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
@@ -264,7 +264,7 @@ for.body:
%sub2 = sub nsw i64 0, %i.03
%sub3 = sub i64 %sub2, %n
%arrayidx4 = getelementptr inbounds i32, i32* %A, i64 %sub3
- %0 = load i32* %arrayidx4, align 4
+ %0 = load i32, i32* %arrayidx4, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
@@ -310,7 +310,7 @@ for.body:
store i32 %conv, i32* %arrayidx, align 4
%sub = sub i64 0, %i.03
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub
- %0 = load i32* %arrayidx2, align 4
+ %0 = load i32, i32* %arrayidx2, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
@@ -359,7 +359,7 @@ for.body:
%add5 = add i64 %mul3, %mul4
%add6 = add i64 %add5, 1
%arrayidx7 = getelementptr inbounds i32, i32* %A, i64 %add6
- %0 = load i32* %arrayidx7, align 4
+ %0 = load i32, i32* %arrayidx7, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
@@ -408,7 +408,7 @@ for.body:
%sub = add i64 %mul3, %0
%add5 = add i64 %sub, 2
%arrayidx6 = getelementptr inbounds i32, i32* %A, i64 %add5
- %1 = load i32* %arrayidx6, align 4
+ %1 = load i32, i32* %arrayidx6, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %1, i32* %B.addr.02, align 4
%inc = add nsw i64 %i.03, 1
Modified: llvm/trunk/test/Analysis/DependenceAnalysis/WeakCrossingSIV.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/DependenceAnalysis/WeakCrossingSIV.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/DependenceAnalysis/WeakCrossingSIV.ll (original)
+++ llvm/trunk/test/Analysis/DependenceAnalysis/WeakCrossingSIV.ll Fri Feb 27 15:17:42 2015
@@ -35,7 +35,7 @@ for.body:
%mul1 = mul i64 %i.03, %n
%sub = sub i64 1, %mul1
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub
- %0 = load i32* %arrayidx2, align 4
+ %0 = load i32, i32* %arrayidx2, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
@@ -80,7 +80,7 @@ for.body:
%add1 = add i64 %n, 1
%sub = sub i64 %add1, %i.03
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub
- %0 = load i32* %arrayidx2, align 4
+ %0 = load i32, i32* %arrayidx2, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
@@ -118,7 +118,7 @@ for.body:
store i32 %conv, i32* %arrayidx, align 4
%sub = sub i64 6, %i.02
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %sub
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -153,7 +153,7 @@ for.body:
store i32 %conv, i32* %arrayidx, align 4
%sub = sub i64 6, %i.02
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %sub
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -188,7 +188,7 @@ for.body:
store i32 %conv, i32* %arrayidx, align 4
%sub = sub i64 -6, %i.02
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %sub
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -229,7 +229,7 @@ for.body:
%0 = mul i64 %i.03, -3
%sub = add i64 %0, 5
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %sub
- %1 = load i32* %arrayidx2, align 4
+ %1 = load i32, i32* %arrayidx2, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %1, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
@@ -268,7 +268,7 @@ for.body:
store i32 %conv, i32* %arrayidx, align 4
%sub = sub i64 5, %i.02
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %sub
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
Modified: llvm/trunk/test/Analysis/DependenceAnalysis/WeakZeroDstSIV.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/DependenceAnalysis/WeakZeroDstSIV.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/DependenceAnalysis/WeakZeroDstSIV.ll (original)
+++ llvm/trunk/test/Analysis/DependenceAnalysis/WeakZeroDstSIV.ll Fri Feb 27 15:17:42 2015
@@ -29,7 +29,7 @@ for.body:
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 10
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -69,7 +69,7 @@ for.body:
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %add
store i32 %conv, i32* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 10
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
@@ -107,7 +107,7 @@ for.body:
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 10
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -142,7 +142,7 @@ for.body:
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 10
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -177,7 +177,7 @@ for.body:
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 10
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -212,7 +212,7 @@ for.body:
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 -10
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -251,7 +251,7 @@ for.body:
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
store i32 %conv, i32* %arrayidx, align 4
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 10
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
Modified: llvm/trunk/test/Analysis/DependenceAnalysis/WeakZeroSrcSIV.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/DependenceAnalysis/WeakZeroSrcSIV.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/DependenceAnalysis/WeakZeroSrcSIV.ll (original)
+++ llvm/trunk/test/Analysis/DependenceAnalysis/WeakZeroSrcSIV.ll Fri Feb 27 15:17:42 2015
@@ -29,7 +29,7 @@ for.body:
%mul = shl i64 %i.02, 1
%add = add i64 %mul, 10
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -69,7 +69,7 @@ for.body:
%mul = mul i64 %i.03, %n
%add = add i64 %mul, 10
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
@@ -107,7 +107,7 @@ for.body:
store i32 %conv, i32* %arrayidx, align 4
%mul = shl i64 %i.02, 1
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %mul
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -142,7 +142,7 @@ for.body:
store i32 %conv, i32* %arrayidx, align 4
%mul = shl i64 %i.02, 1
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %mul
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -177,7 +177,7 @@ for.body:
store i32 %conv, i32* %arrayidx, align 4
%mul = shl i64 %i.02, 1
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %mul
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -212,7 +212,7 @@ for.body:
store i32 %conv, i32* %arrayidx, align 4
%mul = shl i64 %i.02, 1
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %mul
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.01, i64 1
store i32 %0, i32* %B.addr.01, align 4
%inc = add i64 %i.02, 1
@@ -251,7 +251,7 @@ for.body:
store i32 %conv, i32* %arrayidx, align 4
%mul = mul i64 %i.03, 3
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %mul
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
%incdec.ptr = getelementptr inbounds i32, i32* %B.addr.02, i64 1
store i32 %0, i32* %B.addr.02, align 4
%inc = add i64 %i.03, 1
Modified: llvm/trunk/test/Analysis/DependenceAnalysis/ZIV.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/DependenceAnalysis/ZIV.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/DependenceAnalysis/ZIV.ll (original)
+++ llvm/trunk/test/Analysis/DependenceAnalysis/ZIV.ll Fri Feb 27 15:17:42 2015
@@ -23,7 +23,7 @@ entry:
%add1 = add i64 %n, 1
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 %add1
- %0 = load i32* %arrayidx2, align 4
+ %0 = load i32, i32* %arrayidx2, align 4
store i32 %0, i32* %B, align 4
ret void
}
@@ -46,7 +46,7 @@ entry:
%add = add i64 %n, 1
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %add
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
store i32 %0, i32* %B, align 4
ret void
}
@@ -68,7 +68,7 @@ entry:
; CHECK: da analyze - none!
%arrayidx1 = getelementptr inbounds i32, i32* %A, i64 %m
- %0 = load i32* %arrayidx1, align 4
+ %0 = load i32, i32* %arrayidx1, align 4
store i32 %0, i32* %B, align 4
ret void
}
Modified: llvm/trunk/test/Analysis/GlobalsModRef/2008-09-03-ReadGlobals.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/GlobalsModRef/2008-09-03-ReadGlobals.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/GlobalsModRef/2008-09-03-ReadGlobals.ll (original)
+++ llvm/trunk/test/Analysis/GlobalsModRef/2008-09-03-ReadGlobals.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
@g = internal global i32 0 ; <i32*> [#uses=2]
define i32 @r() {
- %tmp = load i32* @g ; <i32> [#uses=1]
+ %tmp = load i32, i32* @g ; <i32> [#uses=1]
ret i32 %tmp
}
Modified: llvm/trunk/test/Analysis/GlobalsModRef/aliastest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/GlobalsModRef/aliastest.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/GlobalsModRef/aliastest.ll (original)
+++ llvm/trunk/test/Analysis/GlobalsModRef/aliastest.ll Fri Feb 27 15:17:42 2015
@@ -9,6 +9,6 @@ define i32 @test(i32* %P) {
; CHECK-NEXT: ret i32 7
store i32 7, i32* %P
store i32 12, i32* @X
- %V = load i32* %P ; <i32> [#uses=1]
+ %V = load i32, i32* %P ; <i32> [#uses=1]
ret i32 %V
}
Modified: llvm/trunk/test/Analysis/GlobalsModRef/chaining-analysis.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/GlobalsModRef/chaining-analysis.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/GlobalsModRef/chaining-analysis.ll (original)
+++ llvm/trunk/test/Analysis/GlobalsModRef/chaining-analysis.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@ define i32 @test(i32* %P) {
; CHECK-NEXT: ret i32 12
store i32 12, i32* @X
call double @doesnotmodX( double 1.000000e+00 ) ; <double>:1 [#uses=0]
- %V = load i32* @X ; <i32> [#uses=1]
+ %V = load i32, i32* @X ; <i32> [#uses=1]
ret i32 %V
}
Modified: llvm/trunk/test/Analysis/GlobalsModRef/indirect-global.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/GlobalsModRef/indirect-global.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/GlobalsModRef/indirect-global.ll (original)
+++ llvm/trunk/test/Analysis/GlobalsModRef/indirect-global.ll Fri Feb 27 15:17:42 2015
@@ -12,11 +12,11 @@ define void @test() {
define i32 @test1(i32* %P) {
; CHECK: ret i32 0
- %g1 = load i32** @G ; <i32*> [#uses=2]
- %h1 = load i32* %g1 ; <i32> [#uses=1]
+ %g1 = load i32*, i32** @G ; <i32*> [#uses=2]
+ %h1 = load i32, i32* %g1 ; <i32> [#uses=1]
store i32 123, i32* %P
- %g2 = load i32** @G ; <i32*> [#uses=0]
- %h2 = load i32* %g1 ; <i32> [#uses=1]
+ %g2 = load i32*, i32** @G ; <i32*> [#uses=0]
+ %h2 = load i32, i32* %g1 ; <i32> [#uses=1]
%X = sub i32 %h1, %h2 ; <i32> [#uses=1]
ret i32 %X
}
Modified: llvm/trunk/test/Analysis/GlobalsModRef/modreftest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/GlobalsModRef/modreftest.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/GlobalsModRef/modreftest.ll (original)
+++ llvm/trunk/test/Analysis/GlobalsModRef/modreftest.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ define i32 @test(i32* %P) {
; CHECK-NEXT: ret i32 12
store i32 12, i32* @X
call void @doesnotmodX( )
- %V = load i32* @X ; <i32> [#uses=1]
+ %V = load i32, i32* @X ; <i32> [#uses=1]
ret i32 %V
}
Modified: llvm/trunk/test/Analysis/GlobalsModRef/pr12351.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/GlobalsModRef/pr12351.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/GlobalsModRef/pr12351.ll (original)
+++ llvm/trunk/test/Analysis/GlobalsModRef/pr12351.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ define void @foo(i8* %x, i8* %y) {
define void @bar(i8* %y, i8* %z) {
%x = alloca i8
call void @foo(i8* %x, i8* %y)
- %t = load i8* %x
+ %t = load i8, i8* %x
store i8 %t, i8* %y
; CHECK: store i8 %t, i8* %y
ret void
@@ -19,8 +19,8 @@ define void @bar(i8* %y, i8* %z) {
define i32 @foo2() {
%foo = alloca i32
call void @bar2(i32* %foo)
- %t0 = load i32* %foo, align 4
-; CHECK: %t0 = load i32* %foo, align 4
+ %t0 = load i32, i32* %foo, align 4
+; CHECK: %t0 = load i32, i32* %foo, align 4
ret i32 %t0
}
Modified: llvm/trunk/test/Analysis/GlobalsModRef/volatile-instrs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/GlobalsModRef/volatile-instrs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/GlobalsModRef/volatile-instrs.ll (original)
+++ llvm/trunk/test/Analysis/GlobalsModRef/volatile-instrs.ll Fri Feb 27 15:17:42 2015
@@ -22,7 +22,7 @@ declare void @llvm.memcpy.p0i8.p0i8.i64(
define i32 @main() nounwind uwtable ssp {
main_entry:
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* bitcast (%struct.anon* @b to i8*), i8* bitcast (%struct.anon* @a to i8*), i64 12, i32 4, i1 false)
- %0 = load volatile i32* getelementptr inbounds (%struct.anon* @b, i64 0, i32 0), align 4
+ %0 = load volatile i32, i32* getelementptr inbounds (%struct.anon* @b, i64 0, i32 0), align 4
store i32 %0, i32* @c, align 4
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* bitcast (%struct.anon* @b to i8*), i8* bitcast (%struct.anon* @a to i8*), i64 12, i32 4, i1 false) nounwind
%call = tail call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([4 x i8]* @.str, i64 0, i64 0), i32 %0) nounwind
Modified: llvm/trunk/test/Analysis/LazyCallGraph/basic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/LazyCallGraph/basic.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/LazyCallGraph/basic.ll (original)
+++ llvm/trunk/test/Analysis/LazyCallGraph/basic.ll Fri Feb 27 15:17:42 2015
@@ -118,10 +118,10 @@ define void @test2() {
; CHECK-NEXT: -> f1
; CHECK-NOT: ->
- load i8** bitcast (void ()** @g to i8**)
- load i8** bitcast (void ()** getelementptr ([4 x void ()*]* @g1, i32 0, i32 2) to i8**)
- load i8** bitcast (void ()** getelementptr ({i8, void ()*, i8}* @g2, i32 0, i32 1) to i8**)
- load i8** bitcast (void ()** @h to i8**)
+ load i8*, i8** bitcast (void ()** @g to i8**)
+ load i8*, i8** bitcast (void ()** getelementptr ([4 x void ()*]* @g1, i32 0, i32 2) to i8**)
+ load i8*, i8** bitcast (void ()** getelementptr ({i8, void ()*, i8}* @g2, i32 0, i32 1) to i8**)
+ load i8*, i8** bitcast (void ()** @h to i8**)
ret void
}
Modified: llvm/trunk/test/Analysis/LoopAccessAnalysis/backward-dep-different-types.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/LoopAccessAnalysis/backward-dep-different-types.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/LoopAccessAnalysis/backward-dep-different-types.ll (original)
+++ llvm/trunk/test/Analysis/LoopAccessAnalysis/backward-dep-different-types.ll Fri Feb 27 15:17:42 2015
@@ -20,18 +20,18 @@ target triple = "x86_64-apple-macosx10.1
define void @f() {
entry:
- %a = load i32** @A, align 8
- %b = load i32** @B, align 8
+ %a = load i32*, i32** @A, align 8
+ %b = load i32*, i32** @B, align 8
br label %for.body
for.body: ; preds = %for.body, %entry
%storemerge3 = phi i64 [ 0, %entry ], [ %add, %for.body ]
%arrayidxA = getelementptr inbounds i32, i32* %a, i64 %storemerge3
- %loadA = load i32* %arrayidxA, align 2
+ %loadA = load i32, i32* %arrayidxA, align 2
%arrayidxB = getelementptr inbounds i32, i32* %b, i64 %storemerge3
- %loadB = load i32* %arrayidxB, align 2
+ %loadB = load i32, i32* %arrayidxB, align 2
%mul = mul i32 %loadB, %loadA
Modified: llvm/trunk/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks-no-dbg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks-no-dbg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks-no-dbg.ll (original)
+++ llvm/trunk/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks-no-dbg.ll Fri Feb 27 15:17:42 2015
@@ -28,22 +28,22 @@ target triple = "x86_64-apple-macosx10.1
define void @f() {
entry:
- %a = load i16** @A, align 8
- %b = load i16** @B, align 8
- %c = load i16** @C, align 8
+ %a = load i16*, i16** @A, align 8
+ %b = load i16*, i16** @B, align 8
+ %c = load i16*, i16** @C, align 8
br label %for.body
for.body: ; preds = %for.body, %entry
%storemerge3 = phi i64 [ 0, %entry ], [ %add, %for.body ]
%arrayidxA = getelementptr inbounds i16, i16* %a, i64 %storemerge3
- %loadA = load i16* %arrayidxA, align 2
+ %loadA = load i16, i16* %arrayidxA, align 2
%arrayidxB = getelementptr inbounds i16, i16* %b, i64 %storemerge3
- %loadB = load i16* %arrayidxB, align 2
+ %loadB = load i16, i16* %arrayidxB, align 2
%arrayidxC = getelementptr inbounds i16, i16* %c, i64 %storemerge3
- %loadC = load i16* %arrayidxC, align 2
+ %loadC = load i16, i16* %arrayidxC, align 2
%mul = mul i16 %loadB, %loadA
%mul1 = mul i16 %mul, %loadC
Modified: llvm/trunk/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks.ll (original)
+++ llvm/trunk/test/Analysis/LoopAccessAnalysis/unsafe-and-rt-checks.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ target triple = "x86_64-apple-macosx10.1
; CHECK: Report: unsafe dependent memory operations in loop
-; DEBUG: LAA: Distance for %loadA = load i16* %arrayidxA, align 2 to store i16 %mul1, i16* %arrayidxA_plus_2, align 2: 2
+; DEBUG: LAA: Distance for %loadA = load i16, i16* %arrayidxA, align 2 to store i16 %mul1, i16* %arrayidxA_plus_2, align 2: 2
; DEBUG-NEXT: LAA: Failure because of Positive distance 2
; CHECK: Run-time memory checks:
@@ -29,22 +29,22 @@ target triple = "x86_64-apple-macosx10.1
define void @f() {
entry:
- %a = load i16** @A, align 8
- %b = load i16** @B, align 8
- %c = load i16** @C, align 8
+ %a = load i16*, i16** @A, align 8
+ %b = load i16*, i16** @B, align 8
+ %c = load i16*, i16** @C, align 8
br label %for.body
for.body: ; preds = %for.body, %entry
%storemerge3 = phi i64 [ 0, %entry ], [ %add, %for.body ]
%arrayidxA = getelementptr inbounds i16, i16* %a, i64 %storemerge3
- %loadA = load i16* %arrayidxA, align 2
+ %loadA = load i16, i16* %arrayidxA, align 2
%arrayidxB = getelementptr inbounds i16, i16* %b, i64 %storemerge3
- %loadB = load i16* %arrayidxB, align 2
+ %loadB = load i16, i16* %arrayidxB, align 2
%arrayidxC = getelementptr inbounds i16, i16* %c, i64 %storemerge3
- %loadC = load i16* %arrayidxC, align 2
+ %loadC = load i16, i16* %arrayidxC, align 2
%mul = mul i16 %loadB, %loadA
%mul1 = mul i16 %mul, %loadC
Modified: llvm/trunk/test/Analysis/MemoryDependenceAnalysis/memdep_requires_dominator_tree.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemoryDependenceAnalysis/memdep_requires_dominator_tree.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/MemoryDependenceAnalysis/memdep_requires_dominator_tree.ll (original)
+++ llvm/trunk/test/Analysis/MemoryDependenceAnalysis/memdep_requires_dominator_tree.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ for.body:
%i.01 = phi i32 [ 0, %entry ], [ %tmp8.7, %for.body ]
%arrayidx = getelementptr i32, i32* %bufUInt, i32 %i.01
%arrayidx5 = getelementptr i32, i32* %pattern, i32 %i.01
- %tmp6 = load i32* %arrayidx5, align 4
+ %tmp6 = load i32, i32* %arrayidx5, align 4
store i32 %tmp6, i32* %arrayidx, align 4
%tmp8.7 = add i32 %i.01, 8
%cmp.7 = icmp ult i32 %tmp8.7, 1024
Modified: llvm/trunk/test/Analysis/ScalarEvolution/2008-07-12-UnneededSelect1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/2008-07-12-UnneededSelect1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/2008-07-12-UnneededSelect1.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/2008-07-12-UnneededSelect1.ll Fri Feb 27 15:17:42 2015
@@ -16,11 +16,11 @@ bb.nph: ; preds = %entry
bb: ; preds = %bb1, %bb.nph
%j.01 = phi i32 [ %8, %bb1 ], [ 0, %bb.nph ] ; <i32> [#uses=1]
- load i32* %srcptr, align 4 ; <i32>:1 [#uses=2]
+ load i32, i32* %srcptr, align 4 ; <i32>:1 [#uses=2]
and i32 %1, 255 ; <i32>:2 [#uses=1]
and i32 %1, -256 ; <i32>:3 [#uses=1]
getelementptr [256 x i8], [256 x i8]* @lut, i32 0, i32 %2 ; <i8*>:4 [#uses=1]
- load i8* %4, align 1 ; <i8>:5 [#uses=1]
+ load i8, i8* %4, align 1 ; <i8>:5 [#uses=1]
zext i8 %5 to i32 ; <i32>:6 [#uses=1]
or i32 %6, %3 ; <i32>:7 [#uses=1]
store i32 %7, i32* %dstptr, align 4
Modified: llvm/trunk/test/Analysis/ScalarEvolution/2008-12-08-FiniteSGE.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/2008-12-08-FiniteSGE.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/2008-12-08-FiniteSGE.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/2008-12-08-FiniteSGE.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ bb1: ; preds = %bb1, %bb1.thread
%indvar = phi i32 [ 0, %bb1.thread ], [ %indvar.next, %bb1 ] ; <i32> [#uses=4]
%i.0.reg2mem.0 = sub i32 255, %indvar ; <i32> [#uses=2]
%0 = getelementptr i32, i32* %alp, i32 %i.0.reg2mem.0 ; <i32*> [#uses=1]
- %1 = load i32* %0, align 4 ; <i32> [#uses=1]
+ %1 = load i32, i32* %0, align 4 ; <i32> [#uses=1]
%2 = getelementptr i32, i32* %lam, i32 %i.0.reg2mem.0 ; <i32*> [#uses=1]
store i32 %1, i32* %2, align 4
%3 = sub i32 254, %indvar ; <i32> [#uses=1]
Modified: llvm/trunk/test/Analysis/ScalarEvolution/2009-01-02-SignedNegativeStride.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/2009-01-02-SignedNegativeStride.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/2009-01-02-SignedNegativeStride.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/2009-01-02-SignedNegativeStride.ll Fri Feb 27 15:17:42 2015
@@ -9,12 +9,12 @@
define void @func_15() nounwind {
entry:
- %0 = load i16* @g_16, align 2 ; <i16> [#uses=1]
+ %0 = load i16, i16* @g_16, align 2 ; <i16> [#uses=1]
%1 = icmp sgt i16 %0, 0 ; <i1> [#uses=1]
br i1 %1, label %bb2, label %bb.nph
bb.nph: ; preds = %entry
- %g_16.promoted = load i16* @g_16 ; <i16> [#uses=1]
+ %g_16.promoted = load i16, i16* @g_16 ; <i16> [#uses=1]
br label %bb
bb: ; preds = %bb1, %bb.nph
Modified: llvm/trunk/test/Analysis/ScalarEvolution/2009-07-04-GroupConstantsWidthMismatch.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/2009-07-04-GroupConstantsWidthMismatch.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/2009-07-04-GroupConstantsWidthMismatch.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/2009-07-04-GroupConstantsWidthMismatch.ll Fri Feb 27 15:17:42 2015
@@ -3,11 +3,11 @@
define void @test() {
entry:
- %0 = load i16* undef, align 1
+ %0 = load i16, i16* undef, align 1
%1 = lshr i16 %0, 8
%2 = and i16 %1, 3
%3 = zext i16 %2 to i32
- %4 = load i8* undef, align 1
+ %4 = load i8, i8* undef, align 1
%5 = lshr i8 %4, 4
%6 = and i8 %5, 1
%7 = zext i8 %6 to i32
Modified: llvm/trunk/test/Analysis/ScalarEvolution/2012-03-26-LoadConstant.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/2012-03-26-LoadConstant.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/2012-03-26-LoadConstant.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/2012-03-26-LoadConstant.ll Fri Feb 27 15:17:42 2015
@@ -19,20 +19,20 @@ lbl_818:
br label %for.cond
for.cond: ; preds = %for.body, %lbl_818
- %0 = load i32* @g_814, align 4
+ %0 = load i32, i32* @g_814, align 4
%cmp = icmp sle i32 %0, 0
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
%idxprom = sext i32 %0 to i64
%arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* getelementptr inbounds ([1 x [0 x i32]]* @g_244, i32 0, i64 0), i32 0, i64 %idxprom
- %1 = load i32* %arrayidx, align 1
+ %1 = load i32, i32* %arrayidx, align 1
store i32 %1, i32* @func_21_l_773, align 4
store i32 1, i32* @g_814, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %2 = load i32* @func_21_l_773, align 4
+ %2 = load i32, i32* @func_21_l_773, align 4
%tobool = icmp ne i32 %2, 0
br i1 %tobool, label %lbl_818, label %if.end
Modified: llvm/trunk/test/Analysis/ScalarEvolution/avoid-infinite-recursion-0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/avoid-infinite-recursion-0.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/avoid-infinite-recursion-0.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/avoid-infinite-recursion-0.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@ target triple = "x86_64-unknown-linux-gn
define i32 @test() {
entry:
- %0 = load i32** undef, align 8 ; <i32*> [#uses=1]
+ %0 = load i32*, i32** undef, align 8 ; <i32*> [#uses=1]
%1 = ptrtoint i32* %0 to i64 ; <i64> [#uses=1]
%2 = sub i64 undef, %1 ; <i64> [#uses=1]
%3 = lshr i64 %2, 3 ; <i64> [#uses=1]
Modified: llvm/trunk/test/Analysis/ScalarEvolution/avoid-smax-0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/avoid-smax-0.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/avoid-smax-0.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/avoid-smax-0.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ entry:
br i1 %0, label %bb, label %return
bb:
- load i32* %q, align 4
+ load i32, i32* %q, align 4
icmp eq i32 %1, 0
br i1 %2, label %return, label %bb3.preheader
@@ -21,7 +21,7 @@ bb3.preheader:
bb3:
%i.0 = phi i32 [ %7, %bb3 ], [ 0, %bb3.preheader ]
getelementptr i32, i32* %p, i32 %i.0
- load i32* %3, align 4
+ load i32, i32* %3, align 4
add i32 %4, 1
getelementptr i32, i32* %p, i32 %i.0
store i32 %5, i32* %6, align 4
Modified: llvm/trunk/test/Analysis/ScalarEvolution/avoid-smax-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/avoid-smax-1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/avoid-smax-1.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/avoid-smax-1.ll Fri Feb 27 15:17:42 2015
@@ -36,7 +36,7 @@ bb6: ; preds = %bb7, %bb.nph7
%8 = shl i32 %x.06, 1 ; <i32> [#uses=1]
%9 = add i32 %6, %8 ; <i32> [#uses=1]
%10 = getelementptr i8, i8* %r, i32 %9 ; <i8*> [#uses=1]
- %11 = load i8* %10, align 1 ; <i8> [#uses=1]
+ %11 = load i8, i8* %10, align 1 ; <i8> [#uses=1]
%12 = getelementptr i8, i8* %j, i32 %7 ; <i8*> [#uses=1]
store i8 %11, i8* %12, align 1
%13 = add i32 %x.06, 1 ; <i32> [#uses=2]
@@ -103,7 +103,7 @@ bb14: ; preds = %bb15, %bb.nph3
%29 = shl i32 %x.12, 2 ; <i32> [#uses=1]
%30 = add i32 %29, %25 ; <i32> [#uses=1]
%31 = getelementptr i8, i8* %r, i32 %30 ; <i8*> [#uses=1]
- %32 = load i8* %31, align 1 ; <i8> [#uses=1]
+ %32 = load i8, i8* %31, align 1 ; <i8> [#uses=1]
%.sum = add i32 %26, %x.12 ; <i32> [#uses=1]
%33 = getelementptr i8, i8* %j, i32 %.sum ; <i8*> [#uses=1]
store i8 %32, i8* %33, align 1
@@ -111,7 +111,7 @@ bb14: ; preds = %bb15, %bb.nph3
%35 = or i32 %34, 2 ; <i32> [#uses=1]
%36 = add i32 %35, %25 ; <i32> [#uses=1]
%37 = getelementptr i8, i8* %r, i32 %36 ; <i8*> [#uses=1]
- %38 = load i8* %37, align 1 ; <i8> [#uses=1]
+ %38 = load i8, i8* %37, align 1 ; <i8> [#uses=1]
%.sum6 = add i32 %27, %x.12 ; <i32> [#uses=1]
%39 = getelementptr i8, i8* %j, i32 %.sum6 ; <i8*> [#uses=1]
store i8 %38, i8* %39, align 1
Modified: llvm/trunk/test/Analysis/ScalarEvolution/infer-prestart-no-wrap.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/infer-prestart-no-wrap.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/infer-prestart-no-wrap.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/infer-prestart-no-wrap.ll Fri Feb 27 15:17:42 2015
@@ -53,7 +53,7 @@ define void @infer.sext.1(i32 %start, i1
; CHECK: %idx.sext = sext i32 %idx to i64
; CHECK-NEXT: --> {(2 + (sext i32 (4 * %start) to i64)),+,2}<nsw><%loop>
%idx.inc = add nsw i32 %idx, 2
- %condition = load i1* %c
+ %condition = load i1, i1* %c
br i1 %condition, label %exit, label %loop
exit:
@@ -73,7 +73,7 @@ define void @infer.sext.2(i1* %c, i8 %st
; CHECK: %idx.sext = sext i8 %idx to i16
; CHECK-NEXT: --> {(1 + (sext i8 %start to i16)),+,1}<nsw><%loop>
%idx.inc = add nsw i8 %idx, 1
- %condition = load volatile i1* %c
+ %condition = load volatile i1, i1* %c
br i1 %condition, label %exit, label %loop
exit:
@@ -93,7 +93,7 @@ define void @infer.zext.1(i1* %c, i8 %st
; CHECK: %idx.zext = zext i8 %idx to i16
; CHECK-NEXT: --> {(1 + (zext i8 %start to i16)),+,1}<nuw><%loop>
%idx.inc = add nuw i8 %idx, 1
- %condition = load volatile i1* %c
+ %condition = load volatile i1, i1* %c
br i1 %condition, label %exit, label %loop
exit:
Modified: llvm/trunk/test/Analysis/ScalarEvolution/load-with-range-metadata.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/load-with-range-metadata.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/load-with-range-metadata.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/load-with-range-metadata.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
define i32 @slt_trip_count_with_range(i32 *%ptr0, i32 *%ptr1) {
; CHECK-LABEL: slt_trip_count_with_range
entry:
- %limit = load i32* %ptr0, !range !0
+ %limit = load i32, i32* %ptr0, !range !0
br label %loop
loop:
@@ -20,7 +20,7 @@ define i32 @slt_trip_count_with_range(i3
define i32 @ult_trip_count_with_range(i32 *%ptr0, i32 *%ptr1) {
; CHECK-LABEL: ult_trip_count_with_range
entry:
- %limit = load i32* %ptr0, !range !0
+ %limit = load i32, i32* %ptr0, !range !0
br label %loop
loop:
Modified: llvm/trunk/test/Analysis/ScalarEvolution/load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/load.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/load.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/load.ll Fri Feb 27 15:17:42 2015
@@ -17,10 +17,10 @@ for.body:
; CHECK: --> %sum.04{{ *}}Exits: 2450
%i.03 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
%arrayidx = getelementptr inbounds [50 x i32], [50 x i32]* @arr1, i32 0, i32 %i.03
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
; CHECK: --> %0{{ *}}Exits: 50
%arrayidx1 = getelementptr inbounds [50 x i32], [50 x i32]* @arr2, i32 0, i32 %i.03
- %1 = load i32* %arrayidx1, align 4
+ %1 = load i32, i32* %arrayidx1, align 4
; CHECK: --> %1{{ *}}Exits: 0
%add = add i32 %0, %sum.04
%add2 = add i32 %add, %1
@@ -52,10 +52,10 @@ for.body:
%n.01 = phi %struct.ListNode* [ bitcast ({ %struct.ListNode*, i32, [4 x i8] }* @node5 to %struct.ListNode*), %entry ], [ %1, %for.body ]
; CHECK: --> %n.01{{ *}}Exits: @node1
%i = getelementptr inbounds %struct.ListNode, %struct.ListNode* %n.01, i64 0, i32 1
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%add = add nsw i32 %0, %sum.02
%next = getelementptr inbounds %struct.ListNode, %struct.ListNode* %n.01, i64 0, i32 0
- %1 = load %struct.ListNode** %next, align 8
+ %1 = load %struct.ListNode*, %struct.ListNode** %next, align 8
; CHECK: --> %1{{ *}}Exits: 0
%cmp = icmp eq %struct.ListNode* %1, null
br i1 %cmp, label %for.end, label %for.body
Modified: llvm/trunk/test/Analysis/ScalarEvolution/max-trip-count.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/max-trip-count.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/max-trip-count.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/max-trip-count.ll Fri Feb 27 15:17:42 2015
@@ -174,7 +174,7 @@ for.body.i:
for.cond.i: ; preds = %for.body.i
store i32 %add.i.i, i32* @a, align 4
- %ld = load volatile i32* @b
+ %ld = load volatile i32, i32* @b
%cmp.i = icmp ne i32 %ld, 0
br i1 %cmp.i, label %for.body.i, label %bar.exit
Modified: llvm/trunk/test/Analysis/ScalarEvolution/min-max-exprs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/min-max-exprs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/min-max-exprs.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/min-max-exprs.ll Fri Feb 27 15:17:42 2015
@@ -35,7 +35,7 @@ bb2:
; CHECK: select i1 %tmp4, i64 %tmp5, i64 %tmp6
; CHECK-NEXT: --> (-1 + (-1 * ((-1 + (-1 * (sext i32 {3,+,1}<nw><%bb1> to i64))) smax (-1 + (-1 * (sext i32 %N to i64))))))
%tmp11 = getelementptr inbounds i32, i32* %A, i64 %tmp9
- %tmp12 = load i32* %tmp11, align 4
+ %tmp12 = load i32, i32* %tmp11, align 4
%tmp13 = shl nsw i32 %tmp12, 1
%tmp14 = icmp sge i32 3, %i.0
%tmp17 = add nsw i64 %i.0.1, -3
Modified: llvm/trunk/test/Analysis/ScalarEvolution/nsw-offset-assume.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/nsw-offset-assume.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/nsw-offset-assume.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/nsw-offset-assume.ll Fri Feb 27 15:17:42 2015
@@ -28,10 +28,10 @@ bb:
; CHECK: --> {%d,+,16}<nsw><%bb>
%2 = getelementptr inbounds double, double* %d, i64 %1 ; <double*> [#uses=1]
- %3 = load double* %2, align 8 ; <double> [#uses=1]
+ %3 = load double, double* %2, align 8 ; <double> [#uses=1]
%4 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
%5 = getelementptr inbounds double, double* %q, i64 %4 ; <double*> [#uses=1]
- %6 = load double* %5, align 8 ; <double> [#uses=1]
+ %6 = load double, double* %5, align 8 ; <double> [#uses=1]
%7 = or i32 %i.01, 1 ; <i32> [#uses=1]
; CHECK: %8 = sext i32 %7 to i64
@@ -54,7 +54,7 @@ bb:
; CHECK: {(8 + %q),+,16}<nsw><%bb>
%t9 = getelementptr inbounds double, double* %q, i64 %t8 ; <double*> [#uses=1]
- %10 = load double* %9, align 8 ; <double> [#uses=1]
+ %10 = load double, double* %9, align 8 ; <double> [#uses=1]
%11 = fadd double %6, %10 ; <double> [#uses=1]
%12 = fadd double %11, 3.200000e+00 ; <double> [#uses=1]
%13 = fmul double %3, %12 ; <double> [#uses=1]
Modified: llvm/trunk/test/Analysis/ScalarEvolution/nsw-offset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/nsw-offset.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/nsw-offset.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/nsw-offset.ll Fri Feb 27 15:17:42 2015
@@ -26,10 +26,10 @@ bb:
; CHECK: --> {%d,+,16}<nsw><%bb>
%2 = getelementptr inbounds double, double* %d, i64 %1 ; <double*> [#uses=1]
- %3 = load double* %2, align 8 ; <double> [#uses=1]
+ %3 = load double, double* %2, align 8 ; <double> [#uses=1]
%4 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
%5 = getelementptr inbounds double, double* %q, i64 %4 ; <double*> [#uses=1]
- %6 = load double* %5, align 8 ; <double> [#uses=1]
+ %6 = load double, double* %5, align 8 ; <double> [#uses=1]
%7 = or i32 %i.01, 1 ; <i32> [#uses=1]
; CHECK: %8 = sext i32 %7 to i64
@@ -52,7 +52,7 @@ bb:
; CHECK: {(8 + %q),+,16}<nsw><%bb>
%t9 = getelementptr inbounds double, double* %q, i64 %t8 ; <double*> [#uses=1]
- %10 = load double* %9, align 8 ; <double> [#uses=1]
+ %10 = load double, double* %9, align 8 ; <double> [#uses=1]
%11 = fadd double %6, %10 ; <double> [#uses=1]
%12 = fadd double %11, 3.200000e+00 ; <double> [#uses=1]
%13 = fmul double %3, %12 ; <double> [#uses=1]
Modified: llvm/trunk/test/Analysis/ScalarEvolution/nsw.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/nsw.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/nsw.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/nsw.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@ target datalayout = "e-p:64:64:64-i1:8:8
; CHECK: Classifying expressions for: @test1
define void @test1(double* %p) nounwind {
entry:
- %tmp = load double* %p, align 8 ; <double> [#uses=1]
+ %tmp = load double, double* %p, align 8 ; <double> [#uses=1]
%tmp1 = fcmp ogt double %tmp, 2.000000e+00 ; <i1> [#uses=1]
br i1 %tmp1, label %bb.nph, label %return
@@ -20,7 +20,7 @@ bb: ; preds = %bb1, %bb.nph
; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%bb>
%tmp2 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
%tmp3 = getelementptr double, double* %p, i64 %tmp2 ; <double*> [#uses=1]
- %tmp4 = load double* %tmp3, align 8 ; <double> [#uses=1]
+ %tmp4 = load double, double* %tmp3, align 8 ; <double> [#uses=1]
%tmp5 = fmul double %tmp4, 9.200000e+00 ; <double> [#uses=1]
%tmp6 = sext i32 %i.01 to i64 ; <i64> [#uses=1]
%tmp7 = getelementptr double, double* %p, i64 %tmp6 ; <double*> [#uses=1]
@@ -39,7 +39,7 @@ bb1: ; preds = %bb
%tmp9 = getelementptr double, double* %p, i64 %phitmp ; <double*> [#uses=1]
; CHECK: %tmp9
; CHECK-NEXT: --> {(8 + %p),+,8}<%bb>
- %tmp10 = load double* %tmp9, align 8 ; <double> [#uses=1]
+ %tmp10 = load double, double* %tmp9, align 8 ; <double> [#uses=1]
%tmp11 = fcmp ogt double %tmp10, 2.000000e+00 ; <i1> [#uses=1]
br i1 %tmp11, label %bb, label %bb1.return_crit_edge
Modified: llvm/trunk/test/Analysis/ScalarEvolution/pr22179.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/pr22179.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/pr22179.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/pr22179.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@ define i32 @main() {
loop:
%storemerge1 = phi i8 [ 0, %0 ], [ %inc, %loop ]
- %m = load volatile i32* getelementptr inbounds (%struct.S* @b, i64 0, i32 0), align 4
+ %m = load volatile i32, i32* getelementptr inbounds (%struct.S* @b, i64 0, i32 0), align 4
%inc = add nuw i8 %storemerge1, 1
; CHECK: %inc = add nuw i8 %storemerge1, 1
; CHECK-NEXT: --> {1,+,1}<nuw><%loop>
Modified: llvm/trunk/test/Analysis/ScalarEvolution/pr22674.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/pr22674.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/pr22674.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/pr22674.ll Fri Feb 27 15:17:42 2015
@@ -45,9 +45,9 @@ cond.false:
_ZNK4llvm12AttributeSet3endEj.exit: ; preds = %for.end
%second.i.i.i = getelementptr inbounds %"struct.std::pair.241.2040.3839.6152.6923.7694.8465.9493.10007.10264.18507", %"struct.std::pair.241.2040.3839.6152.6923.7694.8465.9493.10007.10264.18507"* undef, i32 %I.099.lcssa129, i32 1
- %0 = load %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506"** %second.i.i.i, align 4, !tbaa !2
+ %0 = load %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506"*, %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506"** %second.i.i.i, align 4, !tbaa !2
%NumAttrs.i.i.i = getelementptr inbounds %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506", %"class.llvm::AttributeSetNode.230.2029.3828.6141.6912.7683.8454.9482.9996.10253.18506"* %0, i32 0, i32 1
- %1 = load i32* %NumAttrs.i.i.i, align 4, !tbaa !8
+ %1 = load i32, i32* %NumAttrs.i.i.i, align 4, !tbaa !8
%add.ptr.i.i.i55 = getelementptr inbounds %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509", %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509"* undef, i32 %1
br i1 undef, label %return, label %for.body11
@@ -58,7 +58,7 @@ for.cond9:
for.body11: ; preds = %for.cond9, %_ZNK4llvm12AttributeSet3endEj.exit
%I5.096 = phi %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509"* [ %incdec.ptr, %for.cond9 ], [ undef, %_ZNK4llvm12AttributeSet3endEj.exit ]
%2 = bitcast %"class.llvm::Attribute.222.2021.3820.6133.6904.7675.8446.9474.9988.10245.18509"* %I5.096 to i32*
- %3 = load i32* %2, align 4, !tbaa !10
+ %3 = load i32, i32* %2, align 4, !tbaa !10
%tobool.i59 = icmp eq i32 %3, 0
br i1 %tobool.i59, label %cond.false21, label %_ZNK4llvm9Attribute15isEnumAttributeEv.exit
Modified: llvm/trunk/test/Analysis/ScalarEvolution/scev-aa.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/scev-aa.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/scev-aa.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/scev-aa.ll Fri Feb 27 15:17:42 2015
@@ -22,8 +22,8 @@ bb:
%pi = getelementptr double, double* %p, i64 %i
%i.next = add i64 %i, 1
%pi.next = getelementptr double, double* %p, i64 %i.next
- %x = load double* %pi
- %y = load double* %pi.next
+ %x = load double, double* %pi
+ %y = load double, double* %pi.next
%z = fmul double %x, %y
store double %z, double* %pi
%exitcond = icmp eq i64 %i.next, %n
@@ -61,15 +61,15 @@ bb:
%pi.j = getelementptr double, double* %p, i64 %e
%f = add i64 %i.next, %j
%pi.next.j = getelementptr double, double* %p, i64 %f
- %x = load double* %pi.j
- %y = load double* %pi.next.j
+ %x = load double, double* %pi.j
+ %y = load double, double* %pi.next.j
%z = fmul double %x, %y
store double %z, double* %pi.j
%o = add i64 %j, 91
%g = add i64 %i, %o
%pi.j.next = getelementptr double, double* %p, i64 %g
- %a = load double* %pi.j.next
+ %a = load double, double* %pi.j.next
%b = fmul double %x, %a
store double %b, double* %pi.j.next
@@ -118,15 +118,15 @@ bb:
%pi.j = getelementptr double, double* %p, i64 %e
%f = add i64 %i.next, %j
%pi.next.j = getelementptr double, double* %p, i64 %f
- %x = load double* %pi.j
- %y = load double* %pi.next.j
+ %x = load double, double* %pi.j
+ %y = load double, double* %pi.next.j
%z = fmul double %x, %y
store double %z, double* %pi.j
%o = add i64 %j, %n
%g = add i64 %i, %o
%pi.j.next = getelementptr double, double* %p, i64 %g
- %a = load double* %pi.j.next
+ %a = load double, double* %pi.j.next
%b = fmul double %x, %a
store double %b, double* %pi.j.next
@@ -202,7 +202,7 @@ for.body:
%inc = add nsw i64 %i, 1 ; <i64> [#uses=2]
%arrayidx = getelementptr inbounds i64, i64* %p, i64 %inc
store i64 0, i64* %arrayidx
- %tmp6 = load i64* %p ; <i64> [#uses=1]
+ %tmp6 = load i64, i64* %p ; <i64> [#uses=1]
%cmp = icmp slt i64 %inc, %tmp6 ; <i1> [#uses=1]
br i1 %cmp, label %for.body, label %for.end
Modified: llvm/trunk/test/Analysis/ScalarEvolution/scev-prestart-nowrap.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/scev-prestart-nowrap.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/scev-prestart-nowrap.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/scev-prestart-nowrap.ll Fri Feb 27 15:17:42 2015
@@ -66,7 +66,7 @@ define i64 @bad.1(i32 %start, i32 %low.l
br i1 %break.early, label %continue.1, label %early.exit
continue.1:
- %cond = load volatile i1* %unknown
+ %cond = load volatile i1, i1* %unknown
%idx.inc = add nsw i32 %idx, 1
br i1 %cond, label %loop, label %continue
Modified: llvm/trunk/test/Analysis/ScalarEvolution/sext-iv-0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/sext-iv-0.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/sext-iv-0.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/sext-iv-0.ll Fri Feb 27 15:17:42 2015
@@ -24,7 +24,7 @@ bb1: ; preds = %bb1, %bb1.thread
; CHECK: %2
; CHECK-NEXT: --> {-128,+,1}<nsw><%bb1> Exits: 127
%3 = getelementptr double, double* %x, i64 %2 ; <double*> [#uses=1]
- %4 = load double* %3, align 8 ; <double> [#uses=1]
+ %4 = load double, double* %3, align 8 ; <double> [#uses=1]
%5 = fmul double %4, 3.900000e+00 ; <double> [#uses=1]
%6 = sext i8 %0 to i64 ; <i64> [#uses=1]
%7 = getelementptr double, double* %x, i64 %6 ; <double*> [#uses=1]
Modified: llvm/trunk/test/Analysis/ScalarEvolution/sext-iv-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/sext-iv-1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/sext-iv-1.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/sext-iv-1.ll Fri Feb 27 15:17:42 2015
@@ -24,7 +24,7 @@ bb1: ; preds = %bb1, %bb1.thread
%1 = trunc i64 %i.0.reg2mem.0 to i9 ; <i8> [#uses=1]
%2 = sext i9 %1 to i64 ; <i64> [#uses=1]
%3 = getelementptr double, double* %x, i64 %2 ; <double*> [#uses=1]
- %4 = load double* %3, align 8 ; <double> [#uses=1]
+ %4 = load double, double* %3, align 8 ; <double> [#uses=1]
%5 = fmul double %4, 3.900000e+00 ; <double> [#uses=1]
%6 = sext i7 %0 to i64 ; <i64> [#uses=1]
%7 = getelementptr double, double* %x, i64 %6 ; <double*> [#uses=1]
@@ -47,7 +47,7 @@ bb1: ; preds = %bb1, %bb1.thread
%1 = trunc i64 %i.0.reg2mem.0 to i9 ; <i8> [#uses=1]
%2 = sext i9 %1 to i64 ; <i64> [#uses=1]
%3 = getelementptr double, double* %x, i64 %2 ; <double*> [#uses=1]
- %4 = load double* %3, align 8 ; <double> [#uses=1]
+ %4 = load double, double* %3, align 8 ; <double> [#uses=1]
%5 = fmul double %4, 3.900000e+00 ; <double> [#uses=1]
%6 = sext i8 %0 to i64 ; <i64> [#uses=1]
%7 = getelementptr double, double* %x, i64 %6 ; <double*> [#uses=1]
@@ -70,7 +70,7 @@ bb1: ; preds = %bb1, %bb1.thread
%1 = trunc i64 %i.0.reg2mem.0 to i9 ; <i8> [#uses=1]
%2 = sext i9 %1 to i64 ; <i64> [#uses=1]
%3 = getelementptr double, double* %x, i64 %2 ; <double*> [#uses=1]
- %4 = load double* %3, align 8 ; <double> [#uses=1]
+ %4 = load double, double* %3, align 8 ; <double> [#uses=1]
%5 = fmul double %4, 3.900000e+00 ; <double> [#uses=1]
%6 = sext i8 %0 to i64 ; <i64> [#uses=1]
%7 = getelementptr double, double* %x, i64 %6 ; <double*> [#uses=1]
@@ -93,7 +93,7 @@ bb1: ; preds = %bb1, %bb1.thread
%1 = trunc i64 %i.0.reg2mem.0 to i9 ; <i8> [#uses=1]
%2 = sext i9 %1 to i64 ; <i64> [#uses=1]
%3 = getelementptr double, double* %x, i64 %2 ; <double*> [#uses=1]
- %4 = load double* %3, align 8 ; <double> [#uses=1]
+ %4 = load double, double* %3, align 8 ; <double> [#uses=1]
%5 = fmul double %4, 3.900000e+00 ; <double> [#uses=1]
%6 = sext i8 %0 to i64 ; <i64> [#uses=1]
%7 = getelementptr double, double* %x, i64 %6 ; <double*> [#uses=1]
Modified: llvm/trunk/test/Analysis/ScalarEvolution/sext-iv-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/sext-iv-2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/sext-iv-2.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/sext-iv-2.ll Fri Feb 27 15:17:42 2015
@@ -56,7 +56,7 @@ bb4.bb5_crit_edge: ; preds = %bb4
br label %bb5
bb5: ; preds = %bb4.bb5_crit_edge, %entry
- %tmp12 = load i32* getelementptr ([32 x [256 x i32]]* @table, i64 0, i64 9, i64 132), align 16 ; <i32> [#uses=1]
+ %tmp12 = load i32, i32* getelementptr ([32 x [256 x i32]]* @table, i64 0, i64 9, i64 132), align 16 ; <i32> [#uses=1]
%tmp13 = icmp eq i32 %tmp12, -1116 ; <i1> [#uses=1]
br i1 %tmp13, label %bb7, label %bb6
Modified: llvm/trunk/test/Analysis/ScalarEvolution/sle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/sle.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/sle.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/sle.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@ entry:
for.body: ; preds = %for.body, %entry
%i = phi i64 [ %i.next, %for.body ], [ 0, %entry ] ; <i64> [#uses=2]
%arrayidx = getelementptr double, double* %p, i64 %i ; <double*> [#uses=2]
- %t4 = load double* %arrayidx ; <double> [#uses=1]
+ %t4 = load double, double* %arrayidx ; <double> [#uses=1]
%mul = fmul double %t4, 2.200000e+00 ; <double> [#uses=1]
store double %mul, double* %arrayidx
%i.next = add nsw i64 %i, 1 ; <i64> [#uses=2]
Modified: llvm/trunk/test/Analysis/ScalarEvolution/trip-count11.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/trip-count11.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/trip-count11.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/trip-count11.ll Fri Feb 27 15:17:42 2015
@@ -21,7 +21,7 @@ for.cond:
for.inc: ; preds = %for.cond
%idxprom = sext i32 %i.0 to i64
%arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* @foo.a, i64 0, i64 %idxprom
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %sum.0, %0
%inc = add nsw i32 %i.0, 1
br label %for.cond
@@ -44,7 +44,7 @@ for.cond:
for.inc: ; preds = %for.cond
%idxprom = sext i32 %i.0 to i64
%arrayidx = getelementptr inbounds [8 x i32], [8 x i32] addrspace(1)* @foo.a_as1, i64 0, i64 %idxprom
- %0 = load i32 addrspace(1)* %arrayidx, align 4
+ %0 = load i32, i32 addrspace(1)* %arrayidx, align 4
%add = add nsw i32 %sum.0, %0
%inc = add nsw i32 %i.0, 1
br label %for.cond
Modified: llvm/trunk/test/Analysis/ScalarEvolution/trip-count12.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/trip-count12.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/trip-count12.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/trip-count12.ll Fri Feb 27 15:17:42 2015
@@ -17,7 +17,7 @@ for.body:
%len.addr.04 = phi i32 [ %sub, %for.body ], [ %len, %for.body.preheader ]
%res.03 = phi i32 [ %add, %for.body ], [ 0, %for.body.preheader ]
%incdec.ptr = getelementptr inbounds i16, i16* %p.addr.05, i32 1
- %0 = load i16* %p.addr.05, align 2
+ %0 = load i16, i16* %p.addr.05, align 2
%conv = zext i16 %0 to i32
%add = add i32 %conv, %res.03
%sub = add nsw i32 %len.addr.04, -2
Modified: llvm/trunk/test/Analysis/ScalarEvolution/trip-count4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/trip-count4.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/trip-count4.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/trip-count4.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@ loop: ; preds = %loop, %entry
%s0 = shl i64 %indvar, 8 ; <i64> [#uses=1]
%indvar.i8 = ashr i64 %s0, 8 ; <i64> [#uses=1]
%t0 = getelementptr double, double* %d, i64 %indvar.i8 ; <double*> [#uses=2]
- %t1 = load double* %t0 ; <double> [#uses=1]
+ %t1 = load double, double* %t0 ; <double> [#uses=1]
%t2 = fmul double %t1, 1.000000e-01 ; <double> [#uses=1]
store double %t2, double* %t0
%indvar.next = sub i64 %indvar, 1 ; <i64> [#uses=2]
Modified: llvm/trunk/test/Analysis/ScalarEvolution/trip-count5.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/trip-count5.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/trip-count5.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/trip-count5.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ target datalayout = "e-p:64:64:64-i1:8:8
define float @t(float* %pTmp1, float* %peakWeight, float* %nrgReducePeakrate, i32 %bim) nounwind {
entry:
- %tmp3 = load float* %peakWeight, align 4 ; <float> [#uses=2]
+ %tmp3 = load float, float* %peakWeight, align 4 ; <float> [#uses=2]
%tmp2538 = icmp sgt i32 %bim, 0 ; <i1> [#uses=1]
br i1 %tmp2538, label %bb.nph, label %bb4
@@ -22,12 +22,12 @@ bb: ; preds = %bb1, %bb.nph
%peakCount.034 = phi float [ %tmp19, %bb1 ], [ %tmp3, %bb.nph ] ; <float> [#uses=1]
%tmp6 = sext i32 %hiPart.035 to i64 ; <i64> [#uses=1]
%tmp7 = getelementptr float, float* %pTmp1, i64 %tmp6 ; <float*> [#uses=1]
- %tmp8 = load float* %tmp7, align 4 ; <float> [#uses=1]
+ %tmp8 = load float, float* %tmp7, align 4 ; <float> [#uses=1]
%tmp10 = fadd float %tmp8, %distERBhi.036 ; <float> [#uses=3]
%tmp12 = add i32 %hiPart.035, 1 ; <i32> [#uses=3]
%tmp15 = sext i32 %tmp12 to i64 ; <i64> [#uses=1]
%tmp16 = getelementptr float, float* %peakWeight, i64 %tmp15 ; <float*> [#uses=1]
- %tmp17 = load float* %tmp16, align 4 ; <float> [#uses=1]
+ %tmp17 = load float, float* %tmp16, align 4 ; <float> [#uses=1]
%tmp19 = fadd float %tmp17, %peakCount.034 ; <float> [#uses=2]
br label %bb1
Modified: llvm/trunk/test/Analysis/ScalarEvolution/trip-count6.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/trip-count6.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/trip-count6.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/trip-count6.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@ bb: ; preds = %bb4, %entry
%mode.0 = phi i8 [ 0, %entry ], [ %indvar.next, %bb4 ] ; <i8> [#uses=4]
zext i8 %mode.0 to i32 ; <i32>:1 [#uses=1]
getelementptr [4 x i32], [4 x i32]* @mode_table, i32 0, i32 %1 ; <i32*>:2 [#uses=1]
- load i32* %2, align 4 ; <i32>:3 [#uses=1]
+ load i32, i32* %2, align 4 ; <i32>:3 [#uses=1]
icmp eq i32 %3, %0 ; <i1>:4 [#uses=1]
br i1 %4, label %bb1, label %bb2
Modified: llvm/trunk/test/Analysis/ScalarEvolution/trip-count7.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/trip-count7.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/trip-count7.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/trip-count7.ll Fri Feb 27 15:17:42 2015
@@ -73,7 +73,7 @@ bb.i: ; preds = %bb7.i
store i32 0, i32* %q, align 4
%tmp1 = sext i32 %tmp to i64 ; <i64> [#uses=1]
%tmp2 = getelementptr [9 x i32], [9 x i32]* %a, i64 0, i64 %tmp1 ; <i32*> [#uses=1]
- %tmp3 = load i32* %tmp2, align 4 ; <i32> [#uses=1]
+ %tmp3 = load i32, i32* %tmp2, align 4 ; <i32> [#uses=1]
%tmp4 = icmp eq i32 %tmp3, 0 ; <i1> [#uses=1]
br i1 %tmp4, label %bb.i.bb7.i.backedge_crit_edge, label %bb1.i
@@ -81,7 +81,7 @@ bb1.i: ; preds = %bb.i
%tmp5 = add i32 %j.0.i, 2 ; <i32> [#uses=1]
%tmp6 = sext i32 %tmp5 to i64 ; <i64> [#uses=1]
%tmp7 = getelementptr [17 x i32], [17 x i32]* %b, i64 0, i64 %tmp6 ; <i32*> [#uses=1]
- %tmp8 = load i32* %tmp7, align 4 ; <i32> [#uses=1]
+ %tmp8 = load i32, i32* %tmp7, align 4 ; <i32> [#uses=1]
%tmp9 = icmp eq i32 %tmp8, 0 ; <i1> [#uses=1]
br i1 %tmp9, label %bb1.i.bb7.i.backedge_crit_edge, label %bb2.i
@@ -89,7 +89,7 @@ bb2.i: ; preds = %bb1.i
%tmp10 = sub i32 7, %j.0.i ; <i32> [#uses=1]
%tmp11 = sext i32 %tmp10 to i64 ; <i64> [#uses=1]
%tmp12 = getelementptr [15 x i32], [15 x i32]* %c, i64 0, i64 %tmp11 ; <i32*> [#uses=1]
- %tmp13 = load i32* %tmp12, align 4 ; <i32> [#uses=1]
+ %tmp13 = load i32, i32* %tmp12, align 4 ; <i32> [#uses=1]
%tmp14 = icmp eq i32 %tmp13, 0 ; <i1> [#uses=1]
br i1 %tmp14, label %bb2.i.bb7.i.backedge_crit_edge, label %bb3.i
@@ -108,7 +108,7 @@ bb3.i: ; preds = %bb2.i
%tmp23 = getelementptr [15 x i32], [15 x i32]* %c, i64 0, i64 %tmp22 ; <i32*> [#uses=1]
store i32 0, i32* %tmp23, align 4
call void @Try(i32 2, i32* %q, i32* %b9, i32* %a10, i32* %c11, i32* %x1.sub) nounwind
- %tmp24 = load i32* %q, align 4 ; <i32> [#uses=1]
+ %tmp24 = load i32, i32* %q, align 4 ; <i32> [#uses=1]
%tmp25 = icmp eq i32 %tmp24, 0 ; <i1> [#uses=1]
br i1 %tmp25, label %bb5.i, label %bb3.i.bb7.i.backedge_crit_edge
@@ -131,7 +131,7 @@ bb7.i.backedge: ; preds = %bb3.i.bb7.i.
bb7.i: ; preds = %bb7.i.backedge, %newFuncRoot
%j.0.i = phi i32 [ 0, %newFuncRoot ], [ %tmp, %bb7.i.backedge ] ; <i32> [#uses=8]
- %tmp34 = load i32* %q, align 4 ; <i32> [#uses=1]
+ %tmp34 = load i32, i32* %q, align 4 ; <i32> [#uses=1]
%tmp35 = icmp eq i32 %tmp34, 0 ; <i1> [#uses=1]
%tmp36 = icmp ne i32 %j.0.i, 8 ; <i1> [#uses=1]
%tmp37 = and i1 %tmp35, %tmp36 ; <i1> [#uses=1]
Modified: llvm/trunk/test/Analysis/ScalarEvolution/zext-signed-addrec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScalarEvolution/zext-signed-addrec.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScalarEvolution/zext-signed-addrec.ll (original)
+++ llvm/trunk/test/Analysis/ScalarEvolution/zext-signed-addrec.ll Fri Feb 27 15:17:42 2015
@@ -15,16 +15,16 @@ target triple = "x86_64-unknown-linux-gn
; CHECK-LABEL: foo
define i32 @foo() {
entry:
- %.pr = load i32* @b, align 4
+ %.pr = load i32, i32* @b, align 4
%cmp10 = icmp slt i32 %.pr, 1
br i1 %cmp10, label %for.cond1.preheader.lr.ph, label %entry.for.end9_crit_edge
entry.for.end9_crit_edge: ; preds = %entry
- %.pre = load i32* @c, align 4
+ %.pre = load i32, i32* @c, align 4
br label %for.end9
for.cond1.preheader.lr.ph: ; preds = %entry
- %0 = load i32* @a, align 4
+ %0 = load i32, i32* @a, align 4
%tobool = icmp eq i32 %0, 0
br i1 %tobool, label %for.cond1.preheader.for.cond1.preheader.split_crit_edge, label %return.loopexit.split
Modified: llvm/trunk/test/Analysis/ScopedNoAliasAA/basic-domains.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScopedNoAliasAA/basic-domains.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScopedNoAliasAA/basic-domains.ll (original)
+++ llvm/trunk/test/Analysis/ScopedNoAliasAA/basic-domains.ll Fri Feb 27 15:17:42 2015
@@ -5,15 +5,15 @@ target triple = "x86_64-unknown-linux-gn
define void @foo1(float* nocapture %a, float* nocapture readonly %c) #0 {
entry:
; CHECK-LABEL: Function: foo1
- %0 = load float* %c, align 4, !alias.scope !9
+ %0 = load float, float* %c, align 4, !alias.scope !9
%arrayidx.i = getelementptr inbounds float, float* %a, i64 5
store float %0, float* %arrayidx.i, align 4, !noalias !6
- %1 = load float* %c, align 4, !alias.scope !5
+ %1 = load float, float* %c, align 4, !alias.scope !5
%arrayidx.i2 = getelementptr inbounds float, float* %a, i64 15
store float %1, float* %arrayidx.i2, align 4, !noalias !6
- %2 = load float* %c, align 4, !alias.scope !6
+ %2 = load float, float* %c, align 4, !alias.scope !6
%arrayidx.i3 = getelementptr inbounds float, float* %a, i64 16
store float %2, float* %arrayidx.i3, align 4, !noalias !5
@@ -42,15 +42,15 @@ attributes #0 = { nounwind uwtable }
; A list of scopes from both domains.
!9 = !{!2, !4, !7}
-; CHECK: NoAlias: %0 = load float* %c, align 4, !alias.scope !0 <-> store float %0, float* %arrayidx.i, align 4, !noalias !6
-; CHECK: NoAlias: %0 = load float* %c, align 4, !alias.scope !0 <-> store float %1, float* %arrayidx.i2, align 4, !noalias !6
-; CHECK: MayAlias: %0 = load float* %c, align 4, !alias.scope !0 <-> store float %2, float* %arrayidx.i3, align 4, !noalias !7
-; CHECK: NoAlias: %1 = load float* %c, align 4, !alias.scope !7 <-> store float %0, float* %arrayidx.i, align 4, !noalias !6
-; CHECK: NoAlias: %1 = load float* %c, align 4, !alias.scope !7 <-> store float %1, float* %arrayidx.i2, align 4, !noalias !6
-; CHECK: NoAlias: %1 = load float* %c, align 4, !alias.scope !7 <-> store float %2, float* %arrayidx.i3, align 4, !noalias !7
-; CHECK: NoAlias: %2 = load float* %c, align 4, !alias.scope !6 <-> store float %0, float* %arrayidx.i, align 4, !noalias !6
-; CHECK: NoAlias: %2 = load float* %c, align 4, !alias.scope !6 <-> store float %1, float* %arrayidx.i2, align 4, !noalias !6
-; CHECK: MayAlias: %2 = load float* %c, align 4, !alias.scope !6 <-> store float %2, float* %arrayidx.i3, align 4, !noalias !7
+; CHECK: NoAlias: %0 = load float, float* %c, align 4, !alias.scope !0 <-> store float %0, float* %arrayidx.i, align 4, !noalias !6
+; CHECK: NoAlias: %0 = load float, float* %c, align 4, !alias.scope !0 <-> store float %1, float* %arrayidx.i2, align 4, !noalias !6
+; CHECK: MayAlias: %0 = load float, float* %c, align 4, !alias.scope !0 <-> store float %2, float* %arrayidx.i3, align 4, !noalias !7
+; CHECK: NoAlias: %1 = load float, float* %c, align 4, !alias.scope !7 <-> store float %0, float* %arrayidx.i, align 4, !noalias !6
+; CHECK: NoAlias: %1 = load float, float* %c, align 4, !alias.scope !7 <-> store float %1, float* %arrayidx.i2, align 4, !noalias !6
+; CHECK: NoAlias: %1 = load float, float* %c, align 4, !alias.scope !7 <-> store float %2, float* %arrayidx.i3, align 4, !noalias !7
+; CHECK: NoAlias: %2 = load float, float* %c, align 4, !alias.scope !6 <-> store float %0, float* %arrayidx.i, align 4, !noalias !6
+; CHECK: NoAlias: %2 = load float, float* %c, align 4, !alias.scope !6 <-> store float %1, float* %arrayidx.i2, align 4, !noalias !6
+; CHECK: MayAlias: %2 = load float, float* %c, align 4, !alias.scope !6 <-> store float %2, float* %arrayidx.i3, align 4, !noalias !7
; CHECK: NoAlias: store float %1, float* %arrayidx.i2, align 4, !noalias !6 <-> store float %0, float* %arrayidx.i, align 4, !noalias !6
; CHECK: NoAlias: store float %2, float* %arrayidx.i3, align 4, !noalias !7 <-> store float %0, float* %arrayidx.i, align 4, !noalias !6
; CHECK: NoAlias: store float %2, float* %arrayidx.i3, align 4, !noalias !7 <-> store float %1, float* %arrayidx.i2, align 4, !noalias !6
Modified: llvm/trunk/test/Analysis/ScopedNoAliasAA/basic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScopedNoAliasAA/basic.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScopedNoAliasAA/basic.ll (original)
+++ llvm/trunk/test/Analysis/ScopedNoAliasAA/basic.ll Fri Feb 27 15:17:42 2015
@@ -5,18 +5,18 @@ target triple = "x86_64-unknown-linux-gn
define void @foo1(float* nocapture %a, float* nocapture readonly %c) #0 {
entry:
; CHECK-LABEL: Function: foo1
- %0 = load float* %c, align 4, !alias.scope !1
+ %0 = load float, float* %c, align 4, !alias.scope !1
%arrayidx.i = getelementptr inbounds float, float* %a, i64 5
store float %0, float* %arrayidx.i, align 4, !noalias !1
- %1 = load float* %c, align 4
+ %1 = load float, float* %c, align 4
%arrayidx = getelementptr inbounds float, float* %a, i64 7
store float %1, float* %arrayidx, align 4
ret void
-; CHECK: NoAlias: %0 = load float* %c, align 4, !alias.scope !0 <-> store float %0, float* %arrayidx.i, align 4, !noalias !0
-; CHECK: MayAlias: %0 = load float* %c, align 4, !alias.scope !0 <-> store float %1, float* %arrayidx, align 4
-; CHECK: MayAlias: %1 = load float* %c, align 4 <-> store float %0, float* %arrayidx.i, align 4, !noalias !0
-; CHECK: MayAlias: %1 = load float* %c, align 4 <-> store float %1, float* %arrayidx, align 4
+; CHECK: NoAlias: %0 = load float, float* %c, align 4, !alias.scope !0 <-> store float %0, float* %arrayidx.i, align 4, !noalias !0
+; CHECK: MayAlias: %0 = load float, float* %c, align 4, !alias.scope !0 <-> store float %1, float* %arrayidx, align 4
+; CHECK: MayAlias: %1 = load float, float* %c, align 4 <-> store float %0, float* %arrayidx.i, align 4, !noalias !0
+; CHECK: MayAlias: %1 = load float, float* %c, align 4 <-> store float %1, float* %arrayidx, align 4
; CHECK: NoAlias: store float %1, float* %arrayidx, align 4 <-> store float %0, float* %arrayidx.i, align 4, !noalias !0
}
Modified: llvm/trunk/test/Analysis/ScopedNoAliasAA/basic2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ScopedNoAliasAA/basic2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ScopedNoAliasAA/basic2.ll (original)
+++ llvm/trunk/test/Analysis/ScopedNoAliasAA/basic2.ll Fri Feb 27 15:17:42 2015
@@ -5,24 +5,24 @@ target triple = "x86_64-unknown-linux-gn
define void @foo2(float* nocapture %a, float* nocapture %b, float* nocapture readonly %c) #0 {
entry:
; CHECK-LABEL: Function: foo2
- %0 = load float* %c, align 4, !alias.scope !0
+ %0 = load float, float* %c, align 4, !alias.scope !0
%arrayidx.i = getelementptr inbounds float, float* %a, i64 5
store float %0, float* %arrayidx.i, align 4, !alias.scope !5, !noalias !4
%arrayidx1.i = getelementptr inbounds float, float* %b, i64 8
store float %0, float* %arrayidx1.i, align 4, !alias.scope !0, !noalias !5
- %1 = load float* %c, align 4
+ %1 = load float, float* %c, align 4
%arrayidx = getelementptr inbounds float, float* %a, i64 7
store float %1, float* %arrayidx, align 4
ret void
-; CHECK: MayAlias: %0 = load float* %c, align 4, !alias.scope !0 <-> store float %0, float* %arrayidx.i, align 4, !alias.scope !4, !noalia
+; CHECK: MayAlias: %0 = load float, float* %c, align 4, !alias.scope !0 <-> store float %0, float* %arrayidx.i, align 4, !alias.scope !4, !noalia
; CHECK: s !5
-; CHECK: MayAlias: %0 = load float* %c, align 4, !alias.scope !0 <-> store float %0, float* %arrayidx1.i, align 4, !alias.scope !0, !noali
+; CHECK: MayAlias: %0 = load float, float* %c, align 4, !alias.scope !0 <-> store float %0, float* %arrayidx1.i, align 4, !alias.scope !0, !noali
; CHECK: as !4
-; CHECK: MayAlias: %0 = load float* %c, align 4, !alias.scope !0 <-> store float %1, float* %arrayidx, align 4
-; CHECK: MayAlias: %1 = load float* %c, align 4 <-> store float %0, float* %arrayidx.i, align 4, !alias.scope !4, !noalias !5
-; CHECK: MayAlias: %1 = load float* %c, align 4 <-> store float %0, float* %arrayidx1.i, align 4, !alias.scope !0, !noalias !4
-; CHECK: MayAlias: %1 = load float* %c, align 4 <-> store float %1, float* %arrayidx, align 4
+; CHECK: MayAlias: %0 = load float, float* %c, align 4, !alias.scope !0 <-> store float %1, float* %arrayidx, align 4
+; CHECK: MayAlias: %1 = load float, float* %c, align 4 <-> store float %0, float* %arrayidx.i, align 4, !alias.scope !4, !noalias !5
+; CHECK: MayAlias: %1 = load float, float* %c, align 4 <-> store float %0, float* %arrayidx1.i, align 4, !alias.scope !0, !noalias !4
+; CHECK: MayAlias: %1 = load float, float* %c, align 4 <-> store float %1, float* %arrayidx, align 4
; CHECK: NoAlias: store float %0, float* %arrayidx1.i, align 4, !alias.scope !0, !noalias !4 <-> store float %0, float* %arrayidx.i, align
; CHECK: 4, !alias.scope !4, !noalias !5
; CHECK: NoAlias: store float %1, float* %arrayidx, align 4 <-> store float %0, float* %arrayidx.i, align 4, !alias.scope !4, !noalias !5
Modified: llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll (original)
+++ llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/PR17620.ll Fri Feb 27 15:17:42 2015
@@ -16,11 +16,11 @@ target datalayout = "e-p:64:64:64-i1:8:8
define %structA** @test(%classA* %this, i32** %p1) #0 align 2 {
entry:
; CHECK-LABEL: @test
-; CHECK: load i32** %p1, align 8, !tbaa
-; CHECK: load i32** getelementptr (%classC* null, i32 0, i32 1, i32 0, i32 0), align 8, !tbaa
+; CHECK: load i32*, i32** %p1, align 8, !tbaa
+; CHECK: load i32*, i32** getelementptr (%classC* null, i32 0, i32 1, i32 0, i32 0), align 8, !tbaa
; CHECK: call void @callee
- %0 = load i32** %p1, align 8, !tbaa !1
- %1 = load i32** getelementptr (%classC* null, i32 0, i32 1, i32 0, i32 0), align 8, !tbaa !5
+ %0 = load i32*, i32** %p1, align 8, !tbaa !1
+ %1 = load i32*, i32** getelementptr (%classC* null, i32 0, i32 1, i32 0, i32 0), align 8, !tbaa !5
call void @callee(i32* %0, i32* %1)
unreachable
}
Modified: llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/aliastest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/aliastest.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/aliastest.ll (original)
+++ llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/aliastest.ll Fri Feb 27 15:17:42 2015
@@ -5,9 +5,9 @@
; CHECK: @test0_yes
; CHECK: add i8 %x, %x
define i8 @test0_yes(i8* %a, i8* %b) nounwind {
- %x = load i8* %a, !tbaa !1
+ %x = load i8, i8* %a, !tbaa !1
store i8 0, i8* %b, !tbaa !2
- %y = load i8* %a, !tbaa !1
+ %y = load i8, i8* %a, !tbaa !1
%z = add i8 %x, %y
ret i8 %z
}
@@ -15,9 +15,9 @@ define i8 @test0_yes(i8* %a, i8* %b) nou
; CHECK: @test0_no
; CHECK: add i8 %x, %y
define i8 @test0_no(i8* %a, i8* %b) nounwind {
- %x = load i8* %a, !tbaa !3
+ %x = load i8, i8* %a, !tbaa !3
store i8 0, i8* %b, !tbaa !4
- %y = load i8* %a, !tbaa !3
+ %y = load i8, i8* %a, !tbaa !3
%z = add i8 %x, %y
ret i8 %z
}
@@ -27,9 +27,9 @@ define i8 @test0_no(i8* %a, i8* %b) noun
; CHECK: @test1_yes
; CHECK: add i8 %x, %x
define i8 @test1_yes(i8* %a, i8* %b) nounwind {
- %x = load i8* %a, !tbaa !5
+ %x = load i8, i8* %a, !tbaa !5
store i8 0, i8* %b
- %y = load i8* %a, !tbaa !5
+ %y = load i8, i8* %a, !tbaa !5
%z = add i8 %x, %y
ret i8 %z
}
@@ -37,9 +37,9 @@ define i8 @test1_yes(i8* %a, i8* %b) nou
; CHECK: @test1_no
; CHECK: add i8 %x, %y
define i8 @test1_no(i8* %a, i8* %b) nounwind {
- %x = load i8* %a, !tbaa !6
+ %x = load i8, i8* %a, !tbaa !6
store i8 0, i8* %b
- %y = load i8* %a, !tbaa !6
+ %y = load i8, i8* %a, !tbaa !6
%z = add i8 %x, %y
ret i8 %z
}
Modified: llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/argument-promotion.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/argument-promotion.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/argument-promotion.ll (original)
+++ llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/argument-promotion.ll Fri Feb 27 15:17:42 2015
@@ -6,8 +6,8 @@ target datalayout = "E-p:64:64:64"
; CHECK-NOT: alloca
define internal i32 @test(i32* %X, i32* %Y, i32* %Q) {
store i32 77, i32* %Q, !tbaa !2
- %A = load i32* %X, !tbaa !1
- %B = load i32* %Y, !tbaa !1
+ %A = load i32, i32* %X, !tbaa !1
+ %B = load i32, i32* %Y, !tbaa !1
%C = add i32 %A, %B
ret i32 %C
}
Modified: llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/dse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/dse.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/dse.ll (original)
+++ llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/dse.ll Fri Feb 27 15:17:42 2015
@@ -4,47 +4,47 @@ target datalayout = "e-m:e-i64:64-f80:12
; DSE should make use of TBAA.
; CHECK: @test0_yes
-; CHECK-NEXT: load i8* %b
+; CHECK-NEXT: load i8, i8* %b
; CHECK-NEXT: store i8 1, i8* %a
; CHECK-NEXT: ret i8 %y
define i8 @test0_yes(i8* %a, i8* %b) nounwind {
store i8 0, i8* %a, !tbaa !1
- %y = load i8* %b, !tbaa !2
+ %y = load i8, i8* %b, !tbaa !2
store i8 1, i8* %a, !tbaa !1
ret i8 %y
}
; CHECK: @test0_no
; CHECK-NEXT: store i8 0, i8* %a
-; CHECK-NEXT: load i8* %b
+; CHECK-NEXT: load i8, i8* %b
; CHECK-NEXT: store i8 1, i8* %a
; CHECK-NEXT: ret i8 %y
define i8 @test0_no(i8* %a, i8* %b) nounwind {
store i8 0, i8* %a, !tbaa !3
- %y = load i8* %b, !tbaa !4
+ %y = load i8, i8* %b, !tbaa !4
store i8 1, i8* %a, !tbaa !3
ret i8 %y
}
; CHECK: @test1_yes
-; CHECK-NEXT: load i8* %b
+; CHECK-NEXT: load i8, i8* %b
; CHECK-NEXT: store i8 1, i8* %a
; CHECK-NEXT: ret i8 %y
define i8 @test1_yes(i8* %a, i8* %b) nounwind {
store i8 0, i8* %a
- %y = load i8* %b, !tbaa !5
+ %y = load i8, i8* %b, !tbaa !5
store i8 1, i8* %a
ret i8 %y
}
; CHECK: @test1_no
; CHECK-NEXT: store i8 0, i8* %a
-; CHECK-NEXT: load i8* %b
+; CHECK-NEXT: load i8, i8* %b
; CHECK-NEXT: store i8 1, i8* %a
; CHECK-NEXT: ret i8 %y
define i8 @test1_no(i8* %a, i8* %b) nounwind {
store i8 0, i8* %a
- %y = load i8* %b, !tbaa !6
+ %y = load i8, i8* %b, !tbaa !6
store i8 1, i8* %a
ret i8 %y
}
Modified: llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/dynamic-indices.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/dynamic-indices.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/dynamic-indices.ll (original)
+++ llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/dynamic-indices.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@ target datalayout = "e-p:64:64:64-i1:8:8
; CHECK: for.end:
; CHECK: %arrayidx31 = getelementptr inbounds %union.vector_t, %union.vector_t* %t, i64 0, i32 0, i64 1
-; CHECK: %tmp32 = load i64* %arrayidx31, align 8, !tbaa [[TAG:!.*]]
+; CHECK: %tmp32 = load i64, i64* %arrayidx31, align 8, !tbaa [[TAG:!.*]]
define void @vrlh(%union.vector_t* %va, %union.vector_t* %vb, %union.vector_t* %vd) nounwind {
entry:
@@ -26,21 +26,21 @@ for.body:
%idxprom = sext i32 %sub to i64
%half = bitcast %union.vector_t* %vb to [8 x i16]*
%arrayidx = getelementptr inbounds [8 x i16], [8 x i16]* %half, i64 0, i64 %idxprom
- %tmp4 = load i16* %arrayidx, align 2, !tbaa !0
+ %tmp4 = load i16, i16* %arrayidx, align 2, !tbaa !0
%conv = zext i16 %tmp4 to i32
%and = and i32 %conv, 15
%sub6 = sub nsw i32 7, %i.01
%idxprom7 = sext i32 %sub6 to i64
%half9 = bitcast %union.vector_t* %va to [8 x i16]*
%arrayidx10 = getelementptr inbounds [8 x i16], [8 x i16]* %half9, i64 0, i64 %idxprom7
- %tmp11 = load i16* %arrayidx10, align 2, !tbaa !0
+ %tmp11 = load i16, i16* %arrayidx10, align 2, !tbaa !0
%conv12 = zext i16 %tmp11 to i32
%shl = shl i32 %conv12, %and
%sub15 = sub nsw i32 7, %i.01
%idxprom16 = sext i32 %sub15 to i64
%half18 = bitcast %union.vector_t* %va to [8 x i16]*
%arrayidx19 = getelementptr inbounds [8 x i16], [8 x i16]* %half18, i64 0, i64 %idxprom16
- %tmp20 = load i16* %arrayidx19, align 2, !tbaa !0
+ %tmp20 = load i16, i16* %arrayidx19, align 2, !tbaa !0
%conv21 = zext i16 %tmp20 to i32
%sub23 = sub nsw i32 16, %and
%shr = lshr i32 %conv21, %sub23
@@ -57,11 +57,11 @@ for.body:
for.end: ; preds = %for.body
%arrayidx31 = getelementptr inbounds %union.vector_t, %union.vector_t* %t, i64 0, i32 0, i64 1
- %tmp32 = load i64* %arrayidx31, align 8, !tbaa !3
+ %tmp32 = load i64, i64* %arrayidx31, align 8, !tbaa !3
%arrayidx35 = getelementptr inbounds %union.vector_t, %union.vector_t* %vd, i64 0, i32 0, i64 1
store i64 %tmp32, i64* %arrayidx35, align 8, !tbaa !3
%arrayidx37 = getelementptr inbounds %union.vector_t, %union.vector_t* %t, i64 0, i32 0, i64 0
- %tmp38 = load i64* %arrayidx37, align 8, !tbaa !3
+ %tmp38 = load i64, i64* %arrayidx37, align 8, !tbaa !3
%arrayidx41 = getelementptr inbounds %union.vector_t, %union.vector_t* %vd, i64 0, i32 0, i64 0
store i64 %tmp38, i64* %arrayidx41, align 8, !tbaa !3
ret void
@@ -82,7 +82,7 @@ entry:
for.body: ; preds = %entry, %for.body
%i2.01 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%f = getelementptr inbounds %struct.X, %struct.X* %a, i64 %i2.01, i32 1
- %tmp6 = load float* %f, align 4, !tbaa !5
+ %tmp6 = load float, float* %f, align 4, !tbaa !5
%mul = fmul float %tmp6, 0x40019999A0000000
store float %mul, float* %f, align 4, !tbaa !5
%inc = add nsw i64 %i2.01, 1
@@ -91,7 +91,7 @@ for.body:
for.end: ; preds = %for.body
%i9 = getelementptr inbounds %struct.X, %struct.X* %a, i64 0, i32 0
- %tmp10 = load i32* %i9, align 4, !tbaa !4
+ %tmp10 = load i32, i32* %i9, align 4, !tbaa !4
ret i32 %tmp10
}
@@ -110,7 +110,7 @@ entry:
for.body: ; preds = %entry, %for.body
%i.01 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
%i5 = getelementptr inbounds %struct.X, %struct.X* %a, i64 %i.01, i32 0
- %tmp6 = load i32* %i5, align 4, !tbaa !4
+ %tmp6 = load i32, i32* %i5, align 4, !tbaa !4
%mul = mul nsw i32 %tmp6, 3
store i32 %mul, i32* %i5, align 4, !tbaa !4
%inc = add nsw i64 %i.01, 1
@@ -119,7 +119,7 @@ for.body:
for.end: ; preds = %for.body
%f9 = getelementptr inbounds %struct.X, %struct.X* %a, i64 0, i32 1
- %tmp10 = load float* %f9, align 4, !tbaa !5
+ %tmp10 = load float, float* %f9, align 4, !tbaa !5
ret float %tmp10
}
Modified: llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/gvn-nonlocal-type-mismatch.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/gvn-nonlocal-type-mismatch.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/gvn-nonlocal-type-mismatch.ll (original)
+++ llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/gvn-nonlocal-type-mismatch.ll Fri Feb 27 15:17:42 2015
@@ -17,7 +17,7 @@ entry:
br i1 %c, label %if.else, label %if.then
if.then:
- %t = load i32* %p, !tbaa !1
+ %t = load i32, i32* %p, !tbaa !1
store i32 %t, i32* %q
ret void
@@ -32,11 +32,11 @@ if.else:
; CHECK: @watch_out_for_type_change
; CHECK: if.then:
-; CHECK: %t = load i32* %p
+; CHECK: %t = load i32, i32* %p
; CHECK: store i32 %t, i32* %q
; CHECK: ret void
; CHECK: if.else:
-; CHECK: %u = load i32* %p
+; CHECK: %u = load i32, i32* %p
; CHECK: store i32 %u, i32* %q
define void @watch_out_for_type_change(i1 %c, i32* %p, i32* %p1, i32* %q) nounwind {
@@ -46,12 +46,12 @@ entry:
br i1 %c, label %if.else, label %if.then
if.then:
- %t = load i32* %p, !tbaa !3
+ %t = load i32, i32* %p, !tbaa !3
store i32 %t, i32* %q
ret void
if.else:
- %u = load i32* %p, !tbaa !4
+ %u = load i32, i32* %p, !tbaa !4
store i32 %u, i32* %q
ret void
}
@@ -64,7 +64,7 @@ if.else:
; CHECK: store i32 0, i32* %q
; CHECK: ret void
; CHECK: if.else:
-; CHECK: %u = load i32* %p
+; CHECK: %u = load i32, i32* %p
; CHECK: store i32 %u, i32* %q
define void @watch_out_for_another_type_change(i1 %c, i32* %p, i32* %p1, i32* %q) nounwind {
@@ -74,12 +74,12 @@ entry:
br i1 %c, label %if.else, label %if.then
if.then:
- %t = load i32* %p, !tbaa !4
+ %t = load i32, i32* %p, !tbaa !4
store i32 %t, i32* %q
ret void
if.else:
- %u = load i32* %p, !tbaa !3
+ %u = load i32, i32* %p, !tbaa !3
store i32 %u, i32* %q
ret void
}
Modified: llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/licm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/licm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/licm.ll (original)
+++ llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/licm.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
; CHECK: @foo
; CHECK: entry:
-; CHECK-NEXT: %tmp3 = load double** @P, !tbaa !0
+; CHECK-NEXT: %tmp3 = load double*, double** @P, !tbaa !0
; CHECK-NEXT: br label %for.body
@P = common global double* null
@@ -16,9 +16,9 @@ entry:
for.body: ; preds = %entry, %for.body
%i.07 = phi i64 [ %inc, %for.body ], [ 0, %entry ]
- %tmp3 = load double** @P, !tbaa !1
+ %tmp3 = load double*, double** @P, !tbaa !1
%scevgep = getelementptr double, double* %tmp3, i64 %i.07
- %tmp4 = load double* %scevgep, !tbaa !2
+ %tmp4 = load double, double* %scevgep, !tbaa !2
%mul = fmul double %tmp4, 2.300000e+00
store double %mul, double* %scevgep, !tbaa !2
%inc = add i64 %i.07, 1
@@ -49,9 +49,9 @@ entry:
br label %loop
loop:
- %tmp51 = load i8** %p, !tbaa !4
+ %tmp51 = load i8*, i8** %p, !tbaa !4
store i8* %tmp51, i8** %p
- %tmp40 = load i8* %q, !tbaa !5
+ %tmp40 = load i8, i8* %q, !tbaa !5
store i8 %tmp40, i8* %q
br label %loop
}
Modified: llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/placement-tbaa.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/placement-tbaa.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/placement-tbaa.ll (original)
+++ llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/placement-tbaa.ll Fri Feb 27 15:17:42 2015
@@ -33,20 +33,20 @@ entry:
%call = call noalias i8* @_Znwm(i64 8)
%0 = bitcast i8* %call to %struct.Foo*
store %struct.Foo* %0, %struct.Foo** %f, align 8, !tbaa !4
- %1 = load %struct.Foo** %f, align 8, !tbaa !4
+ %1 = load %struct.Foo*, %struct.Foo** %f, align 8, !tbaa !4
%i = getelementptr inbounds %struct.Foo, %struct.Foo* %1, i32 0, i32 0
store i64 1, i64* %i, align 8, !tbaa !6
store i32 0, i32* %i1, align 4, !tbaa !0
br label %for.cond
for.cond:
- %2 = load i32* %i1, align 4, !tbaa !0
- %3 = load i32* %n.addr, align 4, !tbaa !0
+ %2 = load i32, i32* %i1, align 4, !tbaa !0
+ %3 = load i32, i32* %n.addr, align 4, !tbaa !0
%cmp = icmp slt i32 %2, %3
br i1 %cmp, label %for.body, label %for.end
for.body:
- %4 = load %struct.Foo** %f, align 8, !tbaa !4
+ %4 = load %struct.Foo*, %struct.Foo** %f, align 8, !tbaa !4
%5 = bitcast %struct.Foo* %4 to i8*
%new.isnull = icmp eq i8* %5, null
br i1 %new.isnull, label %new.cont, label %new.notnull
@@ -58,10 +58,10 @@ new.notnull:
new.cont:
%7 = phi %struct.Bar* [ %6, %new.notnull ], [ null, %for.body ]
store %struct.Bar* %7, %struct.Bar** %b, align 8, !tbaa !4
- %8 = load %struct.Bar** %b, align 8, !tbaa !4
+ %8 = load %struct.Bar*, %struct.Bar** %b, align 8, !tbaa !4
%p = getelementptr inbounds %struct.Bar, %struct.Bar* %8, i32 0, i32 0
store i8* null, i8** %p, align 8, !tbaa !9
- %9 = load %struct.Foo** %f, align 8, !tbaa !4
+ %9 = load %struct.Foo*, %struct.Foo** %f, align 8, !tbaa !4
%10 = bitcast %struct.Foo* %9 to i8*
%new.isnull2 = icmp eq i8* %10, null
br i1 %new.isnull2, label %new.cont4, label %new.notnull3
@@ -73,23 +73,23 @@ new.notnull3:
new.cont4:
%12 = phi %struct.Foo* [ %11, %new.notnull3 ], [ null, %new.cont ]
store %struct.Foo* %12, %struct.Foo** %f, align 8, !tbaa !4
- %13 = load i32* %i1, align 4, !tbaa !0
+ %13 = load i32, i32* %i1, align 4, !tbaa !0
%conv = sext i32 %13 to i64
- %14 = load %struct.Foo** %f, align 8, !tbaa !4
+ %14 = load %struct.Foo*, %struct.Foo** %f, align 8, !tbaa !4
%i5 = getelementptr inbounds %struct.Foo, %struct.Foo* %14, i32 0, i32 0
store i64 %conv, i64* %i5, align 8, !tbaa !6
br label %for.inc
for.inc:
- %15 = load i32* %i1, align 4, !tbaa !0
+ %15 = load i32, i32* %i1, align 4, !tbaa !0
%inc = add nsw i32 %15, 1
store i32 %inc, i32* %i1, align 4, !tbaa !0
br label %for.cond
for.end:
- %16 = load %struct.Foo** %f, align 8, !tbaa !4
+ %16 = load %struct.Foo*, %struct.Foo** %f, align 8, !tbaa !4
%i6 = getelementptr inbounds %struct.Foo, %struct.Foo* %16, i32 0, i32 0
- %17 = load i64* %i6, align 8, !tbaa !6
+ %17 = load i64, i64* %i6, align 8, !tbaa !6
ret i64 %17
}
Modified: llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/precedence.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/precedence.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/precedence.ll (original)
+++ llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/precedence.ll Fri Feb 27 15:17:42 2015
@@ -18,7 +18,7 @@ entry:
store i32 0, i32* %x, !tbaa !0
%0 = bitcast i32* %x to float*
store float 0x4002666660000000, float* %0, !tbaa !3
- %tmp3 = load i32* %x, !tbaa !0
+ %tmp3 = load i32, i32* %x, !tbaa !0
ret i32 %tmp3
}
@@ -35,7 +35,7 @@ entry:
%0 = bitcast i64* %x to i8*
%1 = getelementptr i8, i8* %0, i64 1
store i8 1, i8* %1, !tbaa !5
- %tmp3 = load i64* %x, !tbaa !4
+ %tmp3 = load i64, i64* %x, !tbaa !4
ret i64 %tmp3
}
Modified: llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/sink.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/sink.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/sink.ll (original)
+++ llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/sink.ll Fri Feb 27 15:17:42 2015
@@ -1,11 +1,11 @@
; RUN: opt -tbaa -sink -S < %s | FileCheck %s
; CHECK: a:
-; CHECK: %f = load float* %p, !tbaa [[TAGA:!.*]]
+; CHECK: %f = load float, float* %p, !tbaa [[TAGA:!.*]]
; CHECK: store float %f, float* %q
define void @foo(float* %p, i1 %c, float* %q, float* %r) {
- %f = load float* %p, !tbaa !0
+ %f = load float, float* %p, !tbaa !0
store float 0.0, float* %r, !tbaa !1
br i1 %c, label %a, label %b
a:
Modified: llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll (original)
+++ llvm/trunk/test/Analysis/TypeBasedAliasAnalysis/tbaa-path.ll Fri Feb 27 15:17:42 2015
@@ -17,7 +17,7 @@ entry:
; OPT: define
; OPT: store i32 1
; OPT: store i32 4
-; OPT: %[[RET:.*]] = load i32*
+; OPT: %[[RET:.*]] = load i32, i32*
; OPT: ret i32 %[[RET]]
%s.addr = alloca i32*, align 8
%A.addr = alloca %struct.StructA*, align 8
@@ -25,13 +25,13 @@ entry:
store i32* %s, i32** %s.addr, align 8, !tbaa !0
store %struct.StructA* %A, %struct.StructA** %A.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
- %0 = load i32** %s.addr, align 8, !tbaa !0
+ %0 = load i32*, i32** %s.addr, align 8, !tbaa !0
store i32 1, i32* %0, align 4, !tbaa !6
- %1 = load %struct.StructA** %A.addr, align 8, !tbaa !0
+ %1 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
%f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %1, i32 0, i32 1
store i32 4, i32* %f32, align 4, !tbaa !8
- %2 = load i32** %s.addr, align 8, !tbaa !0
- %3 = load i32* %2, align 4, !tbaa !6
+ %2 = load i32*, i32** %s.addr, align 8, !tbaa !0
+ %3 = load i32, i32* %2, align 4, !tbaa !6
ret i32 %3
}
@@ -51,13 +51,13 @@ entry:
store i32* %s, i32** %s.addr, align 8, !tbaa !0
store %struct.StructA* %A, %struct.StructA** %A.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
- %0 = load i32** %s.addr, align 8, !tbaa !0
+ %0 = load i32*, i32** %s.addr, align 8, !tbaa !0
store i32 1, i32* %0, align 4, !tbaa !6
- %1 = load %struct.StructA** %A.addr, align 8, !tbaa !0
+ %1 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
%f16 = getelementptr inbounds %struct.StructA, %struct.StructA* %1, i32 0, i32 0
store i16 4, i16* %f16, align 2, !tbaa !11
- %2 = load i32** %s.addr, align 8, !tbaa !0
- %3 = load i32* %2, align 4, !tbaa !6
+ %2 = load i32*, i32** %s.addr, align 8, !tbaa !0
+ %3 = load i32, i32* %2, align 4, !tbaa !6
ret i32 %3
}
@@ -69,7 +69,7 @@ entry:
; OPT: define
; OPT: store i32 1
; OPT: store i32 4
-; OPT: %[[RET:.*]] = load i32*
+; OPT: %[[RET:.*]] = load i32, i32*
; OPT: ret i32 %[[RET]]
%A.addr = alloca %struct.StructA*, align 8
%B.addr = alloca %struct.StructB*, align 8
@@ -77,16 +77,16 @@ entry:
store %struct.StructA* %A, %struct.StructA** %A.addr, align 8, !tbaa !0
store %struct.StructB* %B, %struct.StructB** %B.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
- %0 = load %struct.StructA** %A.addr, align 8, !tbaa !0
+ %0 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
%f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %0, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !8
- %1 = load %struct.StructB** %B.addr, align 8, !tbaa !0
+ %1 = load %struct.StructB*, %struct.StructB** %B.addr, align 8, !tbaa !0
%a = getelementptr inbounds %struct.StructB, %struct.StructB* %1, i32 0, i32 1
%f321 = getelementptr inbounds %struct.StructA, %struct.StructA* %a, i32 0, i32 1
store i32 4, i32* %f321, align 4, !tbaa !12
- %2 = load %struct.StructA** %A.addr, align 8, !tbaa !0
+ %2 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
%f322 = getelementptr inbounds %struct.StructA, %struct.StructA* %2, i32 0, i32 1
- %3 = load i32* %f322, align 4, !tbaa !8
+ %3 = load i32, i32* %f322, align 4, !tbaa !8
ret i32 %3
}
@@ -106,16 +106,16 @@ entry:
store %struct.StructA* %A, %struct.StructA** %A.addr, align 8, !tbaa !0
store %struct.StructB* %B, %struct.StructB** %B.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
- %0 = load %struct.StructA** %A.addr, align 8, !tbaa !0
+ %0 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
%f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %0, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !8
- %1 = load %struct.StructB** %B.addr, align 8, !tbaa !0
+ %1 = load %struct.StructB*, %struct.StructB** %B.addr, align 8, !tbaa !0
%a = getelementptr inbounds %struct.StructB, %struct.StructB* %1, i32 0, i32 1
%f16 = getelementptr inbounds %struct.StructA, %struct.StructA* %a, i32 0, i32 0
store i16 4, i16* %f16, align 2, !tbaa !14
- %2 = load %struct.StructA** %A.addr, align 8, !tbaa !0
+ %2 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
%f321 = getelementptr inbounds %struct.StructA, %struct.StructA* %2, i32 0, i32 1
- %3 = load i32* %f321, align 4, !tbaa !8
+ %3 = load i32, i32* %f321, align 4, !tbaa !8
ret i32 %3
}
@@ -135,15 +135,15 @@ entry:
store %struct.StructA* %A, %struct.StructA** %A.addr, align 8, !tbaa !0
store %struct.StructB* %B, %struct.StructB** %B.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
- %0 = load %struct.StructA** %A.addr, align 8, !tbaa !0
+ %0 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
%f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %0, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !8
- %1 = load %struct.StructB** %B.addr, align 8, !tbaa !0
+ %1 = load %struct.StructB*, %struct.StructB** %B.addr, align 8, !tbaa !0
%f321 = getelementptr inbounds %struct.StructB, %struct.StructB* %1, i32 0, i32 2
store i32 4, i32* %f321, align 4, !tbaa !15
- %2 = load %struct.StructA** %A.addr, align 8, !tbaa !0
+ %2 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
%f322 = getelementptr inbounds %struct.StructA, %struct.StructA* %2, i32 0, i32 1
- %3 = load i32* %f322, align 4, !tbaa !8
+ %3 = load i32, i32* %f322, align 4, !tbaa !8
ret i32 %3
}
@@ -163,16 +163,16 @@ entry:
store %struct.StructA* %A, %struct.StructA** %A.addr, align 8, !tbaa !0
store %struct.StructB* %B, %struct.StructB** %B.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
- %0 = load %struct.StructA** %A.addr, align 8, !tbaa !0
+ %0 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
%f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %0, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !8
- %1 = load %struct.StructB** %B.addr, align 8, !tbaa !0
+ %1 = load %struct.StructB*, %struct.StructB** %B.addr, align 8, !tbaa !0
%a = getelementptr inbounds %struct.StructB, %struct.StructB* %1, i32 0, i32 1
%f32_2 = getelementptr inbounds %struct.StructA, %struct.StructA* %a, i32 0, i32 3
store i32 4, i32* %f32_2, align 4, !tbaa !16
- %2 = load %struct.StructA** %A.addr, align 8, !tbaa !0
+ %2 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
%f321 = getelementptr inbounds %struct.StructA, %struct.StructA* %2, i32 0, i32 1
- %3 = load i32* %f321, align 4, !tbaa !8
+ %3 = load i32, i32* %f321, align 4, !tbaa !8
ret i32 %3
}
@@ -192,15 +192,15 @@ entry:
store %struct.StructA* %A, %struct.StructA** %A.addr, align 8, !tbaa !0
store %struct.StructS* %S, %struct.StructS** %S.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
- %0 = load %struct.StructA** %A.addr, align 8, !tbaa !0
+ %0 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
%f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %0, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !8
- %1 = load %struct.StructS** %S.addr, align 8, !tbaa !0
+ %1 = load %struct.StructS*, %struct.StructS** %S.addr, align 8, !tbaa !0
%f321 = getelementptr inbounds %struct.StructS, %struct.StructS* %1, i32 0, i32 1
store i32 4, i32* %f321, align 4, !tbaa !17
- %2 = load %struct.StructA** %A.addr, align 8, !tbaa !0
+ %2 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
%f322 = getelementptr inbounds %struct.StructA, %struct.StructA* %2, i32 0, i32 1
- %3 = load i32* %f322, align 4, !tbaa !8
+ %3 = load i32, i32* %f322, align 4, !tbaa !8
ret i32 %3
}
@@ -220,15 +220,15 @@ entry:
store %struct.StructA* %A, %struct.StructA** %A.addr, align 8, !tbaa !0
store %struct.StructS* %S, %struct.StructS** %S.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
- %0 = load %struct.StructA** %A.addr, align 8, !tbaa !0
+ %0 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
%f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %0, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !8
- %1 = load %struct.StructS** %S.addr, align 8, !tbaa !0
+ %1 = load %struct.StructS*, %struct.StructS** %S.addr, align 8, !tbaa !0
%f16 = getelementptr inbounds %struct.StructS, %struct.StructS* %1, i32 0, i32 0
store i16 4, i16* %f16, align 2, !tbaa !19
- %2 = load %struct.StructA** %A.addr, align 8, !tbaa !0
+ %2 = load %struct.StructA*, %struct.StructA** %A.addr, align 8, !tbaa !0
%f321 = getelementptr inbounds %struct.StructA, %struct.StructA* %2, i32 0, i32 1
- %3 = load i32* %f321, align 4, !tbaa !8
+ %3 = load i32, i32* %f321, align 4, !tbaa !8
ret i32 %3
}
@@ -248,15 +248,15 @@ entry:
store %struct.StructS* %S, %struct.StructS** %S.addr, align 8, !tbaa !0
store %struct.StructS2* %S2, %struct.StructS2** %S2.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
- %0 = load %struct.StructS** %S.addr, align 8, !tbaa !0
+ %0 = load %struct.StructS*, %struct.StructS** %S.addr, align 8, !tbaa !0
%f32 = getelementptr inbounds %struct.StructS, %struct.StructS* %0, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !17
- %1 = load %struct.StructS2** %S2.addr, align 8, !tbaa !0
+ %1 = load %struct.StructS2*, %struct.StructS2** %S2.addr, align 8, !tbaa !0
%f321 = getelementptr inbounds %struct.StructS2, %struct.StructS2* %1, i32 0, i32 1
store i32 4, i32* %f321, align 4, !tbaa !20
- %2 = load %struct.StructS** %S.addr, align 8, !tbaa !0
+ %2 = load %struct.StructS*, %struct.StructS** %S.addr, align 8, !tbaa !0
%f322 = getelementptr inbounds %struct.StructS, %struct.StructS* %2, i32 0, i32 1
- %3 = load i32* %f322, align 4, !tbaa !17
+ %3 = load i32, i32* %f322, align 4, !tbaa !17
ret i32 %3
}
@@ -276,15 +276,15 @@ entry:
store %struct.StructS* %S, %struct.StructS** %S.addr, align 8, !tbaa !0
store %struct.StructS2* %S2, %struct.StructS2** %S2.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
- %0 = load %struct.StructS** %S.addr, align 8, !tbaa !0
+ %0 = load %struct.StructS*, %struct.StructS** %S.addr, align 8, !tbaa !0
%f32 = getelementptr inbounds %struct.StructS, %struct.StructS* %0, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !17
- %1 = load %struct.StructS2** %S2.addr, align 8, !tbaa !0
+ %1 = load %struct.StructS2*, %struct.StructS2** %S2.addr, align 8, !tbaa !0
%f16 = getelementptr inbounds %struct.StructS2, %struct.StructS2* %1, i32 0, i32 0
store i16 4, i16* %f16, align 2, !tbaa !22
- %2 = load %struct.StructS** %S.addr, align 8, !tbaa !0
+ %2 = load %struct.StructS*, %struct.StructS** %S.addr, align 8, !tbaa !0
%f321 = getelementptr inbounds %struct.StructS, %struct.StructS* %2, i32 0, i32 1
- %3 = load i32* %f321, align 4, !tbaa !17
+ %3 = load i32, i32* %f321, align 4, !tbaa !17
ret i32 %3
}
@@ -304,21 +304,21 @@ entry:
store %struct.StructC* %C, %struct.StructC** %C.addr, align 8, !tbaa !0
store %struct.StructD* %D, %struct.StructD** %D.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
- %0 = load %struct.StructC** %C.addr, align 8, !tbaa !0
+ %0 = load %struct.StructC*, %struct.StructC** %C.addr, align 8, !tbaa !0
%b = getelementptr inbounds %struct.StructC, %struct.StructC* %0, i32 0, i32 1
%a = getelementptr inbounds %struct.StructB, %struct.StructB* %b, i32 0, i32 1
%f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %a, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !23
- %1 = load %struct.StructD** %D.addr, align 8, !tbaa !0
+ %1 = load %struct.StructD*, %struct.StructD** %D.addr, align 8, !tbaa !0
%b1 = getelementptr inbounds %struct.StructD, %struct.StructD* %1, i32 0, i32 1
%a2 = getelementptr inbounds %struct.StructB, %struct.StructB* %b1, i32 0, i32 1
%f323 = getelementptr inbounds %struct.StructA, %struct.StructA* %a2, i32 0, i32 1
store i32 4, i32* %f323, align 4, !tbaa !25
- %2 = load %struct.StructC** %C.addr, align 8, !tbaa !0
+ %2 = load %struct.StructC*, %struct.StructC** %C.addr, align 8, !tbaa !0
%b4 = getelementptr inbounds %struct.StructC, %struct.StructC* %2, i32 0, i32 1
%a5 = getelementptr inbounds %struct.StructB, %struct.StructB* %b4, i32 0, i32 1
%f326 = getelementptr inbounds %struct.StructA, %struct.StructA* %a5, i32 0, i32 1
- %3 = load i32* %f326, align 4, !tbaa !23
+ %3 = load i32, i32* %f326, align 4, !tbaa !23
ret i32 %3
}
@@ -330,7 +330,7 @@ entry:
; OPT: define
; OPT: store i32 1
; OPT: store i32 4
-; OPT: %[[RET:.*]] = load i32*
+; OPT: %[[RET:.*]] = load i32, i32*
; OPT: ret i32 %[[RET]]
%C.addr = alloca %struct.StructC*, align 8
%D.addr = alloca %struct.StructD*, align 8
@@ -340,24 +340,24 @@ entry:
store %struct.StructC* %C, %struct.StructC** %C.addr, align 8, !tbaa !0
store %struct.StructD* %D, %struct.StructD** %D.addr, align 8, !tbaa !0
store i64 %count, i64* %count.addr, align 8, !tbaa !4
- %0 = load %struct.StructC** %C.addr, align 8, !tbaa !0
+ %0 = load %struct.StructC*, %struct.StructC** %C.addr, align 8, !tbaa !0
%b = getelementptr inbounds %struct.StructC, %struct.StructC* %0, i32 0, i32 1
store %struct.StructB* %b, %struct.StructB** %b1, align 8, !tbaa !0
- %1 = load %struct.StructD** %D.addr, align 8, !tbaa !0
+ %1 = load %struct.StructD*, %struct.StructD** %D.addr, align 8, !tbaa !0
%b3 = getelementptr inbounds %struct.StructD, %struct.StructD* %1, i32 0, i32 1
store %struct.StructB* %b3, %struct.StructB** %b2, align 8, !tbaa !0
- %2 = load %struct.StructB** %b1, align 8, !tbaa !0
+ %2 = load %struct.StructB*, %struct.StructB** %b1, align 8, !tbaa !0
%a = getelementptr inbounds %struct.StructB, %struct.StructB* %2, i32 0, i32 1
%f32 = getelementptr inbounds %struct.StructA, %struct.StructA* %a, i32 0, i32 1
store i32 1, i32* %f32, align 4, !tbaa !12
- %3 = load %struct.StructB** %b2, align 8, !tbaa !0
+ %3 = load %struct.StructB*, %struct.StructB** %b2, align 8, !tbaa !0
%a4 = getelementptr inbounds %struct.StructB, %struct.StructB* %3, i32 0, i32 1
%f325 = getelementptr inbounds %struct.StructA, %struct.StructA* %a4, i32 0, i32 1
store i32 4, i32* %f325, align 4, !tbaa !12
- %4 = load %struct.StructB** %b1, align 8, !tbaa !0
+ %4 = load %struct.StructB*, %struct.StructB** %b1, align 8, !tbaa !0
%a6 = getelementptr inbounds %struct.StructB, %struct.StructB* %4, i32 0, i32 1
%f327 = getelementptr inbounds %struct.StructA, %struct.StructA* %a6, i32 0, i32 1
- %5 = load i32* %f327, align 4, !tbaa !12
+ %5 = load i32, i32* %f327, align 4, !tbaa !12
ret i32 %5
}
Modified: llvm/trunk/test/Analysis/ValueTracking/memory-dereferenceable.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ValueTracking/memory-dereferenceable.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ValueTracking/memory-dereferenceable.ll (original)
+++ llvm/trunk/test/Analysis/ValueTracking/memory-dereferenceable.ll Fri Feb 27 15:17:42 2015
@@ -18,15 +18,15 @@ define void @test(i32 addrspace(1)* dere
; CHECK-NOT: %nparam
entry:
%globalptr = getelementptr inbounds [6 x i8], [6 x i8]* @globalstr, i32 0, i32 0
- %load1 = load i8* %globalptr
+ %load1 = load i8, i8* %globalptr
%alloca = alloca i1
- %load2 = load i1* %alloca
- %load3 = load i32 addrspace(1)* %dparam
+ %load2 = load i1, i1* %alloca
+ %load3 = load i32, i32 addrspace(1)* %dparam
%tok = tail call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 0, i32 addrspace(1)* %dparam)
%relocate = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %tok, i32 4, i32 4)
- %load4 = load i32 addrspace(1)* %relocate
+ %load4 = load i32, i32 addrspace(1)* %relocate
%nparam = getelementptr i32, i32 addrspace(1)* %dparam, i32 5
- %load5 = load i32 addrspace(1)* %nparam
+ %load5 = load i32, i32 addrspace(1)* %nparam
ret void
}
Modified: llvm/trunk/test/Assembler/2002-04-29-NameBinding.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Assembler/2002-04-29-NameBinding.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Assembler/2002-04-29-NameBinding.ll (original)
+++ llvm/trunk/test/Assembler/2002-04-29-NameBinding.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@
define i32 @createtask() {
%v1 = alloca i32 ;; Alloca should have one use!
- %reg112 = load i32* %v1 ;; This load should not use the global!
+ %reg112 = load i32, i32* %v1 ;; This load should not use the global!
ret i32 %reg112
}
Modified: llvm/trunk/test/Assembler/2002-08-19-BytecodeReader.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Assembler/2002-08-19-BytecodeReader.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Assembler/2002-08-19-BytecodeReader.ll (original)
+++ llvm/trunk/test/Assembler/2002-08-19-BytecodeReader.ll Fri Feb 27 15:17:42 2015
@@ -11,8 +11,8 @@
define void @Evaluate() {
%reg1321 = getelementptr %CHESS_POSITION, %CHESS_POSITION* @search, i64 0, i32 1 ; <i32*> [#uses=1]
- %reg114 = load i32* %reg1321 ; <i32> [#uses=0]
+ %reg114 = load i32, i32* %reg1321 ; <i32> [#uses=0]
%reg1801 = getelementptr %CHESS_POSITION, %CHESS_POSITION* @search, i64 0, i32 0 ; <i32*> [#uses=1]
- %reg182 = load i32* %reg1801 ; <i32> [#uses=0]
+ %reg182 = load i32, i32* %reg1801 ; <i32> [#uses=0]
ret void
}
Modified: llvm/trunk/test/Assembler/2003-08-20-ConstantExprGEP-Fold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Assembler/2003-08-20-ConstantExprGEP-Fold.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Assembler/2003-08-20-ConstantExprGEP-Fold.ll (original)
+++ llvm/trunk/test/Assembler/2003-08-20-ConstantExprGEP-Fold.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
@.str_1 = internal constant [6 x i8] c"_Bool\00" ; <[6 x i8]*> [#uses=2]
define i32 @test() {
- %tmp.54 = load i8* getelementptr ([6 x i8]* @.str_1, i64 0, i64 1) ; <i8> [#uses=1]
+ %tmp.54 = load i8, i8* getelementptr ([6 x i8]* @.str_1, i64 0, i64 1) ; <i8> [#uses=1]
%tmp.55 = icmp ne i8 %tmp.54, 66 ; <i1> [#uses=1]
br i1 %tmp.55, label %then.7, label %endif.7
Modified: llvm/trunk/test/Assembler/2004-06-07-VerifierBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Assembler/2004-06-07-VerifierBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Assembler/2004-06-07-VerifierBug.ll (original)
+++ llvm/trunk/test/Assembler/2004-06-07-VerifierBug.ll Fri Feb 27 15:17:42 2015
@@ -7,6 +7,6 @@ entry:
loop: ; preds = %loop
%tmp.4.i9 = getelementptr i32, i32* null, i32 %tmp.5.i10 ; <i32*> [#uses=1]
- %tmp.5.i10 = load i32* %tmp.4.i9 ; <i32> [#uses=1]
+ %tmp.5.i10 = load i32, i32* %tmp.4.i9 ; <i32> [#uses=1]
br label %loop
}
Modified: llvm/trunk/test/Assembler/2007-01-05-Cmp-ConstExpr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Assembler/2007-01-05-Cmp-ConstExpr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Assembler/2007-01-05-Cmp-ConstExpr.ll (original)
+++ llvm/trunk/test/Assembler/2007-01-05-Cmp-ConstExpr.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
define i32 @main(i32 %argc, i8** %argv) {
entry:
%tmp65 = getelementptr i8*, i8** %argv, i32 1 ; <i8**> [#uses=1]
- %tmp66 = load i8** %tmp65 ; <i8*> [#uses=0]
+ %tmp66 = load i8*, i8** %tmp65 ; <i8*> [#uses=0]
br i1 icmp ne (i32 sub (i32 ptrtoint (i8* getelementptr ([4 x i8]* @str, i32 0, i64 1) to i32), i32 ptrtoint ([4 x i8]* @str to i32)), i32 1), label %exit_1, label %exit_2
exit_1: ; preds = %entry
Modified: llvm/trunk/test/Assembler/2007-04-20-AlignedLoad.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Assembler/2007-04-20-AlignedLoad.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Assembler/2007-04-20-AlignedLoad.ll (original)
+++ llvm/trunk/test/Assembler/2007-04-20-AlignedLoad.ll Fri Feb 27 15:17:42 2015
@@ -3,6 +3,6 @@
define i32 @test(i32* %arg) {
entry:
- %tmp2 = load i32* %arg, align 1024 ; <i32> [#uses=1]
+ %tmp2 = load i32, i32* %arg, align 1024 ; <i32> [#uses=1]
ret i32 %tmp2
}
Modified: llvm/trunk/test/Assembler/2007-12-11-AddressSpaces.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Assembler/2007-12-11-AddressSpaces.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Assembler/2007-12-11-AddressSpaces.ll (original)
+++ llvm/trunk/test/Assembler/2007-12-11-AddressSpaces.ll Fri Feb 27 15:17:42 2015
@@ -12,15 +12,15 @@
define void @foo() {
entry:
- %tmp1 = load i32 addrspace(33)* addrspace(42)* getelementptr (%struct.mystruct addrspace(42)* @input, i32 0, i32 3), align 4 ; <i32 addrspace(33)*> [#uses=1]
+ %tmp1 = load i32 addrspace(33)*, i32 addrspace(33)* addrspace(42)* getelementptr (%struct.mystruct addrspace(42)* @input, i32 0, i32 3), align 4 ; <i32 addrspace(33)*> [#uses=1]
store i32 addrspace(33)* %tmp1, i32 addrspace(33)* addrspace(66)* getelementptr (%struct.mystruct addrspace(66)* @output, i32 0, i32 1), align 4
ret void
}
define i32 addrspace(11)* @bar(i32 addrspace(11)* addrspace(22)* addrspace(33)* %x) {
entry:
- %tmp1 = load i32 addrspace(11)* addrspace(22)* addrspace(33)* @y, align 4 ; <i32 addrspace(11)* addrspace(22)*> [#uses=2]
+ %tmp1 = load i32 addrspace(11)* addrspace(22)*, i32 addrspace(11)* addrspace(22)* addrspace(33)* @y, align 4 ; <i32 addrspace(11)* addrspace(22)*> [#uses=2]
store i32 addrspace(11)* addrspace(22)* %tmp1, i32 addrspace(11)* addrspace(22)* addrspace(33)* %x, align 4
- %tmp5 = load i32 addrspace(11)* addrspace(22)* %tmp1, align 4 ; <i32 addrspace(11)*> [#uses=1]
+ %tmp5 = load i32 addrspace(11)*, i32 addrspace(11)* addrspace(22)* %tmp1, align 4 ; <i32 addrspace(11)*> [#uses=1]
ret i32 addrspace(11)* %tmp5
}
Modified: llvm/trunk/test/Assembler/2010-02-05-FunctionLocalMetadataBecomesNull.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Assembler/2010-02-05-FunctionLocalMetadataBecomesNull.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Assembler/2010-02-05-FunctionLocalMetadataBecomesNull.ll (original)
+++ llvm/trunk/test/Assembler/2010-02-05-FunctionLocalMetadataBecomesNull.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@ define i32 @main() nounwind readonly {
; CHECK: call void @llvm.dbg.value(metadata i64 72,
call void @llvm.dbg.declare(metadata i64* %diff1, metadata !0, metadata !{!"0x102"})
store i64 72, i64* %diff1, align 8
- %v1 = load %struct.test** @TestArrayPtr, align 8 ; <%struct.test*> [#uses=1]
+ %v1 = load %struct.test*, %struct.test** @TestArrayPtr, align 8 ; <%struct.test*> [#uses=1]
%v2 = ptrtoint %struct.test* %v1 to i64 ; <i64> [#uses=1]
%v3 = sub i64 %v2, ptrtoint ([10 x %struct.test]* @TestArray to i64) ; <i64> [#uses=1]
store i64 %v3, i64* %diff1, align 8
Modified: llvm/trunk/test/Assembler/align-inst-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Assembler/align-inst-load.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Assembler/align-inst-load.ll (original)
+++ llvm/trunk/test/Assembler/align-inst-load.ll Fri Feb 27 15:17:42 2015
@@ -1,6 +1,6 @@
; RUN: not llvm-as %s -o /dev/null 2>/dev/null
define void @foo() {
- load i1* %p, align 1073741824
+ load i1, i1* %p, align 1073741824
ret void
}
Modified: llvm/trunk/test/Assembler/align-inst.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Assembler/align-inst.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Assembler/align-inst.ll (original)
+++ llvm/trunk/test/Assembler/align-inst.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
define void @foo() {
%p = alloca i1, align 536870912
- load i1* %p, align 536870912
+ load i1, i1* %p, align 536870912
store i1 false, i1* %p, align 536870912
ret void
}
Modified: llvm/trunk/test/Assembler/atomic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Assembler/atomic.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Assembler/atomic.ll (original)
+++ llvm/trunk/test/Assembler/atomic.ll Fri Feb 27 15:17:42 2015
@@ -3,10 +3,10 @@
; Basic smoke test for atomic operations.
define void @f(i32* %x) {
- ; CHECK: load atomic i32* %x unordered, align 4
- load atomic i32* %x unordered, align 4
- ; CHECK: load atomic volatile i32* %x singlethread acquire, align 4
- load atomic volatile i32* %x singlethread acquire, align 4
+ ; CHECK: load atomic i32, i32* %x unordered, align 4
+ load atomic i32, i32* %x unordered, align 4
+ ; CHECK: load atomic volatile i32, i32* %x singlethread acquire, align 4
+ load atomic volatile i32, i32* %x singlethread acquire, align 4
; CHECK: store atomic i32 3, i32* %x release, align 4
store atomic i32 3, i32* %x release, align 4
; CHECK: store atomic volatile i32 3, i32* %x singlethread monotonic, align 4
Modified: llvm/trunk/test/Assembler/fast-math-flags.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Assembler/fast-math-flags.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Assembler/fast-math-flags.ll (original)
+++ llvm/trunk/test/Assembler/fast-math-flags.ll Fri Feb 27 15:17:42 2015
@@ -9,12 +9,12 @@
define float @none(float %x, float %y) {
entry:
-; CHECK: %vec = load <3 x float>* @vec
- %vec = load <3 x float>* @vec
-; CHECK: %select = load i1* @select
- %select = load i1* @select
-; CHECK: %arr = load [3 x float]* @arr
- %arr = load [3 x float]* @arr
+; CHECK: %vec = load <3 x float>, <3 x float>* @vec
+ %vec = load <3 x float>, <3 x float>* @vec
+; CHECK: %select = load i1, i1* @select
+ %select = load i1, i1* @select
+; CHECK: %arr = load [3 x float], [3 x float]* @arr
+ %arr = load [3 x float], [3 x float]* @arr
; CHECK: %a = fadd float %x, %y
%a = fadd float %x, %y
@@ -43,12 +43,12 @@ entry:
; CHECK: no_nan
define float @no_nan(float %x, float %y) {
entry:
-; CHECK: %vec = load <3 x float>* @vec
- %vec = load <3 x float>* @vec
-; CHECK: %select = load i1* @select
- %select = load i1* @select
-; CHECK: %arr = load [3 x float]* @arr
- %arr = load [3 x float]* @arr
+; CHECK: %vec = load <3 x float>, <3 x float>* @vec
+ %vec = load <3 x float>, <3 x float>* @vec
+; CHECK: %select = load i1, i1* @select
+ %select = load i1, i1* @select
+; CHECK: %arr = load [3 x float], [3 x float]* @arr
+ %arr = load [3 x float], [3 x float]* @arr
; CHECK: %a = fadd nnan float %x, %y
%a = fadd nnan float %x, %y
@@ -77,12 +77,12 @@ entry:
; CHECK: no_nan_inf
define float @no_nan_inf(float %x, float %y) {
entry:
-; CHECK: %vec = load <3 x float>* @vec
- %vec = load <3 x float>* @vec
-; CHECK: %select = load i1* @select
- %select = load i1* @select
-; CHECK: %arr = load [3 x float]* @arr
- %arr = load [3 x float]* @arr
+; CHECK: %vec = load <3 x float>, <3 x float>* @vec
+ %vec = load <3 x float>, <3 x float>* @vec
+; CHECK: %select = load i1, i1* @select
+ %select = load i1, i1* @select
+; CHECK: %arr = load [3 x float], [3 x float]* @arr
+ %arr = load [3 x float], [3 x float]* @arr
; CHECK: %a = fadd nnan ninf float %x, %y
%a = fadd ninf nnan float %x, %y
@@ -111,12 +111,12 @@ entry:
; CHECK: mixed_flags
define float @mixed_flags(float %x, float %y) {
entry:
-; CHECK: %vec = load <3 x float>* @vec
- %vec = load <3 x float>* @vec
-; CHECK: %select = load i1* @select
- %select = load i1* @select
-; CHECK: %arr = load [3 x float]* @arr
- %arr = load [3 x float]* @arr
+; CHECK: %vec = load <3 x float>, <3 x float>* @vec
+ %vec = load <3 x float>, <3 x float>* @vec
+; CHECK: %select = load i1, i1* @select
+ %select = load i1, i1* @select
+; CHECK: %arr = load [3 x float], [3 x float]* @arr
+ %arr = load [3 x float], [3 x float]* @arr
; CHECK: %a = fadd nnan ninf float %x, %y
%a = fadd ninf nnan float %x, %y
Modified: llvm/trunk/test/Assembler/half-constprop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Assembler/half-constprop.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Assembler/half-constprop.ll (original)
+++ llvm/trunk/test/Assembler/half-constprop.ll Fri Feb 27 15:17:42 2015
@@ -9,8 +9,8 @@ entry:
%.compoundliteral = alloca float, align 4
store half 0xH4200, half* %a, align 2
store half 0xH4B9A, half* %b, align 2
- %tmp = load half* %a, align 2
- %tmp1 = load half* %b, align 2
+ %tmp = load half, half* %a, align 2
+ %tmp1 = load half, half* %b, align 2
%add = fadd half %tmp, %tmp1
; CHECK: 0xH4C8D
ret half %add
Modified: llvm/trunk/test/Assembler/half-conv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Assembler/half-conv.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Assembler/half-conv.ll (original)
+++ llvm/trunk/test/Assembler/half-conv.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@ entry:
%a = alloca half, align 2
%.compoundliteral = alloca float, align 4
store half 0xH4C8D, half* %a, align 2
- %tmp = load half* %a, align 2
+ %tmp = load half, half* %a, align 2
%conv = fpext half %tmp to float
; CHECK: 0x4032340000000000
ret float %conv
Modified: llvm/trunk/test/Assembler/insertextractvalue.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Assembler/insertextractvalue.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Assembler/insertextractvalue.ll (original)
+++ llvm/trunk/test/Assembler/insertextractvalue.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@
; CHECK-NEXT: store
; CHECK-NEXT: ret
define float @foo({{i32},{float, double}}* %p) nounwind {
- %t = load {{i32},{float, double}}* %p
+ %t = load {{i32},{float, double}}, {{i32},{float, double}}* %p
%s = extractvalue {{i32},{float, double}} %t, 1, 0
%r = insertvalue {{i32},{float, double}} %t, double 2.0, 1, 1
store {{i32},{float, double}} %r, {{i32},{float, double}}* %p
Added: llvm/trunk/test/Assembler/invalid-load-mismatched-explicit-type.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Assembler/invalid-load-mismatched-explicit-type.ll?rev=230794&view=auto
==============================================================================
--- llvm/trunk/test/Assembler/invalid-load-mismatched-explicit-type.ll (added)
+++ llvm/trunk/test/Assembler/invalid-load-mismatched-explicit-type.ll Fri Feb 27 15:17:42 2015
@@ -0,0 +1,6 @@
+; RUN: not llvm-as < %s 2>&1 | FileCheck %s
+; CHECK: <stdin>:4:13: error: explicit pointee type doesn't match operand's pointee type
+define void @test(i32* %t) {
+ %x = load i16, i32* %t
+ ret void
+}
Added: llvm/trunk/test/Assembler/invalid-load-missing-explicit-type.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Assembler/invalid-load-missing-explicit-type.ll?rev=230794&view=auto
==============================================================================
--- llvm/trunk/test/Assembler/invalid-load-missing-explicit-type.ll (added)
+++ llvm/trunk/test/Assembler/invalid-load-missing-explicit-type.ll Fri Feb 27 15:17:42 2015
@@ -0,0 +1,6 @@
+; RUN: not llvm-as < %s 2>&1 | FileCheck %s
+; CHECK: <stdin>:4:18: error: expected comma after load's type
+define void @test(i32* %t) {
+ %x = load i32* %t
+ ret void
+}
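
Taken together, the two new assembler tests pin down the parser diagnostics: dropping the comma-separated value type is an error ("expected comma after load's type"), and an explicit type that disagrees with the operand's pointee type is rejected ("explicit pointee type doesn't match operand's pointee type"). A sketch of the accepted form next to the rejected ones (hypothetical %p, not from the patch):

define i16 @ok(i16* %p) {
  %v = load i16, i16* %p   ; accepted: explicit type matches the pointee
  ret i16 %v
}
; rejected: %x = load i16* %p       (missing explicit type)
; rejected: %x = load i32, i16* %p  (mismatched explicit type)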
Modified: llvm/trunk/test/Assembler/numbered-values.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Assembler/numbered-values.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Assembler/numbered-values.ll (original)
+++ llvm/trunk/test/Assembler/numbered-values.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ entry:
%0 = alloca i32 ; <i32*>:0 [#uses=2]
%"alloca point" = bitcast i32 0 to i32 ; <i32> [#uses=0]
store i32 %X, i32* %X_addr
- %1 = load i32* %X_addr, align 4 ; <i32>:1 [#uses=1]
+ %1 = load i32, i32* %X_addr, align 4 ; <i32>:1 [#uses=1]
mul i32 %1, 4 ; <i32>:2 [#uses=1]
%3 = add i32 %2, 123 ; <i32>:3 [#uses=1]
store i32 %3, i32* %0, align 4
Modified: llvm/trunk/test/Assembler/unnamed.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Assembler/unnamed.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Assembler/unnamed.ll (original)
+++ llvm/trunk/test/Assembler/unnamed.ll Fri Feb 27 15:17:42 2015
@@ -16,7 +16,7 @@ module asm "this is another inline asm b
@3 = global x86_fp80 0xK4001E000000000000000
define float @foo(%0* %p) nounwind {
- %t = load %0* %p ; <%0> [#uses=2]
+ %t = load %0, %0* %p ; <%0> [#uses=2]
%s = extractvalue %0 %t, 1, 0 ; <float> [#uses=1]
%r = insertvalue %0 %t, double 2.000000e+00, 1, 1; <%0> [#uses=1]
store %0 %r, %0* %p
Modified: llvm/trunk/test/Assembler/upgrade-loop-metadata.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Assembler/upgrade-loop-metadata.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Assembler/upgrade-loop-metadata.ll (original)
+++ llvm/trunk/test/Assembler/upgrade-loop-metadata.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 16
br i1 %cmp, label %for.body, label %for.end, !llvm.loop !1
@@ -22,7 +22,7 @@ for.body:
br label %for.inc
for.inc: ; preds = %for.body
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
%inc = add nsw i32 %1, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
Modified: llvm/trunk/test/Assembler/uselistorder.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Assembler/uselistorder.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Assembler/uselistorder.ll (original)
+++ llvm/trunk/test/Assembler/uselistorder.ll Fri Feb 27 15:17:42 2015
@@ -48,7 +48,7 @@ first:
define i1 @loada() {
entry:
- %a = load i1* getelementptr ([4 x i1]* @a, i64 0, i64 2)
+ %a = load i1, i1* getelementptr ([4 x i1]* @a, i64 0, i64 2)
ret i1 %a
}
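
One subtlety in the hunk above: only the load gains an explicit type; the getelementptr constant expression used as its operand keeps the old single-type spelling here. (The GEP instruction form was migrated in r230786; constant-expression GEPs appear to have been handled separately.) A sketch of the post-patch shape, with a hypothetical global @g:

@g = global [4 x i1] zeroinitializer
define i1 @sketch() {
  ; new-style load, old-style gep constant expression
  %v = load i1, i1* getelementptr ([4 x i1]* @g, i64 0, i64 2)
  ret i1 %v
}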
Modified: llvm/trunk/test/Bitcode/arm32_neon_vcnt_upgrade.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Bitcode/arm32_neon_vcnt_upgrade.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Bitcode/arm32_neon_vcnt_upgrade.ll (original)
+++ llvm/trunk/test/Bitcode/arm32_neon_vcnt_upgrade.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@
define <4 x i16> @vclz16(<4 x i16>* %A) nounwind {
;CHECK: @vclz16
- %tmp1 = load <4 x i16>* %A
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = call <4 x i16> @llvm.arm.neon.vclz.v4i16(<4 x i16> %tmp1)
;CHECK: {{call.*@llvm.ctlz.v4i16\(<4 x i16>.*, i1 false}}
ret <4 x i16> %tmp2
@@ -12,7 +12,7 @@ define <4 x i16> @vclz16(<4 x i16>* %A)
define <8 x i8> @vcnt8(<8 x i8>* %A) nounwind {
;CHECK: @vcnt8
- %tmp1 = load <8 x i8>* %A
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = call <8 x i8> @llvm.arm.neon.vcnt.v8i8(<8 x i8> %tmp1)
;CHECK: call <8 x i8> @llvm.ctpop.v8i8(<8 x i8>
ret <8 x i8> %tmp2
Modified: llvm/trunk/test/Bitcode/case-ranges-3.3.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Bitcode/case-ranges-3.3.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Bitcode/case-ranges-3.3.ll (original)
+++ llvm/trunk/test/Bitcode/case-ranges-3.3.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ define i32 @foo(i32 %x) nounwind ssp uwt
%1 = alloca i32, align 4
%2 = alloca i32, align 4
store i32 %x, i32* %2, align 4
- %3 = load i32* %2, align 4
+ %3 = load i32, i32* %2, align 4
switch i32 %3, label %9 [
; CHECK: switch i32 %3, label %9
i32 -3, label %4
@@ -63,6 +63,6 @@ define i32 @foo(i32 %x) nounwind ssp uwt
br label %11
; <label>:11
- %12 = load i32* %1
+ %12 = load i32, i32* %1
ret i32 %12
}
Modified: llvm/trunk/test/Bitcode/function-encoding-rel-operands.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Bitcode/function-encoding-rel-operands.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Bitcode/function-encoding-rel-operands.ll (original)
+++ llvm/trunk/test/Bitcode/function-encoding-rel-operands.ll Fri Feb 27 15:17:42 2015
@@ -44,7 +44,7 @@ define double @test_float_binops(i32 %a)
define i1 @test_load(i32 %a, {i32, i32}* %ptr) nounwind {
entry:
%0 = getelementptr inbounds {i32, i32}, {i32, i32}* %ptr, i32 %a, i32 0
- %1 = load i32* %0
+ %1 = load i32, i32* %0
%2 = icmp eq i32 %1, %a
ret i1 %2
}
Modified: llvm/trunk/test/Bitcode/memInstructions.3.2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Bitcode/memInstructions.3.2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Bitcode/memInstructions.3.2.ll (original)
+++ llvm/trunk/test/Bitcode/memInstructions.3.2.ll Fri Feb 27 15:17:42 2015
@@ -27,53 +27,53 @@ entry:
%ptr1 = alloca i8
store i8 2, i8* %ptr1
-; CHECK: %res1 = load i8* %ptr1
- %res1 = load i8* %ptr1
+; CHECK: %res1 = load i8, i8* %ptr1
+ %res1 = load i8, i8* %ptr1
-; CHECK-NEXT: %res2 = load volatile i8* %ptr1
- %res2 = load volatile i8* %ptr1
+; CHECK-NEXT: %res2 = load volatile i8, i8* %ptr1
+ %res2 = load volatile i8, i8* %ptr1
-; CHECK-NEXT: %res3 = load i8* %ptr1, align 1
- %res3 = load i8* %ptr1, align 1
+; CHECK-NEXT: %res3 = load i8, i8* %ptr1, align 1
+ %res3 = load i8, i8* %ptr1, align 1
-; CHECK-NEXT: %res4 = load volatile i8* %ptr1, align 1
- %res4 = load volatile i8* %ptr1, align 1
+; CHECK-NEXT: %res4 = load volatile i8, i8* %ptr1, align 1
+ %res4 = load volatile i8, i8* %ptr1, align 1
-; CHECK-NEXT: %res5 = load i8* %ptr1, !nontemporal !0
- %res5 = load i8* %ptr1, !nontemporal !0
+; CHECK-NEXT: %res5 = load i8, i8* %ptr1, !nontemporal !0
+ %res5 = load i8, i8* %ptr1, !nontemporal !0
-; CHECK-NEXT: %res6 = load volatile i8* %ptr1, !nontemporal !0
- %res6 = load volatile i8* %ptr1, !nontemporal !0
+; CHECK-NEXT: %res6 = load volatile i8, i8* %ptr1, !nontemporal !0
+ %res6 = load volatile i8, i8* %ptr1, !nontemporal !0
-; CHECK-NEXT: %res7 = load i8* %ptr1, align 1, !nontemporal !0
- %res7 = load i8* %ptr1, align 1, !nontemporal !0
+; CHECK-NEXT: %res7 = load i8, i8* %ptr1, align 1, !nontemporal !0
+ %res7 = load i8, i8* %ptr1, align 1, !nontemporal !0
-; CHECK-NEXT: %res8 = load volatile i8* %ptr1, align 1, !nontemporal !0
- %res8 = load volatile i8* %ptr1, align 1, !nontemporal !0
+; CHECK-NEXT: %res8 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0
+ %res8 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0
-; CHECK-NEXT: %res9 = load i8* %ptr1, !invariant.load !1
- %res9 = load i8* %ptr1, !invariant.load !1
+; CHECK-NEXT: %res9 = load i8, i8* %ptr1, !invariant.load !1
+ %res9 = load i8, i8* %ptr1, !invariant.load !1
-; CHECK-NEXT: %res10 = load volatile i8* %ptr1, !invariant.load !1
- %res10 = load volatile i8* %ptr1, !invariant.load !1
+; CHECK-NEXT: %res10 = load volatile i8, i8* %ptr1, !invariant.load !1
+ %res10 = load volatile i8, i8* %ptr1, !invariant.load !1
-; CHECK-NEXT: %res11 = load i8* %ptr1, align 1, !invariant.load !1
- %res11 = load i8* %ptr1, align 1, !invariant.load !1
+; CHECK-NEXT: %res11 = load i8, i8* %ptr1, align 1, !invariant.load !1
+ %res11 = load i8, i8* %ptr1, align 1, !invariant.load !1
-; CHECK-NEXT: %res12 = load volatile i8* %ptr1, align 1, !invariant.load !1
- %res12 = load volatile i8* %ptr1, align 1, !invariant.load !1
+; CHECK-NEXT: %res12 = load volatile i8, i8* %ptr1, align 1, !invariant.load !1
+ %res12 = load volatile i8, i8* %ptr1, align 1, !invariant.load !1
-; CHECK-NEXT: %res13 = load i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
- %res13 = load i8* %ptr1, !nontemporal !0, !invariant.load !1
+; CHECK-NEXT: %res13 = load i8, i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+ %res13 = load i8, i8* %ptr1, !nontemporal !0, !invariant.load !1
-; CHECK-NEXT: %res14 = load volatile i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
- %res14 = load volatile i8* %ptr1, !nontemporal !0, !invariant.load !1
+; CHECK-NEXT: %res14 = load volatile i8, i8* %ptr1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+ %res14 = load volatile i8, i8* %ptr1, !nontemporal !0, !invariant.load !1
-; CHECK-NEXT: %res15 = load i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
- %res15 = load i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
+; CHECK-NEXT: %res15 = load i8, i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+ %res15 = load i8, i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
-; CHECK-NEXT: %res16 = load volatile i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
- %res16 = load volatile i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
+; CHECK-NEXT: %res16 = load volatile i8, i8* %ptr1, align 1, {{[(!nontemporal !0, !invariant.load !1) | (!invariant.load !1, !nontemporal !0)]}}
+ %res16 = load volatile i8, i8* %ptr1, align 1, !nontemporal !0, !invariant.load !1
ret void
}
@@ -83,53 +83,53 @@ entry:
%ptr1 = alloca i8
store i8 2, i8* %ptr1
-; CHECK: %res1 = load atomic i8* %ptr1 unordered, align 1
- %res1 = load atomic i8* %ptr1 unordered, align 1
+; CHECK: %res1 = load atomic i8, i8* %ptr1 unordered, align 1
+ %res1 = load atomic i8, i8* %ptr1 unordered, align 1
-; CHECK-NEXT: %res2 = load atomic i8* %ptr1 monotonic, align 1
- %res2 = load atomic i8* %ptr1 monotonic, align 1
+; CHECK-NEXT: %res2 = load atomic i8, i8* %ptr1 monotonic, align 1
+ %res2 = load atomic i8, i8* %ptr1 monotonic, align 1
-; CHECK-NEXT: %res3 = load atomic i8* %ptr1 acquire, align 1
- %res3 = load atomic i8* %ptr1 acquire, align 1
+; CHECK-NEXT: %res3 = load atomic i8, i8* %ptr1 acquire, align 1
+ %res3 = load atomic i8, i8* %ptr1 acquire, align 1
-; CHECK-NEXT: %res4 = load atomic i8* %ptr1 seq_cst, align 1
- %res4 = load atomic i8* %ptr1 seq_cst, align 1
+; CHECK-NEXT: %res4 = load atomic i8, i8* %ptr1 seq_cst, align 1
+ %res4 = load atomic i8, i8* %ptr1 seq_cst, align 1
-; CHECK-NEXT: %res5 = load atomic volatile i8* %ptr1 unordered, align 1
- %res5 = load atomic volatile i8* %ptr1 unordered, align 1
+; CHECK-NEXT: %res5 = load atomic volatile i8, i8* %ptr1 unordered, align 1
+ %res5 = load atomic volatile i8, i8* %ptr1 unordered, align 1
-; CHECK-NEXT: %res6 = load atomic volatile i8* %ptr1 monotonic, align 1
- %res6 = load atomic volatile i8* %ptr1 monotonic, align 1
+; CHECK-NEXT: %res6 = load atomic volatile i8, i8* %ptr1 monotonic, align 1
+ %res6 = load atomic volatile i8, i8* %ptr1 monotonic, align 1
-; CHECK-NEXT: %res7 = load atomic volatile i8* %ptr1 acquire, align 1
- %res7 = load atomic volatile i8* %ptr1 acquire, align 1
+; CHECK-NEXT: %res7 = load atomic volatile i8, i8* %ptr1 acquire, align 1
+ %res7 = load atomic volatile i8, i8* %ptr1 acquire, align 1
-; CHECK-NEXT: %res8 = load atomic volatile i8* %ptr1 seq_cst, align 1
- %res8 = load atomic volatile i8* %ptr1 seq_cst, align 1
+; CHECK-NEXT: %res8 = load atomic volatile i8, i8* %ptr1 seq_cst, align 1
+ %res8 = load atomic volatile i8, i8* %ptr1 seq_cst, align 1
-; CHECK-NEXT: %res9 = load atomic i8* %ptr1 singlethread unordered, align 1
- %res9 = load atomic i8* %ptr1 singlethread unordered, align 1
+; CHECK-NEXT: %res9 = load atomic i8, i8* %ptr1 singlethread unordered, align 1
+ %res9 = load atomic i8, i8* %ptr1 singlethread unordered, align 1
-; CHECK-NEXT: %res10 = load atomic i8* %ptr1 singlethread monotonic, align 1
- %res10 = load atomic i8* %ptr1 singlethread monotonic, align 1
+; CHECK-NEXT: %res10 = load atomic i8, i8* %ptr1 singlethread monotonic, align 1
+ %res10 = load atomic i8, i8* %ptr1 singlethread monotonic, align 1
-; CHECK-NEXT: %res11 = load atomic i8* %ptr1 singlethread acquire, align 1
- %res11 = load atomic i8* %ptr1 singlethread acquire, align 1
+; CHECK-NEXT: %res11 = load atomic i8, i8* %ptr1 singlethread acquire, align 1
+ %res11 = load atomic i8, i8* %ptr1 singlethread acquire, align 1
-; CHECK-NEXT: %res12 = load atomic i8* %ptr1 singlethread seq_cst, align 1
- %res12 = load atomic i8* %ptr1 singlethread seq_cst, align 1
+; CHECK-NEXT: %res12 = load atomic i8, i8* %ptr1 singlethread seq_cst, align 1
+ %res12 = load atomic i8, i8* %ptr1 singlethread seq_cst, align 1
-; CHECK-NEXT: %res13 = load atomic volatile i8* %ptr1 singlethread unordered, align 1
- %res13 = load atomic volatile i8* %ptr1 singlethread unordered, align 1
+; CHECK-NEXT: %res13 = load atomic volatile i8, i8* %ptr1 singlethread unordered, align 1
+ %res13 = load atomic volatile i8, i8* %ptr1 singlethread unordered, align 1
-; CHECK-NEXT: %res14 = load atomic volatile i8* %ptr1 singlethread monotonic, align 1
- %res14 = load atomic volatile i8* %ptr1 singlethread monotonic, align 1
+; CHECK-NEXT: %res14 = load atomic volatile i8, i8* %ptr1 singlethread monotonic, align 1
+ %res14 = load atomic volatile i8, i8* %ptr1 singlethread monotonic, align 1
-; CHECK-NEXT: %res15 = load atomic volatile i8* %ptr1 singlethread acquire, align 1
- %res15 = load atomic volatile i8* %ptr1 singlethread acquire, align 1
+; CHECK-NEXT: %res15 = load atomic volatile i8, i8* %ptr1 singlethread acquire, align 1
+ %res15 = load atomic volatile i8, i8* %ptr1 singlethread acquire, align 1
-; CHECK-NEXT: %res16 = load atomic volatile i8* %ptr1 singlethread seq_cst, align 1
- %res16 = load atomic volatile i8* %ptr1 singlethread seq_cst, align 1
+; CHECK-NEXT: %res16 = load atomic volatile i8, i8* %ptr1 singlethread seq_cst, align 1
+ %res16 = load atomic volatile i8, i8* %ptr1 singlethread seq_cst, align 1
ret void
}
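
As the res1 through res16 pairs above show, everything after the pointer operand is untouched by this migration: align, !nontemporal, !invariant.load, atomic orderings, and singlethread scope all keep their positions; only the leading "<ty>," is inserted. A compact hedged sketch (hypothetical function and metadata nodes):

define void @sketch(i8* %p) {
  %a = load i8, i8* %p, align 1, !nontemporal !0, !invariant.load !1
  %b = load atomic i8, i8* %p singlethread seq_cst, align 1
  ret void
}
!0 = !{i32 1}
!1 = !{}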
Modified: llvm/trunk/test/Bitcode/metadata-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Bitcode/metadata-2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Bitcode/metadata-2.ll (original)
+++ llvm/trunk/test/Bitcode/metadata-2.ll Fri Feb 27 15:17:42 2015
@@ -77,7 +77,7 @@ entry:
define internal void @_D5tango4core8BitManip16__moduleinfoCtorZ() nounwind {
moduleinfoCtorEntry:
- %current = load %ModuleReference** @_Dmodule_ref ; <%ModuleReference*> [#uses=1]
+ %current = load %ModuleReference*, %ModuleReference** @_Dmodule_ref ; <%ModuleReference*> [#uses=1]
store %ModuleReference* %current, %ModuleReference** getelementptr (%ModuleReference* @_D5tango4core8BitManip11__moduleRefZ, i32 0, i32 0)
store %ModuleReference* @_D5tango4core8BitManip11__moduleRefZ, %ModuleReference** @_Dmodule_ref
ret void
Modified: llvm/trunk/test/Bitcode/upgrade-loop-metadata.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Bitcode/upgrade-loop-metadata.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Bitcode/upgrade-loop-metadata.ll (original)
+++ llvm/trunk/test/Bitcode/upgrade-loop-metadata.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %0 = load i32* %i, align 4
+ %0 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %0, 16
br i1 %cmp, label %for.body, label %for.end, !llvm.loop !1
@@ -18,7 +18,7 @@ for.body:
br label %for.inc
for.inc: ; preds = %for.body
- %1 = load i32* %i, align 4
+ %1 = load i32, i32* %i, align 4
%inc = add nsw i32 %1, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
Modified: llvm/trunk/test/Bitcode/use-list-order.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Bitcode/use-list-order.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/Bitcode/use-list-order.ll (original)
+++ llvm/trunk/test/Bitcode/use-list-order.ll Fri Feb 27 15:17:42 2015
@@ -79,13 +79,13 @@ entry:
define i1 @loadb() {
entry:
- %b = load i1* @b
+ %b = load i1, i1* @b
ret i1 %b
}
define i1 @loada() {
entry:
- %a = load i1* getelementptr ([4 x i1]* @a, i64 0, i64 2)
+ %a = load i1, i1* getelementptr ([4 x i1]* @a, i64 0, i64 2)
ret i1 %a
}
@@ -115,7 +115,7 @@ first:
define i4 @globalAndFunctionFunctionUser() {
entry:
- %local = load i4* @globalAndFunction
+ %local = load i4, i4* @globalAndFunction
ret i4 %local
}
Modified: llvm/trunk/test/CodeGen/AArch64/128bit_load_store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/128bit_load_store.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/128bit_load_store.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/128bit_load_store.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ define fp128 @test_load_f128(fp128* read
; CHECK-LABEL: test_load_f128
; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}]
entry:
- %0 = load fp128* %ptr, align 16
+ %0 = load fp128, fp128* %ptr, align 16
ret fp128 %0
}
@@ -33,7 +33,7 @@ define i128 @test_vldrq_p128(i128* reado
entry:
%0 = bitcast i128* %ptr to fp128*
- %1 = load fp128* %0, align 16
+ %1 = load fp128, fp128* %0, align 16
%2 = bitcast fp128 %1 to i128
ret i128 %2
}
@@ -44,7 +44,7 @@ define void @test_ld_st_p128(i128* nocap
; CHECK-NEXT: str {{q[0-9]+}}, [{{x[0-9]+}}, #16]
entry:
%0 = bitcast i128* %ptr to fp128*
- %1 = load fp128* %0, align 16
+ %1 = load fp128, fp128* %0, align 16
%add.ptr = getelementptr inbounds i128, i128* %ptr, i64 1
%2 = bitcast i128* %add.ptr to fp128*
store fp128 %1, fp128* %2, align 16
Modified: llvm/trunk/test/CodeGen/AArch64/PBQP-chain.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/PBQP-chain.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/PBQP-chain.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/PBQP-chain.ll Fri Feb 27 15:17:42 2015
@@ -22,79 +22,79 @@ target triple = "aarch64"
; CHECK-ODD: fmadd {{d[0-9]*[13579]}}, {{d[0-9]*}}, {{d[0-9]*}}, {{d[0-9]*[13579]}}
define void @fir(double* nocapture %rx, double* nocapture %ry, double* nocapture %c, double* nocapture %x, double* nocapture %y) {
entry:
- %0 = load double* %c, align 8
- %1 = load double* %x, align 8
+ %0 = load double, double* %c, align 8
+ %1 = load double, double* %x, align 8
%mul = fmul fast double %1, %0
- %2 = load double* %y, align 8
+ %2 = load double, double* %y, align 8
%mul7 = fmul fast double %2, %0
%arrayidx.1 = getelementptr inbounds double, double* %c, i64 1
- %3 = load double* %arrayidx.1, align 8
+ %3 = load double, double* %arrayidx.1, align 8
%arrayidx2.1 = getelementptr inbounds double, double* %x, i64 1
- %4 = load double* %arrayidx2.1, align 8
+ %4 = load double, double* %arrayidx2.1, align 8
%mul.1 = fmul fast double %4, %3
%add.1 = fadd fast double %mul.1, %mul
%arrayidx6.1 = getelementptr inbounds double, double* %y, i64 1
- %5 = load double* %arrayidx6.1, align 8
+ %5 = load double, double* %arrayidx6.1, align 8
%mul7.1 = fmul fast double %5, %3
%add8.1 = fadd fast double %mul7.1, %mul7
%arrayidx.2 = getelementptr inbounds double, double* %c, i64 2
- %6 = load double* %arrayidx.2, align 8
+ %6 = load double, double* %arrayidx.2, align 8
%arrayidx2.2 = getelementptr inbounds double, double* %x, i64 2
- %7 = load double* %arrayidx2.2, align 8
+ %7 = load double, double* %arrayidx2.2, align 8
%mul.2 = fmul fast double %7, %6
%add.2 = fadd fast double %mul.2, %add.1
%arrayidx6.2 = getelementptr inbounds double, double* %y, i64 2
- %8 = load double* %arrayidx6.2, align 8
+ %8 = load double, double* %arrayidx6.2, align 8
%mul7.2 = fmul fast double %8, %6
%add8.2 = fadd fast double %mul7.2, %add8.1
%arrayidx.3 = getelementptr inbounds double, double* %c, i64 3
- %9 = load double* %arrayidx.3, align 8
+ %9 = load double, double* %arrayidx.3, align 8
%arrayidx2.3 = getelementptr inbounds double, double* %x, i64 3
- %10 = load double* %arrayidx2.3, align 8
+ %10 = load double, double* %arrayidx2.3, align 8
%mul.3 = fmul fast double %10, %9
%add.3 = fadd fast double %mul.3, %add.2
%arrayidx6.3 = getelementptr inbounds double, double* %y, i64 3
- %11 = load double* %arrayidx6.3, align 8
+ %11 = load double, double* %arrayidx6.3, align 8
%mul7.3 = fmul fast double %11, %9
%add8.3 = fadd fast double %mul7.3, %add8.2
%arrayidx.4 = getelementptr inbounds double, double* %c, i64 4
- %12 = load double* %arrayidx.4, align 8
+ %12 = load double, double* %arrayidx.4, align 8
%arrayidx2.4 = getelementptr inbounds double, double* %x, i64 4
- %13 = load double* %arrayidx2.4, align 8
+ %13 = load double, double* %arrayidx2.4, align 8
%mul.4 = fmul fast double %13, %12
%add.4 = fadd fast double %mul.4, %add.3
%arrayidx6.4 = getelementptr inbounds double, double* %y, i64 4
- %14 = load double* %arrayidx6.4, align 8
+ %14 = load double, double* %arrayidx6.4, align 8
%mul7.4 = fmul fast double %14, %12
%add8.4 = fadd fast double %mul7.4, %add8.3
%arrayidx.5 = getelementptr inbounds double, double* %c, i64 5
- %15 = load double* %arrayidx.5, align 8
+ %15 = load double, double* %arrayidx.5, align 8
%arrayidx2.5 = getelementptr inbounds double, double* %x, i64 5
- %16 = load double* %arrayidx2.5, align 8
+ %16 = load double, double* %arrayidx2.5, align 8
%mul.5 = fmul fast double %16, %15
%add.5 = fadd fast double %mul.5, %add.4
%arrayidx6.5 = getelementptr inbounds double, double* %y, i64 5
- %17 = load double* %arrayidx6.5, align 8
+ %17 = load double, double* %arrayidx6.5, align 8
%mul7.5 = fmul fast double %17, %15
%add8.5 = fadd fast double %mul7.5, %add8.4
%arrayidx.6 = getelementptr inbounds double, double* %c, i64 6
- %18 = load double* %arrayidx.6, align 8
+ %18 = load double, double* %arrayidx.6, align 8
%arrayidx2.6 = getelementptr inbounds double, double* %x, i64 6
- %19 = load double* %arrayidx2.6, align 8
+ %19 = load double, double* %arrayidx2.6, align 8
%mul.6 = fmul fast double %19, %18
%add.6 = fadd fast double %mul.6, %add.5
%arrayidx6.6 = getelementptr inbounds double, double* %y, i64 6
- %20 = load double* %arrayidx6.6, align 8
+ %20 = load double, double* %arrayidx6.6, align 8
%mul7.6 = fmul fast double %20, %18
%add8.6 = fadd fast double %mul7.6, %add8.5
%arrayidx.7 = getelementptr inbounds double, double* %c, i64 7
- %21 = load double* %arrayidx.7, align 8
+ %21 = load double, double* %arrayidx.7, align 8
%arrayidx2.7 = getelementptr inbounds double, double* %x, i64 7
- %22 = load double* %arrayidx2.7, align 8
+ %22 = load double, double* %arrayidx2.7, align 8
%mul.7 = fmul fast double %22, %21
%add.7 = fadd fast double %mul.7, %add.6
%arrayidx6.7 = getelementptr inbounds double, double* %y, i64 7
- %23 = load double* %arrayidx6.7, align 8
+ %23 = load double, double* %arrayidx6.7, align 8
%mul7.7 = fmul fast double %23, %21
%add8.7 = fadd fast double %mul7.7, %add8.6
store double %add.7, double* %rx, align 8
Modified: llvm/trunk/test/CodeGen/AArch64/PBQP-coalesce-benefit.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/PBQP-coalesce-benefit.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/PBQP-coalesce-benefit.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/PBQP-coalesce-benefit.ll Fri Feb 27 15:17:42 2015
@@ -3,11 +3,11 @@
; CHECK-LABEL: test:
define i32 @test(i32 %acc, i32* nocapture readonly %c) {
entry:
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
; CHECK-NOT: mov w{{[0-9]*}}, w0
%add = add nsw i32 %0, %acc
%arrayidx1 = getelementptr inbounds i32, i32* %c, i64 1
- %1 = load i32* %arrayidx1, align 4
+ %1 = load i32, i32* %arrayidx1, align 4
%add2 = add nsw i32 %add, %1
ret i32 %add2
}
Modified: llvm/trunk/test/CodeGen/AArch64/PBQP-csr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/PBQP-csr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/PBQP-csr.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/PBQP-csr.ll Fri Feb 27 15:17:42 2015
@@ -23,15 +23,15 @@ entry:
%na = getelementptr inbounds %rs, %rs* %r, i64 0, i32 0
%0 = bitcast double* %x.i to i8*
call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 72, i32 8, i1 false)
- %1 = load i32* %na, align 4
+ %1 = load i32, i32* %na, align 4
%cmp70 = icmp sgt i32 %1, 0
br i1 %cmp70, label %for.body.lr.ph, label %for.end
for.body.lr.ph: ; preds = %entry
%fn = getelementptr inbounds %rs, %rs* %r, i64 0, i32 4
- %2 = load %v** %fn, align 8
+ %2 = load %v*, %v** %fn, align 8
%fs = getelementptr inbounds %rs, %rs* %r, i64 0, i32 5
- %3 = load %v** %fs, align 8
+ %3 = load %v*, %v** %fs, align 8
%4 = sext i32 %1 to i64
br label %for.body
@@ -46,27 +46,27 @@ for.body:
%x1.i = getelementptr inbounds %v, %v* %3, i64 %indvars.iv, i32 0
%y.i56 = getelementptr inbounds %v, %v* %2, i64 %indvars.iv, i32 1
%10 = bitcast double* %x.i54 to <2 x double>*
- %11 = load <2 x double>* %10, align 8
+ %11 = load <2 x double>, <2 x double>* %10, align 8
%y2.i = getelementptr inbounds %v, %v* %3, i64 %indvars.iv, i32 1
%12 = bitcast double* %x1.i to <2 x double>*
- %13 = load <2 x double>* %12, align 8
+ %13 = load <2 x double>, <2 x double>* %12, align 8
%14 = fadd fast <2 x double> %13, %11
%z.i57 = getelementptr inbounds %v, %v* %2, i64 %indvars.iv, i32 2
- %15 = load double* %z.i57, align 8
+ %15 = load double, double* %z.i57, align 8
%z4.i = getelementptr inbounds %v, %v* %3, i64 %indvars.iv, i32 2
- %16 = load double* %z4.i, align 8
+ %16 = load double, double* %z4.i, align 8
%add5.i = fadd fast double %16, %15
%17 = fadd fast <2 x double> %6, %11
%18 = bitcast double* %x.i to <2 x double>*
store <2 x double> %17, <2 x double>* %18, align 8
- %19 = load double* %x1.i, align 8
+ %19 = load double, double* %x1.i, align 8
%20 = insertelement <2 x double> undef, double %15, i32 0
%21 = insertelement <2 x double> %20, double %19, i32 1
%22 = fadd fast <2 x double> %7, %21
%23 = bitcast double* %z.i to <2 x double>*
store <2 x double> %22, <2 x double>* %23, align 8
%24 = bitcast double* %y2.i to <2 x double>*
- %25 = load <2 x double>* %24, align 8
+ %25 = load <2 x double>, <2 x double>* %24, align 8
%26 = fadd fast <2 x double> %8, %25
%27 = bitcast double* %y.i62 to <2 x double>*
store <2 x double> %26, <2 x double>* %27, align 8
Modified: llvm/trunk/test/CodeGen/AArch64/Redundantstore.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/Redundantstore.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/Redundantstore.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/Redundantstore.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ target datalayout = "e-m:e-i64:64-f80:12
; CHECK-NOT: stur
define i8* @test(i32 %size) {
entry:
- %0 = load i8** @end_of_array, align 8
+ %0 = load i8*, i8** @end_of_array, align 8
%conv = sext i32 %size to i64
%and = and i64 %conv, -8
%conv2 = trunc i64 %and to i32
Modified: llvm/trunk/test/CodeGen/AArch64/a57-csel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/a57-csel.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/a57-csel.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/a57-csel.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
; Check that the select is expanded into a branch sequence.
define i64 @f(i64 %a, i64 %b, i64* %c, i64 %d, i64 %e) {
; CHECK: cbz
- %x0 = load i64* %c
+ %x0 = load i64, i64* %c
%x1 = icmp eq i64 %x0, 0
%x2 = select i1 %x1, i64 %a, i64 %b
%x3 = add i64 %x2, %d
Modified: llvm/trunk/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/aarch64-2014-08-11-MachineCombinerCrash.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ entry:
for.body: ; preds = %for.body, %entry
%arrayidx5 = getelementptr inbounds i32, i32* null, i64 1, !dbg !43
- %0 = load i32* null, align 4, !dbg !45, !tbaa !46
+ %0 = load i32, i32* null, align 4, !dbg !45, !tbaa !46
%s1 = sub nsw i32 0, %0, !dbg !50
%n1 = sext i32 %s1 to i64, !dbg !50
%arrayidx21 = getelementptr inbounds i32, i32* null, i64 3, !dbg !51
Modified: llvm/trunk/test/CodeGen/AArch64/aarch64-2014-12-02-combine-soften.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/aarch64-2014-12-02-combine-soften.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/aarch64-2014-12-02-combine-soften.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/aarch64-2014-12-02-combine-soften.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ define void @foo() {
entry:
;CHECK-LABEL: foo:
;CHECK: __floatsisf
- %0 = load i32* @x, align 4
+ %0 = load i32, i32* @x, align 4
%conv = sitofp i32 %0 to float
store float %conv, float* bitcast (i32* @t to float*), align 4
ret void
Modified: llvm/trunk/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/aarch64-a57-fp-load-balancing.ll Fri Feb 27 15:17:42 2015
@@ -29,15 +29,15 @@ target triple = "aarch64"
define void @f1(double* nocapture readonly %p, double* nocapture %q) #0 {
entry:
- %0 = load double* %p, align 8
+ %0 = load double, double* %p, align 8
%arrayidx1 = getelementptr inbounds double, double* %p, i64 1
- %1 = load double* %arrayidx1, align 8
+ %1 = load double, double* %arrayidx1, align 8
%arrayidx2 = getelementptr inbounds double, double* %p, i64 2
- %2 = load double* %arrayidx2, align 8
+ %2 = load double, double* %arrayidx2, align 8
%arrayidx3 = getelementptr inbounds double, double* %p, i64 3
- %3 = load double* %arrayidx3, align 8
+ %3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %p, i64 4
- %4 = load double* %arrayidx4, align 8
+ %4 = load double, double* %arrayidx4, align 8
%mul = fmul fast double %0, %1
%add = fadd fast double %mul, %4
%mul5 = fmul fast double %1, %2
@@ -48,11 +48,11 @@ entry:
%add9 = fadd fast double %mul8, %sub
store double %add9, double* %q, align 8
%arrayidx11 = getelementptr inbounds double, double* %p, i64 5
- %5 = load double* %arrayidx11, align 8
+ %5 = load double, double* %arrayidx11, align 8
%arrayidx12 = getelementptr inbounds double, double* %p, i64 6
- %6 = load double* %arrayidx12, align 8
+ %6 = load double, double* %arrayidx12, align 8
%arrayidx13 = getelementptr inbounds double, double* %p, i64 7
- %7 = load double* %arrayidx13, align 8
+ %7 = load double, double* %arrayidx13, align 8
%mul15 = fmul fast double %6, %7
%mul16 = fmul fast double %0, %5
%add17 = fadd fast double %mul16, %mul15
@@ -81,21 +81,21 @@ entry:
define void @f2(double* nocapture readonly %p, double* nocapture %q) #0 {
entry:
- %0 = load double* %p, align 8
+ %0 = load double, double* %p, align 8
%arrayidx1 = getelementptr inbounds double, double* %p, i64 1
- %1 = load double* %arrayidx1, align 8
+ %1 = load double, double* %arrayidx1, align 8
%arrayidx2 = getelementptr inbounds double, double* %p, i64 2
- %2 = load double* %arrayidx2, align 8
+ %2 = load double, double* %arrayidx2, align 8
%arrayidx3 = getelementptr inbounds double, double* %p, i64 3
- %3 = load double* %arrayidx3, align 8
+ %3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %p, i64 4
- %4 = load double* %arrayidx4, align 8
+ %4 = load double, double* %arrayidx4, align 8
%arrayidx5 = getelementptr inbounds double, double* %p, i64 5
- %5 = load double* %arrayidx5, align 8
+ %5 = load double, double* %arrayidx5, align 8
%arrayidx6 = getelementptr inbounds double, double* %p, i64 6
- %6 = load double* %arrayidx6, align 8
+ %6 = load double, double* %arrayidx6, align 8
%arrayidx7 = getelementptr inbounds double, double* %p, i64 7
- %7 = load double* %arrayidx7, align 8
+ %7 = load double, double* %arrayidx7, align 8
%mul = fmul fast double %0, %1
%add = fadd fast double %mul, %7
%mul8 = fmul fast double %5, %6
@@ -127,15 +127,15 @@ entry:
define void @f3(double* nocapture readonly %p, double* nocapture %q) #0 {
entry:
- %0 = load double* %p, align 8
+ %0 = load double, double* %p, align 8
%arrayidx1 = getelementptr inbounds double, double* %p, i64 1
- %1 = load double* %arrayidx1, align 8
+ %1 = load double, double* %arrayidx1, align 8
%arrayidx2 = getelementptr inbounds double, double* %p, i64 2
- %2 = load double* %arrayidx2, align 8
+ %2 = load double, double* %arrayidx2, align 8
%arrayidx3 = getelementptr inbounds double, double* %p, i64 3
- %3 = load double* %arrayidx3, align 8
+ %3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %p, i64 4
- %4 = load double* %arrayidx4, align 8
+ %4 = load double, double* %arrayidx4, align 8
%mul = fmul fast double %0, %1
%add = fadd fast double %mul, %4
%mul5 = fmul fast double %1, %2
@@ -176,21 +176,21 @@ declare void @g(...) #1
define void @f4(float* nocapture readonly %p, float* nocapture %q) #0 {
entry:
- %0 = load float* %p, align 4
+ %0 = load float, float* %p, align 4
%arrayidx1 = getelementptr inbounds float, float* %p, i64 1
- %1 = load float* %arrayidx1, align 4
+ %1 = load float, float* %arrayidx1, align 4
%arrayidx2 = getelementptr inbounds float, float* %p, i64 2
- %2 = load float* %arrayidx2, align 4
+ %2 = load float, float* %arrayidx2, align 4
%arrayidx3 = getelementptr inbounds float, float* %p, i64 3
- %3 = load float* %arrayidx3, align 4
+ %3 = load float, float* %arrayidx3, align 4
%arrayidx4 = getelementptr inbounds float, float* %p, i64 4
- %4 = load float* %arrayidx4, align 4
+ %4 = load float, float* %arrayidx4, align 4
%arrayidx5 = getelementptr inbounds float, float* %p, i64 5
- %5 = load float* %arrayidx5, align 4
+ %5 = load float, float* %arrayidx5, align 4
%arrayidx6 = getelementptr inbounds float, float* %p, i64 6
- %6 = load float* %arrayidx6, align 4
+ %6 = load float, float* %arrayidx6, align 4
%arrayidx7 = getelementptr inbounds float, float* %p, i64 7
- %7 = load float* %arrayidx7, align 4
+ %7 = load float, float* %arrayidx7, align 4
%mul = fmul fast float %0, %1
%add = fadd fast float %mul, %7
%mul8 = fmul fast float %5, %6
@@ -222,15 +222,15 @@ entry:
define void @f5(float* nocapture readonly %p, float* nocapture %q) #0 {
entry:
- %0 = load float* %p, align 4
+ %0 = load float, float* %p, align 4
%arrayidx1 = getelementptr inbounds float, float* %p, i64 1
- %1 = load float* %arrayidx1, align 4
+ %1 = load float, float* %arrayidx1, align 4
%arrayidx2 = getelementptr inbounds float, float* %p, i64 2
- %2 = load float* %arrayidx2, align 4
+ %2 = load float, float* %arrayidx2, align 4
%arrayidx3 = getelementptr inbounds float, float* %p, i64 3
- %3 = load float* %arrayidx3, align 4
+ %3 = load float, float* %arrayidx3, align 4
%arrayidx4 = getelementptr inbounds float, float* %p, i64 4
- %4 = load float* %arrayidx4, align 4
+ %4 = load float, float* %arrayidx4, align 4
%mul = fmul fast float %0, %1
%add = fadd fast float %mul, %4
%mul5 = fmul fast float %1, %2
@@ -264,15 +264,15 @@ if.end:
define void @f6(double* nocapture readonly %p, double* nocapture %q) #0 {
entry:
- %0 = load double* %p, align 8
+ %0 = load double, double* %p, align 8
%arrayidx1 = getelementptr inbounds double, double* %p, i64 1
- %1 = load double* %arrayidx1, align 8
+ %1 = load double, double* %arrayidx1, align 8
%arrayidx2 = getelementptr inbounds double, double* %p, i64 2
- %2 = load double* %arrayidx2, align 8
+ %2 = load double, double* %arrayidx2, align 8
%arrayidx3 = getelementptr inbounds double, double* %p, i64 3
- %3 = load double* %arrayidx3, align 8
+ %3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %p, i64 4
- %4 = load double* %arrayidx4, align 8
+ %4 = load double, double* %arrayidx4, align 8
%mul = fmul fast double %0, %1
%add = fadd fast double %mul, %4
%mul5 = fmul fast double %1, %2
@@ -299,15 +299,15 @@ declare double @hh(double) #1
define void @f7(double* nocapture readonly %p, double* nocapture %q) #0 {
entry:
- %0 = load double* %p, align 8
+ %0 = load double, double* %p, align 8
%arrayidx1 = getelementptr inbounds double, double* %p, i64 1
- %1 = load double* %arrayidx1, align 8
+ %1 = load double, double* %arrayidx1, align 8
%arrayidx2 = getelementptr inbounds double, double* %p, i64 2
- %2 = load double* %arrayidx2, align 8
+ %2 = load double, double* %arrayidx2, align 8
%arrayidx3 = getelementptr inbounds double, double* %p, i64 3
- %3 = load double* %arrayidx3, align 8
+ %3 = load double, double* %arrayidx3, align 8
%arrayidx4 = getelementptr inbounds double, double* %p, i64 4
- %4 = load double* %arrayidx4, align 8
+ %4 = load double, double* %arrayidx4, align 8
%mul = fmul fast double %0, %1
%add = fadd fast double %mul, %4
%mul5 = fmul fast double %1, %2
Modified: llvm/trunk/test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/aarch64-address-type-promotion-assertion.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@ invoke.cont145:
br i1 %or.cond, label %if.then274, label %invoke.cont145
if.then274:
- %0 = load i32* null, align 4
+ %0 = load i32, i32* null, align 4
br i1 undef, label %invoke.cont291, label %if.else313
invoke.cont291:
Modified: llvm/trunk/test/CodeGen/AArch64/aarch64-address-type-promotion.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/aarch64-address-type-promotion.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/aarch64-address-type-promotion.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/aarch64-address-type-promotion.ll Fri Feb 27 15:17:42 2015
@@ -15,11 +15,11 @@ entry:
%add = add nsw i32 %i, 1
%idxprom = sext i32 %add to i64
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %idxprom
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add1 = add nsw i32 %i, 2
%idxprom2 = sext i32 %add1 to i64
%arrayidx3 = getelementptr inbounds i32, i32* %a, i64 %idxprom2
- %1 = load i32* %arrayidx3, align 4
+ %1 = load i32, i32* %arrayidx3, align 4
%add4 = add nsw i32 %1, %0
%idxprom5 = sext i32 %i to i64
%arrayidx6 = getelementptr inbounds i32, i32* %a, i64 %idxprom5
Modified: llvm/trunk/test/CodeGen/AArch64/aarch64-be-bv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/aarch64-be-bv.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/aarch64-be-bv.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/aarch64-be-bv.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ define i16 @movi_modimm_t1() nounwind {
; CHECK-NEXT: movi v[[REG2:[0-9]+]].4s, #0x1
; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = add <8 x i16> %in, <i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -20,7 +20,7 @@ define i16 @movi_modimm_t2() nounwind {
; CHECK-NEXT: movi v[[REG2:[0-9]+]].4s, #0x1, lsl #8
; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = add <8 x i16> %in, <i16 256, i16 0, i16 256, i16 0, i16 256, i16 0, i16 256, i16 0>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -32,7 +32,7 @@ define i16 @movi_modimm_t3() nounwind {
; CHECK-NEXT: movi v[[REG2:[0-9]+]].4s, #0x1, lsl #16
; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = add <8 x i16> %in, <i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -44,7 +44,7 @@ define i16 @movi_modimm_t4() nounwind {
; CHECK-NEXT: movi v[[REG2:[0-9]+]].4s, #0x1, lsl #24
; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = add <8 x i16> %in, <i16 0, i16 256, i16 0, i16 256, i16 0, i16 256, i16 0, i16 256>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -56,7 +56,7 @@ define i16 @movi_modimm_t5() nounwind {
; CHECK-NEXT: movi v[[REG2:[0-9]+]].8h, #0x1
; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = add <8 x i16> %in, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -68,7 +68,7 @@ define i16 @movi_modimm_t6() nounwind {
; CHECK-NEXT: movi v[[REG2:[0-9]+]].8h, #0x1, lsl #8
; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = add <8 x i16> %in, <i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -80,7 +80,7 @@ define i16 @movi_modimm_t7() nounwind {
; CHECK-NEXT: movi v[[REG2:[0-9]+]].4s, #0x1, msl #8
; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = add <8 x i16> %in, <i16 511, i16 0, i16 511, i16 0, i16 511, i16 0, i16 511, i16 0>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -92,7 +92,7 @@ define i16 @movi_modimm_t8() nounwind {
; CHECK-NEXT: movi v[[REG2:[0-9]+]].4s, #0x1, msl #16
; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = add <8 x i16> %in, <i16 65535, i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535, i16 1>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -104,7 +104,7 @@ define i16 @movi_modimm_t9() nounwind {
; CHECK-NEXT: movi v[[REG2:[0-9]+]].16b, #0x1
; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = add <8 x i16> %in, <i16 257, i16 257, i16 257, i16 257, i16 257, i16 257, i16 257, i16 257>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -116,7 +116,7 @@ define i16 @movi_modimm_t10() nounwind {
; CHECK-NEXT: movi v[[REG2:[0-9]+]].2d, #0x00ffff0000ffff
; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = add <8 x i16> %in, <i16 -1, i16 0, i16 -1, i16 0, i16 -1, i16 0, i16 -1, i16 0>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -128,7 +128,7 @@ define i16 @fmov_modimm_t11() nounwind {
; CHECK-NEXT: fmov v[[REG2:[0-9]+]].4s, #3.00000000
; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = add <8 x i16> %in, <i16 0, i16 16448, i16 0, i16 16448, i16 0, i16 16448, i16 0, i16 16448>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -140,7 +140,7 @@ define i16 @fmov_modimm_t12() nounwind {
; CHECK-NEXT: fmov v[[REG2:[0-9]+]].2d, #0.17968750
; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = add <8 x i16> %in, <i16 0, i16 0, i16 0, i16 16327, i16 0, i16 0, i16 0, i16 16327>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -152,7 +152,7 @@ define i16 @mvni_modimm_t1() nounwind {
; CHECK-NEXT: mvni v[[REG2:[0-9]+]].4s, #0x1
; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = add <8 x i16> %in, <i16 65534, i16 65535, i16 65534, i16 65535, i16 65534, i16 65535, i16 65534, i16 65535>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -164,7 +164,7 @@ define i16 @mvni_modimm_t2() nounwind {
; CHECK-NEXT: mvni v[[REG2:[0-9]+]].4s, #0x1, lsl #8
; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = add <8 x i16> %in, <i16 65279, i16 65535, i16 65279, i16 65535, i16 65279, i16 65535, i16 65279, i16 65535>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -176,7 +176,7 @@ define i16 @mvni_modimm_t3() nounwind {
; CHECK-NEXT: mvni v[[REG2:[0-9]+]].4s, #0x1, lsl #16
; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = add <8 x i16> %in, <i16 65535, i16 65534, i16 65535, i16 65534, i16 65535, i16 65534, i16 65535, i16 65534>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -188,7 +188,7 @@ define i16 @mvni_modimm_t4() nounwind {
; CHECK-NEXT: mvni v[[REG2:[0-9]+]].4s, #0x1, lsl #24
; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = add <8 x i16> %in, <i16 65535, i16 65279, i16 65535, i16 65279, i16 65535, i16 65279, i16 65535, i16 65279>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -200,7 +200,7 @@ define i16 @mvni_modimm_t5() nounwind {
; CHECK-NEXT: mvni v[[REG2:[0-9]+]].8h, #0x1
; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = add <8 x i16> %in, <i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -212,7 +212,7 @@ define i16 @mvni_modimm_t6() nounwind {
; CHECK-NEXT: mvni v[[REG2:[0-9]+]].8h, #0x1, lsl #8
; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = add <8 x i16> %in, <i16 65279, i16 65279, i16 65279, i16 65279, i16 65279, i16 65279, i16 65279, i16 65279>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -224,7 +224,7 @@ define i16 @mvni_modimm_t7() nounwind {
; CHECK-NEXT: mvni v[[REG2:[0-9]+]].4s, #0x1, msl #8
; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = add <8 x i16> %in, <i16 65024, i16 65535, i16 65024, i16 65535, i16 65024, i16 65535, i16 65024, i16 65535>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -236,7 +236,7 @@ define i16 @mvni_modimm_t8() nounwind {
; CHECK-NEXT: mvni v[[REG2:[0-9]+]].4s, #0x1, msl #16
; CHECK-NEXT: add v[[REG1]].8h, v[[REG1]].8h, v[[REG2]].8h
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = add <8 x i16> %in, <i16 0, i16 65534, i16 0, i16 65534, i16 0, i16 65534, i16 0, i16 65534>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -247,7 +247,7 @@ define i16 @bic_modimm_t1() nounwind {
; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}]
; CHECK-NEXT: bic v[[REG2:[0-9]+]].4s, #0x1
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = and <8 x i16> %in, <i16 65534, i16 65535, i16 65534, i16 65535, i16 65534, i16 65535, i16 65534, i16 65535>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -258,7 +258,7 @@ define i16 @bic_modimm_t2() nounwind {
; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}]
; CHECK-NEXT: bic v[[REG2:[0-9]+]].4s, #0x1, lsl #8
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = and <8 x i16> %in, <i16 65279, i16 65535, i16 65279, i16 65535, i16 65279, i16 65535, i16 65279, i16 65535>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -269,7 +269,7 @@ define i16 @bic_modimm_t3() nounwind {
; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}]
; CHECK-NEXT: bic v[[REG2:[0-9]+]].4s, #0x1, lsl #16
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = and <8 x i16> %in, <i16 65535, i16 65534, i16 65535, i16 65534, i16 65535, i16 65534, i16 65535, i16 65534>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -280,7 +280,7 @@ define i16 @bic_modimm_t4() nounwind {
; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}]
; CHECK-NEXT: bic v[[REG2:[0-9]+]].4s, #0x1, lsl #24
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = and <8 x i16> %in, <i16 65535, i16 65279, i16 65535, i16 65279, i16 65535, i16 65279, i16 65535, i16 65279>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -291,7 +291,7 @@ define i16 @bic_modimm_t5() nounwind {
; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}]
; CHECK-NEXT: bic v[[REG2:[0-9]+]].8h, #0x1
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = and <8 x i16> %in, <i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534, i16 65534>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -302,7 +302,7 @@ define i16 @bic_modimm_t6() nounwind {
; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}]
; CHECK-NEXT: bic v[[REG2:[0-9]+]].8h, #0x1, lsl #8
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = and <8 x i16> %in, <i16 65279, i16 65279, i16 65279, i16 65279, i16 65279, i16 65279, i16 65279, i16 65279>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -313,7 +313,7 @@ define i16 @orr_modimm_t1() nounwind {
; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}]
; CHECK-NEXT: orr v[[REG2:[0-9]+]].4s, #0x1
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = or <8 x i16> %in, <i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -324,7 +324,7 @@ define i16 @orr_modimm_t2() nounwind {
; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}]
; CHECK-NEXT: orr v[[REG2:[0-9]+]].4s, #0x1, lsl #8
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = or <8 x i16> %in, <i16 256, i16 0, i16 256, i16 0, i16 256, i16 0, i16 256, i16 0>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -335,7 +335,7 @@ define i16 @orr_modimm_t3() nounwind {
; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}]
; CHECK-NEXT: orr v[[REG2:[0-9]+]].4s, #0x1, lsl #16
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = or <8 x i16> %in, <i16 0, i16 1, i16 0, i16 1, i16 0, i16 1, i16 0, i16 1>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -346,7 +346,7 @@ define i16 @orr_modimm_t4() nounwind {
; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}]
; CHECK-NEXT: orr v[[REG2:[0-9]+]].4s, #0x1, lsl #24
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = or <8 x i16> %in, <i16 0, i16 256, i16 0, i16 256, i16 0, i16 256, i16 0, i16 256>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -357,7 +357,7 @@ define i16 @orr_modimm_t5() nounwind {
; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}]
; CHECK-NEXT: orr v[[REG2:[0-9]+]].8h, #0x1
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = or <8 x i16> %in, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
@@ -368,7 +368,7 @@ define i16 @orr_modimm_t6() nounwind {
; CHECK: ld1 { v[[REG1:[0-9]+]].8h }, [x{{[0-9]+}}]
; CHECK-NEXT: orr v[[REG2:[0-9]+]].8h, #0x1, lsl #8
; CHECK-NEXT: umov w{{[0-9]+}}, v[[REG1]].h[0]
- %in = load <8 x i16>* @vec_v8i16
+ %in = load <8 x i16>, <8 x i16>* @vec_v8i16
%rv = or <8 x i16> %in, <i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256>
%el = extractelement <8 x i16> %rv, i32 0
ret i16 %el
Modified: llvm/trunk/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/aarch64-fix-cortex-a53-835769.ll Fri Feb 27 15:17:42 2015
@@ -22,7 +22,7 @@ target triple = "aarch64--linux-gnu"
define i64 @f_load_madd_64(i64 %a, i64 %b, i64* nocapture readonly %c) #0 {
entry:
- %0 = load i64* %c, align 8
+ %0 = load i64, i64* %c, align 8
%mul = mul nsw i64 %0, %b
%add = add nsw i64 %mul, %a
ret i64 %add
@@ -41,7 +41,7 @@ entry:
define i32 @f_load_madd_32(i32 %a, i32 %b, i32* nocapture readonly %c) #0 {
entry:
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%mul = mul nsw i32 %0, %b
%add = add nsw i32 %mul, %a
ret i32 %add
@@ -56,7 +56,7 @@ entry:
define i64 @f_load_msub_64(i64 %a, i64 %b, i64* nocapture readonly %c) #0 {
entry:
- %0 = load i64* %c, align 8
+ %0 = load i64, i64* %c, align 8
%mul = mul nsw i64 %0, %b
%sub = sub nsw i64 %a, %mul
ret i64 %sub
@@ -72,7 +72,7 @@ entry:
define i32 @f_load_msub_32(i32 %a, i32 %b, i32* nocapture readonly %c) #0 {
entry:
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%mul = mul nsw i32 %0, %b
%sub = sub nsw i32 %a, %mul
ret i32 %sub
@@ -87,7 +87,7 @@ entry:
define i64 @f_load_mul_64(i64 %a, i64 %b, i64* nocapture readonly %c) #0 {
entry:
- %0 = load i64* %c, align 8
+ %0 = load i64, i64* %c, align 8
%mul = mul nsw i64 %0, %b
ret i64 %mul
}
@@ -101,7 +101,7 @@ entry:
define i32 @f_load_mul_32(i32 %a, i32 %b, i32* nocapture readonly %c) #0 {
entry:
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%mul = mul nsw i32 %0, %b
ret i32 %mul
}
@@ -115,7 +115,7 @@ entry:
define i64 @f_load_mneg_64(i64 %a, i64 %b, i64* nocapture readonly %c) #0 {
entry:
- %0 = load i64* %c, align 8
+ %0 = load i64, i64* %c, align 8
%mul = sub i64 0, %b
%sub = mul i64 %0, %mul
ret i64 %sub
@@ -133,7 +133,7 @@ entry:
define i32 @f_load_mneg_32(i32 %a, i32 %b, i32* nocapture readonly %c) #0 {
entry:
- %0 = load i32* %c, align 4
+ %0 = load i32, i32* %c, align 4
%mul = sub i32 0, %b
%sub = mul i32 %0, %mul
ret i32 %sub
@@ -154,7 +154,7 @@ entry:
%conv1 = sext i32 %c to i64
%mul = mul nsw i64 %conv1, %conv
%add = add nsw i64 %mul, %a
- %0 = load i32* %d, align 4
+ %0 = load i32, i32* %d, align 4
%conv2 = sext i32 %0 to i64
%add3 = add nsw i64 %add, %conv2
ret i64 %add3
@@ -174,7 +174,7 @@ entry:
%conv1 = sext i32 %c to i64
%mul = mul nsw i64 %conv1, %conv
%sub = sub i64 %a, %mul
- %0 = load i32* %d, align 4
+ %0 = load i32, i32* %d, align 4
%conv2 = sext i32 %0 to i64
%add = add nsw i64 %sub, %conv2
ret i64 %add
@@ -193,7 +193,7 @@ entry:
%conv = sext i32 %b to i64
%conv1 = sext i32 %c to i64
%mul = mul nsw i64 %conv1, %conv
- %0 = load i32* %d, align 4
+ %0 = load i32, i32* %d, align 4
%conv2 = sext i32 %0 to i64
%div = sdiv i64 %mul, %conv2
ret i64 %div
@@ -212,7 +212,7 @@ entry:
%conv1 = sext i32 %c to i64
%mul = sub nsw i64 0, %conv
%sub = mul i64 %conv1, %mul
- %0 = load i32* %d, align 4
+ %0 = load i32, i32* %d, align 4
%conv2 = sext i32 %0 to i64
%div = sdiv i64 %sub, %conv2
ret i64 %div
@@ -229,7 +229,7 @@ entry:
%conv1 = zext i32 %c to i64
%mul = mul i64 %conv1, %conv
%add = add i64 %mul, %a
- %0 = load i32* %d, align 4
+ %0 = load i32, i32* %d, align 4
%conv2 = zext i32 %0 to i64
%add3 = add i64 %add, %conv2
ret i64 %add3
@@ -249,7 +249,7 @@ entry:
%conv1 = zext i32 %c to i64
%mul = mul i64 %conv1, %conv
%sub = sub i64 %a, %mul
- %0 = load i32* %d, align 4
+ %0 = load i32, i32* %d, align 4
%conv2 = zext i32 %0 to i64
%add = add i64 %sub, %conv2
ret i64 %add
@@ -268,7 +268,7 @@ entry:
%conv = zext i32 %b to i64
%conv1 = zext i32 %c to i64
%mul = mul i64 %conv1, %conv
- %0 = load i32* %d, align 4
+ %0 = load i32, i32* %d, align 4
%conv2 = zext i32 %0 to i64
%div = udiv i64 %mul, %conv2
ret i64 %div
@@ -287,7 +287,7 @@ entry:
%conv1 = zext i32 %c to i64
%mul = sub nsw i64 0, %conv
%sub = mul i64 %conv1, %mul
- %0 = load i32* %d, align 4
+ %0 = load i32, i32* %d, align 4
%conv2 = zext i32 %0 to i64
%div = udiv i64 %sub, %conv2
ret i64 %div
@@ -300,7 +300,7 @@ entry:
define i64 @f_store_madd_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 {
entry:
- %0 = load i64* %cp, align 8
+ %0 = load i64, i64* %cp, align 8
store i64 %a, i64* %e, align 8
%mul = mul nsw i64 %0, %b
%add = add nsw i64 %mul, %a
@@ -317,7 +317,7 @@ entry:
define i32 @f_store_madd_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 {
entry:
- %0 = load i32* %cp, align 4
+ %0 = load i32, i32* %cp, align 4
store i32 %a, i32* %e, align 4
%mul = mul nsw i32 %0, %b
%add = add nsw i32 %mul, %a
@@ -333,7 +333,7 @@ entry:
define i64 @f_store_msub_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 {
entry:
- %0 = load i64* %cp, align 8
+ %0 = load i64, i64* %cp, align 8
store i64 %a, i64* %e, align 8
%mul = mul nsw i64 %0, %b
%sub = sub nsw i64 %a, %mul
@@ -350,7 +350,7 @@ entry:
define i32 @f_store_msub_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 {
entry:
- %0 = load i32* %cp, align 4
+ %0 = load i32, i32* %cp, align 4
store i32 %a, i32* %e, align 4
%mul = mul nsw i32 %0, %b
%sub = sub nsw i32 %a, %mul
@@ -366,7 +366,7 @@ entry:
define i64 @f_store_mul_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 {
entry:
- %0 = load i64* %cp, align 8
+ %0 = load i64, i64* %cp, align 8
store i64 %a, i64* %e, align 8
%mul = mul nsw i64 %0, %b
ret i64 %mul
@@ -381,7 +381,7 @@ entry:
define i32 @f_store_mul_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 {
entry:
- %0 = load i32* %cp, align 4
+ %0 = load i32, i32* %cp, align 4
store i32 %a, i32* %e, align 4
%mul = mul nsw i32 %0, %b
ret i32 %mul
@@ -396,7 +396,7 @@ entry:
define i64 @f_prefetch_madd_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 {
entry:
- %0 = load i64* %cp, align 8
+ %0 = load i64, i64* %cp, align 8
%1 = bitcast i64* %e to i8*
tail call void @llvm.prefetch(i8* %1, i32 0, i32 0, i32 1)
%mul = mul nsw i64 %0, %b
@@ -415,7 +415,7 @@ declare void @llvm.prefetch(i8* nocaptur
define i32 @f_prefetch_madd_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 {
entry:
- %0 = load i32* %cp, align 4
+ %0 = load i32, i32* %cp, align 4
%1 = bitcast i32* %e to i8*
tail call void @llvm.prefetch(i8* %1, i32 1, i32 0, i32 1)
%mul = mul nsw i32 %0, %b
@@ -431,7 +431,7 @@ entry:
define i64 @f_prefetch_msub_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 {
entry:
- %0 = load i64* %cp, align 8
+ %0 = load i64, i64* %cp, align 8
%1 = bitcast i64* %e to i8*
tail call void @llvm.prefetch(i8* %1, i32 0, i32 1, i32 1)
%mul = mul nsw i64 %0, %b
@@ -448,7 +448,7 @@ entry:
define i32 @f_prefetch_msub_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 {
entry:
- %0 = load i32* %cp, align 4
+ %0 = load i32, i32* %cp, align 4
%1 = bitcast i32* %e to i8*
tail call void @llvm.prefetch(i8* %1, i32 1, i32 1, i32 1)
%mul = mul nsw i32 %0, %b
@@ -464,7 +464,7 @@ entry:
define i64 @f_prefetch_mul_64(i64 %a, i64 %b, i64* nocapture readonly %cp, i64* nocapture %e) #1 {
entry:
- %0 = load i64* %cp, align 8
+ %0 = load i64, i64* %cp, align 8
%1 = bitcast i64* %e to i8*
tail call void @llvm.prefetch(i8* %1, i32 0, i32 3, i32 1)
%mul = mul nsw i64 %0, %b
@@ -479,7 +479,7 @@ entry:
define i32 @f_prefetch_mul_32(i32 %a, i32 %b, i32* nocapture readonly %cp, i32* nocapture %e) #1 {
entry:
- %0 = load i32* %cp, align 4
+ %0 = load i32, i32* %cp, align 4
%1 = bitcast i32* %e to i8*
tail call void @llvm.prefetch(i8* %1, i32 1, i32 3, i32 1)
%mul = mul nsw i32 %0, %b
@@ -494,7 +494,7 @@ entry:
define i64 @fall_through(i64 %a, i64 %b, i64* nocapture readonly %c) #0 {
entry:
- %0 = load i64* %c, align 8
+ %0 = load i64, i64* %c, align 8
br label %block1
block1:
Modified: llvm/trunk/test/CodeGen/AArch64/aarch64-gep-opt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/aarch64-gep-opt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/aarch64-gep-opt.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/aarch64-gep-opt.ll Fri Feb 27 15:17:42 2015
@@ -15,13 +15,13 @@ target triple = "aarch64-linux-gnueabi"
; eliminate the common subexpression for the second use.
define void @test_GEP_CSE([240 x %struct]* %string, i32* %adj, i32 %lib, i64 %idxprom) {
%liberties = getelementptr [240 x %struct], [240 x %struct]* %string, i64 1, i64 %idxprom, i32 3
- %1 = load i32* %liberties, align 4
+ %1 = load i32, i32* %liberties, align 4
%cmp = icmp eq i32 %1, %lib
br i1 %cmp, label %if.then, label %if.end
if.then: ; preds = %entry
%origin = getelementptr [240 x %struct], [240 x %struct]* %string, i64 1, i64 %idxprom, i32 2
- %2 = load i32* %origin, align 4
+ %2 = load i32, i32* %origin, align 4
store i32 %2, i32* %adj, align 4
br label %if.end
@@ -66,9 +66,9 @@ if.end:
; use.
define void @test_GEP_across_BB(%class.my* %this, i64 %idx) {
%1 = getelementptr %class.my, %class.my* %this, i64 0, i32 3, i64 %idx, i32 1
- %2 = load i32* %1, align 4
+ %2 = load i32, i32* %1, align 4
%3 = getelementptr %class.my, %class.my* %this, i64 0, i32 3, i64 %idx, i32 2
- %4 = load i32* %3, align 4
+ %4 = load i32, i32* %3, align 4
%5 = icmp eq i32 %2, %4
br i1 %5, label %if.true, label %exit
Modified: llvm/trunk/test/CodeGen/AArch64/aarch64-smull.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/aarch64-smull.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/aarch64-smull.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/aarch64-smull.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
define <8 x i16> @smull_v8i8_v8i16(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: smull_v8i8_v8i16:
; CHECK: smull {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = sext <8 x i8> %tmp1 to <8 x i16>
%tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
%tmp5 = mul <8 x i16> %tmp3, %tmp4
@@ -14,8 +14,8 @@ define <8 x i16> @smull_v8i8_v8i16(<8 x
define <4 x i32> @smull_v4i16_v4i32(<4 x i16>* %A, <4 x i16>* %B) nounwind {
; CHECK-LABEL: smull_v4i16_v4i32:
; CHECK: smull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = sext <4 x i16> %tmp1 to <4 x i32>
%tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
%tmp5 = mul <4 x i32> %tmp3, %tmp4
@@ -25,8 +25,8 @@ define <4 x i32> @smull_v4i16_v4i32(<4 x
define <2 x i64> @smull_v2i32_v2i64(<2 x i32>* %A, <2 x i32>* %B) nounwind {
; CHECK-LABEL: smull_v2i32_v2i64:
; CHECK: smull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
%tmp3 = sext <2 x i32> %tmp1 to <2 x i64>
%tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
%tmp5 = mul <2 x i64> %tmp3, %tmp4
@@ -36,8 +36,8 @@ define <2 x i64> @smull_v2i32_v2i64(<2 x
define <8 x i16> @umull_v8i8_v8i16(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: umull_v8i8_v8i16:
; CHECK: umull {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = zext <8 x i8> %tmp1 to <8 x i16>
%tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
%tmp5 = mul <8 x i16> %tmp3, %tmp4
@@ -47,8 +47,8 @@ define <8 x i16> @umull_v8i8_v8i16(<8 x
define <4 x i32> @umull_v4i16_v4i32(<4 x i16>* %A, <4 x i16>* %B) nounwind {
; CHECK-LABEL: umull_v4i16_v4i32:
; CHECK: umull {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = zext <4 x i16> %tmp1 to <4 x i32>
%tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
%tmp5 = mul <4 x i32> %tmp3, %tmp4
@@ -58,8 +58,8 @@ define <4 x i32> @umull_v4i16_v4i32(<4 x
define <2 x i64> @umull_v2i32_v2i64(<2 x i32>* %A, <2 x i32>* %B) nounwind {
; CHECK-LABEL: umull_v2i32_v2i64:
; CHECK: umull {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
%tmp3 = zext <2 x i32> %tmp1 to <2 x i64>
%tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
%tmp5 = mul <2 x i64> %tmp3, %tmp4
@@ -69,9 +69,9 @@ define <2 x i64> @umull_v2i32_v2i64(<2 x
define <8 x i16> @smlal_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
; CHECK-LABEL: smlal_v8i8_v8i16:
; CHECK: smlal {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = load <8 x i8>* %C
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
+ %tmp3 = load <8 x i8>, <8 x i8>* %C
%tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
%tmp5 = sext <8 x i8> %tmp3 to <8 x i16>
%tmp6 = mul <8 x i16> %tmp4, %tmp5
@@ -82,9 +82,9 @@ define <8 x i16> @smlal_v8i8_v8i16(<8 x
define <4 x i32> @smlal_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
; CHECK-LABEL: smlal_v4i16_v4i32:
; CHECK: smlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = load <4 x i16>* %C
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
+ %tmp3 = load <4 x i16>, <4 x i16>* %C
%tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
%tmp5 = sext <4 x i16> %tmp3 to <4 x i32>
%tmp6 = mul <4 x i32> %tmp4, %tmp5
@@ -95,9 +95,9 @@ define <4 x i32> @smlal_v4i16_v4i32(<4 x
define <2 x i64> @smlal_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
; CHECK-LABEL: smlal_v2i32_v2i64:
; CHECK: smlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = load <2 x i32>* %C
+ %tmp1 = load <2 x i64>, <2 x i64>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
+ %tmp3 = load <2 x i32>, <2 x i32>* %C
%tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
%tmp5 = sext <2 x i32> %tmp3 to <2 x i64>
%tmp6 = mul <2 x i64> %tmp4, %tmp5
@@ -108,9 +108,9 @@ define <2 x i64> @smlal_v2i32_v2i64(<2 x
define <8 x i16> @umlal_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
; CHECK-LABEL: umlal_v8i8_v8i16:
; CHECK: umlal {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = load <8 x i8>* %C
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
+ %tmp3 = load <8 x i8>, <8 x i8>* %C
%tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
%tmp5 = zext <8 x i8> %tmp3 to <8 x i16>
%tmp6 = mul <8 x i16> %tmp4, %tmp5
@@ -121,9 +121,9 @@ define <8 x i16> @umlal_v8i8_v8i16(<8 x
define <4 x i32> @umlal_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
; CHECK-LABEL: umlal_v4i16_v4i32:
; CHECK: umlal {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = load <4 x i16>* %C
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
+ %tmp3 = load <4 x i16>, <4 x i16>* %C
%tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
%tmp5 = zext <4 x i16> %tmp3 to <4 x i32>
%tmp6 = mul <4 x i32> %tmp4, %tmp5
@@ -134,9 +134,9 @@ define <4 x i32> @umlal_v4i16_v4i32(<4 x
define <2 x i64> @umlal_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
; CHECK-LABEL: umlal_v2i32_v2i64:
; CHECK: umlal {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = load <2 x i32>* %C
+ %tmp1 = load <2 x i64>, <2 x i64>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
+ %tmp3 = load <2 x i32>, <2 x i32>* %C
%tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
%tmp5 = zext <2 x i32> %tmp3 to <2 x i64>
%tmp6 = mul <2 x i64> %tmp4, %tmp5
@@ -147,9 +147,9 @@ define <2 x i64> @umlal_v2i32_v2i64(<2 x
define <8 x i16> @smlsl_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
; CHECK-LABEL: smlsl_v8i8_v8i16:
; CHECK: smlsl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = load <8 x i8>* %C
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
+ %tmp3 = load <8 x i8>, <8 x i8>* %C
%tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
%tmp5 = sext <8 x i8> %tmp3 to <8 x i16>
%tmp6 = mul <8 x i16> %tmp4, %tmp5
@@ -160,9 +160,9 @@ define <8 x i16> @smlsl_v8i8_v8i16(<8 x
define <4 x i32> @smlsl_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
; CHECK-LABEL: smlsl_v4i16_v4i32:
; CHECK: smlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = load <4 x i16>* %C
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
+ %tmp3 = load <4 x i16>, <4 x i16>* %C
%tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
%tmp5 = sext <4 x i16> %tmp3 to <4 x i32>
%tmp6 = mul <4 x i32> %tmp4, %tmp5
@@ -173,9 +173,9 @@ define <4 x i32> @smlsl_v4i16_v4i32(<4 x
define <2 x i64> @smlsl_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
; CHECK-LABEL: smlsl_v2i32_v2i64:
; CHECK: smlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = load <2 x i32>* %C
+ %tmp1 = load <2 x i64>, <2 x i64>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
+ %tmp3 = load <2 x i32>, <2 x i32>* %C
%tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
%tmp5 = sext <2 x i32> %tmp3 to <2 x i64>
%tmp6 = mul <2 x i64> %tmp4, %tmp5
@@ -186,9 +186,9 @@ define <2 x i64> @smlsl_v2i32_v2i64(<2 x
define <8 x i16> @umlsl_v8i8_v8i16(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
; CHECK-LABEL: umlsl_v8i8_v8i16:
; CHECK: umlsl {{v[0-9]+}}.8h, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = load <8 x i8>* %C
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
+ %tmp3 = load <8 x i8>, <8 x i8>* %C
%tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
%tmp5 = zext <8 x i8> %tmp3 to <8 x i16>
%tmp6 = mul <8 x i16> %tmp4, %tmp5
@@ -199,9 +199,9 @@ define <8 x i16> @umlsl_v8i8_v8i16(<8 x
define <4 x i32> @umlsl_v4i16_v4i32(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
; CHECK-LABEL: umlsl_v4i16_v4i32:
; CHECK: umlsl {{v[0-9]+}}.4s, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = load <4 x i16>* %C
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
+ %tmp3 = load <4 x i16>, <4 x i16>* %C
%tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
%tmp5 = zext <4 x i16> %tmp3 to <4 x i32>
%tmp6 = mul <4 x i32> %tmp4, %tmp5
@@ -212,9 +212,9 @@ define <4 x i32> @umlsl_v4i16_v4i32(<4 x
define <2 x i64> @umlsl_v2i32_v2i64(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
; CHECK-LABEL: umlsl_v2i32_v2i64:
; CHECK: umlsl {{v[0-9]+}}.2d, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = load <2 x i32>* %C
+ %tmp1 = load <2 x i64>, <2 x i64>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
+ %tmp3 = load <2 x i32>, <2 x i32>* %C
%tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
%tmp5 = zext <2 x i32> %tmp3 to <2 x i64>
%tmp6 = mul <2 x i64> %tmp4, %tmp5
Modified: llvm/trunk/test/CodeGen/AArch64/addsub-shifted.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/addsub-shifted.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/addsub-shifted.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/addsub-shifted.ll Fri Feb 27 15:17:42 2015
@@ -6,63 +6,63 @@
define void @test_lsl_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_lsl_arith:
- %rhs1 = load volatile i32* @var32
+ %rhs1 = load volatile i32, i32* @var32
%shift1 = shl i32 %rhs1, 18
%val1 = add i32 %lhs32, %shift1
store volatile i32 %val1, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #18
- %rhs2 = load volatile i32* @var32
+ %rhs2 = load volatile i32, i32* @var32
%shift2 = shl i32 %rhs2, 31
%val2 = add i32 %shift2, %lhs32
store volatile i32 %val2, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
- %rhs3 = load volatile i32* @var32
+ %rhs3 = load volatile i32, i32* @var32
%shift3 = shl i32 %rhs3, 5
%val3 = sub i32 %lhs32, %shift3
store volatile i32 %val3, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #5
; Subtraction is not commutative!
- %rhs4 = load volatile i32* @var32
+ %rhs4 = load volatile i32, i32* @var32
%shift4 = shl i32 %rhs4, 19
%val4 = sub i32 %shift4, %lhs32
store volatile i32 %val4, i32* @var32
; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #19
- %lhs4a = load volatile i32* @var32
+ %lhs4a = load volatile i32, i32* @var32
%shift4a = shl i32 %lhs4a, 15
%val4a = sub i32 0, %shift4a
store volatile i32 %val4a, i32* @var32
; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsl #15
- %rhs5 = load volatile i64* @var64
+ %rhs5 = load volatile i64, i64* @var64
%shift5 = shl i64 %rhs5, 18
%val5 = add i64 %lhs64, %shift5
store volatile i64 %val5, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #18
- %rhs6 = load volatile i64* @var64
+ %rhs6 = load volatile i64, i64* @var64
%shift6 = shl i64 %rhs6, 31
%val6 = add i64 %shift6, %lhs64
store volatile i64 %val6, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #31
- %rhs7 = load volatile i64* @var64
+ %rhs7 = load volatile i64, i64* @var64
%shift7 = shl i64 %rhs7, 5
%val7 = sub i64 %lhs64, %shift7
store volatile i64 %val7, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #5
; Subtraction is not commutative!
- %rhs8 = load volatile i64* @var64
+ %rhs8 = load volatile i64, i64* @var64
%shift8 = shl i64 %rhs8, 19
%val8 = sub i64 %shift8, %lhs64
store volatile i64 %val8, i64* @var64
; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #19
- %lhs8a = load volatile i64* @var64
+ %lhs8a = load volatile i64, i64* @var64
%shift8a = shl i64 %lhs8a, 60
%val8a = sub i64 0, %shift8a
store volatile i64 %val8a, i64* @var64
Modified: llvm/trunk/test/CodeGen/AArch64/addsub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/addsub.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/addsub.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/addsub.ll Fri Feb 27 15:17:42 2015
@@ -12,12 +12,12 @@ define void @add_small() {
; CHECK-LABEL: add_small:
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, #4095
- %val32 = load i32* @var_i32
+ %val32 = load i32, i32* @var_i32
%newval32 = add i32 %val32, 4095
store i32 %newval32, i32* @var_i32
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, #52
- %val64 = load i64* @var_i64
+ %val64 = load i64, i64* @var_i64
%newval64 = add i64 %val64, 52
store i64 %newval64, i64* @var_i64
@@ -29,12 +29,12 @@ define void @add_med() {
; CHECK-LABEL: add_med:
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{#3567, lsl #12|#14610432}}
- %val32 = load i32* @var_i32
+ %val32 = load i32, i32* @var_i32
%newval32 = add i32 %val32, 14610432 ; =0xdef000
store i32 %newval32, i32* @var_i32
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{#4095, lsl #12|#16773120}}
- %val64 = load i64* @var_i64
+ %val64 = load i64, i64* @var_i64
%newval64 = add i64 %val64, 16773120 ; =0xfff000
store i64 %newval64, i64* @var_i64
@@ -46,12 +46,12 @@ define void @sub_small() {
; CHECK-LABEL: sub_small:
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, #4095
- %val32 = load i32* @var_i32
+ %val32 = load i32, i32* @var_i32
%newval32 = sub i32 %val32, 4095
store i32 %newval32, i32* @var_i32
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, #52
- %val64 = load i64* @var_i64
+ %val64 = load i64, i64* @var_i64
%newval64 = sub i64 %val64, 52
store i64 %newval64, i64* @var_i64
@@ -63,12 +63,12 @@ define void @sub_med() {
; CHECK-LABEL: sub_med:
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{#3567, lsl #12|#14610432}}
- %val32 = load i32* @var_i32
+ %val32 = load i32, i32* @var_i32
%newval32 = sub i32 %val32, 14610432 ; =0xdef000
store i32 %newval32, i32* @var_i32
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{#4095, lsl #12|#16773120}}
- %val64 = load i64* @var_i64
+ %val64 = load i64, i64* @var_i64
%newval64 = sub i64 %val64, 16773120 ; =0xfff000
store i64 %newval64, i64* @var_i64
@@ -77,7 +77,7 @@ define void @sub_med() {
define void @testing() {
; CHECK-LABEL: testing:
- %val = load i32* @var_i32
+ %val = load i32, i32* @var_i32
; CHECK: cmp {{w[0-9]+}}, #4095
; CHECK: b.ne [[RET:.?LBB[0-9]+_[0-9]+]]
Modified: llvm/trunk/test/CodeGen/AArch64/addsub_ext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/addsub_ext.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/addsub_ext.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/addsub_ext.ll Fri Feb 27 15:17:42 2015
@@ -7,9 +7,9 @@
define void @addsub_i8rhs() minsize {
; CHECK-LABEL: addsub_i8rhs:
- %val8_tmp = load i8* @var8
- %lhs32 = load i32* @var32
- %lhs64 = load i64* @var64
+ %val8_tmp = load i8, i8* @var8
+ %lhs32 = load i32, i32* @var32
+ %lhs64 = load i64, i64* @var64
; Need this to prevent extension upon load and give a vanilla i8 operand.
%val8 = add i8 %val8_tmp, 123
@@ -82,9 +82,9 @@ end:
define void @addsub_i16rhs() minsize {
; CHECK-LABEL: addsub_i16rhs:
- %val16_tmp = load i16* @var16
- %lhs32 = load i32* @var32
- %lhs64 = load i64* @var64
+ %val16_tmp = load i16, i16* @var16
+ %lhs32 = load i32, i32* @var32
+ %lhs64 = load i64, i64* @var64
; Need this to prevent extension upon load and give a vanilla i16 operand.
%val16 = add i16 %val16_tmp, 123
@@ -160,8 +160,8 @@ end:
; in the face of "add/sub (shifted register)" so I don't intend to.
define void @addsub_i32rhs() minsize {
; CHECK-LABEL: addsub_i32rhs:
- %val32_tmp = load i32* @var32
- %lhs64 = load i64* @var64
+ %val32_tmp = load i32, i32* @var32
+ %lhs64 = load i64, i64* @var64
%val32 = add i32 %val32_tmp, 123
Modified: llvm/trunk/test/CodeGen/AArch64/alloca.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/alloca.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/alloca.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/alloca.ll Fri Feb 27 15:17:42 2015
@@ -51,7 +51,7 @@ define i64 @test_alloca_with_local(i64 %
call void @use_addr_loc(i8* %buf, i64* %loc)
; CHECK: bl use_addr
- %val = load i64* %loc
+ %val = load i64, i64* %loc
; CHECK: ldur x0, [x29, #-[[LOC_FROM_FP]]]
Modified: llvm/trunk/test/CodeGen/AArch64/and-mask-removal.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/and-mask-removal.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/and-mask-removal.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/and-mask-removal.ll Fri Feb 27 15:17:42 2015
@@ -9,13 +9,13 @@ define void @new_position(i32 %pos) {
entry:
%idxprom = sext i32 %pos to i64
%arrayidx = getelementptr inbounds [400 x i8], [400 x i8]* @board, i64 0, i64 %idxprom
- %tmp = load i8* %arrayidx, align 1
+ %tmp = load i8, i8* %arrayidx, align 1
%.off = add i8 %tmp, -1
%switch = icmp ult i8 %.off, 2
br i1 %switch, label %if.then, label %if.end
if.then: ; preds = %entry
- %tmp1 = load i32* @next_string, align 4
+ %tmp1 = load i32, i32* @next_string, align 4
%arrayidx8 = getelementptr inbounds [400 x i32], [400 x i32]* @string_number, i64 0, i64 %idxprom
store i32 %tmp1, i32* %arrayidx8, align 4
br label %if.end
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-2011-04-21-CPSRBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-2011-04-21-CPSRBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-2011-04-21-CPSRBug.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-2011-04-21-CPSRBug.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ entry:
%cmp = icmp eq i32* null, undef
%frombool = zext i1 %cmp to i8
store i8 %frombool, i8* undef, align 1
- %tmp4 = load i8* undef, align 1
+ %tmp4 = load i8, i8* undef, align 1
%tobool = trunc i8 %tmp4 to i1
br i1 %tobool, label %land.lhs.true, label %if.end
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-2011-10-18-LdStOptBug.ll Fri Feb 27 15:17:42 2015
@@ -19,7 +19,7 @@ for.body:
%0 = shl nsw i64 %indvars.iv, 12
%add = add nsw i64 %0, 34628173824
%1 = inttoptr i64 %add to i32*
- %2 = load volatile i32* %1, align 4096
+ %2 = load volatile i32, i32* %1, align 4096
store volatile i32 %2, i32* @test_data, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-2012-01-11-ComparisonDAGCrash.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@ lor.lhs.false:
br i1 undef, label %return, label %if.end
if.end:
- %tmp.i = load i64* undef, align 8
+ %tmp.i = load i64, i64* undef, align 8
%and.i.i.i = and i64 %tmp.i, -16
br i1 %IsArrow, label %if.else_crit_edge, label %if.end32
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-2012-05-22-LdStOptBug.ll Fri Feb 27 15:17:42 2015
@@ -15,23 +15,23 @@ define hidden %struct.CGRect @t(%0* noca
entry:
; CHECK-LABEL: t:
; CHECK: ldp d{{[0-9]+}}, d{{[0-9]+}}
- %ivar = load i64* @"OBJC_IVAR_$_UIScreen._bounds", align 8, !invariant.load !4
+ %ivar = load i64, i64* @"OBJC_IVAR_$_UIScreen._bounds", align 8, !invariant.load !4
%0 = bitcast %0* %self to i8*
%add.ptr = getelementptr inbounds i8, i8* %0, i64 %ivar
%add.ptr10.0 = bitcast i8* %add.ptr to double*
- %tmp11 = load double* %add.ptr10.0, align 8
+ %tmp11 = load double, double* %add.ptr10.0, align 8
%add.ptr.sum = add i64 %ivar, 8
%add.ptr10.1 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum
%1 = bitcast i8* %add.ptr10.1 to double*
- %tmp12 = load double* %1, align 8
+ %tmp12 = load double, double* %1, align 8
%add.ptr.sum17 = add i64 %ivar, 16
%add.ptr4.1 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum17
%add.ptr4.1.0 = bitcast i8* %add.ptr4.1 to double*
- %tmp = load double* %add.ptr4.1.0, align 8
+ %tmp = load double, double* %add.ptr4.1.0, align 8
%add.ptr4.1.sum = add i64 %ivar, 24
%add.ptr4.1.1 = getelementptr inbounds i8, i8* %0, i64 %add.ptr4.1.sum
%2 = bitcast i8* %add.ptr4.1.1 to double*
- %tmp5 = load double* %2, align 8
+ %tmp5 = load double, double* %2, align 8
%insert14 = insertvalue %struct.CGPoint undef, double %tmp11, 0
%insert16 = insertvalue %struct.CGPoint %insert14, double %tmp12, 1
%insert = insertvalue %struct.CGRect undef, %struct.CGPoint %insert16, 0
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-2012-06-06-FPToUI.ll Fri Feb 27 15:17:42 2015
@@ -13,12 +13,12 @@ define void @testDouble(double %d) ssp {
entry:
%d.addr = alloca double, align 8
store double %d, double* %d.addr, align 8
- %0 = load double* %d.addr, align 8
- %1 = load double* %d.addr, align 8
+ %0 = load double, double* %d.addr, align 8
+ %1 = load double, double* %d.addr, align 8
%conv = fptoui double %1 to i64
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([9 x i8]* @.str, i32 0, i32 0), double %0, i64 %conv)
- %2 = load double* %d.addr, align 8
- %3 = load double* %d.addr, align 8
+ %2 = load double, double* %d.addr, align 8
+ %3 = load double, double* %d.addr, align 8
%conv1 = fptoui double %3 to i32
%call2 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str1, i32 0, i32 0), double %2, i32 %conv1)
ret void
@@ -33,14 +33,14 @@ define void @testFloat(float %f) ssp {
entry:
%f.addr = alloca float, align 4
store float %f, float* %f.addr, align 4
- %0 = load float* %f.addr, align 4
+ %0 = load float, float* %f.addr, align 4
%conv = fpext float %0 to double
- %1 = load float* %f.addr, align 4
+ %1 = load float, float* %f.addr, align 4
%conv1 = fptoui float %1 to i64
%call = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([8 x i8]* @.str2, i32 0, i32 0), double %conv, i64 %conv1)
- %2 = load float* %f.addr, align 4
+ %2 = load float, float* %f.addr, align 4
%conv2 = fpext float %2 to double
- %3 = load float* %f.addr, align 4
+ %3 = load float, float* %f.addr, align 4
%conv3 = fptoui float %3 to i32
%call4 = call i32 (i8*, ...)* @printf(i8* getelementptr inbounds ([7 x i8]* @.str3, i32 0, i32 0), double %conv2, i32 %conv3)
ret void
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-abi-varargs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-abi-varargs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-abi-varargs.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-abi-varargs.ll Fri Feb 27 15:17:42 2015
@@ -82,18 +82,18 @@ define i32 @main() nounwind ssp {
store i32 10, i32* %a10, align 4
store i32 11, i32* %a11, align 4
store i32 12, i32* %a12, align 4
- %1 = load i32* %a1, align 4
- %2 = load i32* %a2, align 4
- %3 = load i32* %a3, align 4
- %4 = load i32* %a4, align 4
- %5 = load i32* %a5, align 4
- %6 = load i32* %a6, align 4
- %7 = load i32* %a7, align 4
- %8 = load i32* %a8, align 4
- %9 = load i32* %a9, align 4
- %10 = load i32* %a10, align 4
- %11 = load i32* %a11, align 4
- %12 = load i32* %a12, align 4
+ %1 = load i32, i32* %a1, align 4
+ %2 = load i32, i32* %a2, align 4
+ %3 = load i32, i32* %a3, align 4
+ %4 = load i32, i32* %a4, align 4
+ %5 = load i32, i32* %a5, align 4
+ %6 = load i32, i32* %a6, align 4
+ %7 = load i32, i32* %a7, align 4
+ %8 = load i32, i32* %a8, align 4
+ %9 = load i32, i32* %a9, align 4
+ %10 = load i32, i32* %a10, align 4
+ %11 = load i32, i32* %a11, align 4
+ %12 = load i32, i32* %a12, align 4
call void (i32, i32, i32, i32, i32, i32, i32, i32, i32, ...)* @fn9(i32 %1, i32 %2, i32 %3, i32 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9, i32 %10, i32 %11, i32 %12)
ret i32 0
}
@@ -131,8 +131,8 @@ entry:
%y.addr = alloca <4 x i32>, align 16
store i32 %x, i32* %x.addr, align 4
store <4 x i32> %y, <4 x i32>* %y.addr, align 16
- %0 = load i32* %x.addr, align 4
- %1 = load <4 x i32>* %y.addr, align 16
+ %0 = load i32, i32* %x.addr, align 4
+ %1 = load <4 x i32>, <4 x i32>* %y.addr, align 16
call void (i8*, ...)* @foo(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32 %0, <4 x i32> %1)
ret void
}
@@ -158,7 +158,7 @@ entry:
call void @llvm.va_start(i8* %args1)
%0 = va_arg i8** %args, i32
store i32 %0, i32* %vc, align 4
- %ap.cur = load i8** %args
+ %ap.cur = load i8*, i8** %args
%1 = getelementptr i8, i8* %ap.cur, i32 15
%2 = ptrtoint i8* %1 to i64
%3 = and i64 %2, -16
@@ -183,9 +183,9 @@ entry:
store i32 %x, i32* %x.addr, align 4
%0 = bitcast %struct.s41* %s41 to i128*
store i128 %s41.coerce, i128* %0, align 1
- %1 = load i32* %x.addr, align 4
+ %1 = load i32, i32* %x.addr, align 4
%2 = bitcast %struct.s41* %s41 to i128*
- %3 = load i128* %2, align 1
+ %3 = load i128, i128* %2, align 1
call void (i8*, ...)* @foo2(i8* getelementptr inbounds ([4 x i8]* @.str, i32 0, i32 0), i32 %1, i128 %3)
ret void
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-abi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-abi.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-abi.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-abi.ll Fri Feb 27 15:17:42 2015
@@ -79,7 +79,7 @@ entry:
; FAST: sub sp, sp
; FAST: mov x[[ADDR:[0-9]+]], sp
; FAST: str [[REG_1:q[0-9]+]], [x[[ADDR]], #16]
- %0 = load <4 x i32>* %in, align 16
+ %0 = load <4 x i32>, <4 x i32>* %in, align 16
%call = tail call double @args_vec_4i(double 3.000000e+00, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, <4 x i32> %0, double 3.000000e+00, <4 x i32> %0, i8 signext 3)
ret double %call
}
@@ -133,7 +133,7 @@ entry:
; FAST: sub sp, sp, #32
; FAST: mov x[[ADDR:[0-9]+]], sp
; FAST: str [[REG_1:d[0-9]+]], [x[[ADDR]], #8]
- %0 = load <2 x i32>* %in, align 8
+ %0 = load <2 x i32>, <2 x i32>* %in, align 8
%call = tail call double @args_vec_2i(double 3.000000e+00, <2 x i32> %0,
<2 x i32> %0, <2 x i32> %0, <2 x i32> %0, <2 x i32> %0, <2 x i32> %0,
<2 x i32> %0, float 3.000000e+00, <2 x i32> %0, i8 signext 3)
@@ -148,7 +148,7 @@ entry:
; CHECK: str [[REG_1:d[0-9]+]], [sp, #8]
; CHECK: str [[REG_2:w[0-9]+]], [sp]
; CHECK: orr w0, wzr, #0x3
- %0 = load double* %in, align 8
+ %0 = load double, double* %in, align 8
%call = tail call double @args_f64(double 3.000000e+00, double %0, double %0,
double %0, double %0, double %0, double %0, double %0,
float 3.000000e+00, double %0, i8 signext 3)
@@ -163,7 +163,7 @@ entry:
; CHECK: strb [[REG_3:w[0-9]+]], [sp, #16]
; CHECK: str [[REG_1:x[0-9]+]], [sp, #8]
; CHECK: str [[REG_2:w[0-9]+]], [sp]
- %0 = load i64* %in, align 8
+ %0 = load i64, i64* %in, align 8
%call = tail call i64 @args_i64(i64 3, i64 %0, i64 %0, i64 %0, i64 %0, i64 %0,
i64 %0, i64 %0, i32 3, i64 %0, i8 signext 3)
ret i64 %call
@@ -177,7 +177,7 @@ entry:
; CHECK: strb [[REG_2:w[0-9]+]], [sp, #8]
; CHECK: str [[REG_1:s[0-9]+]], [sp, #4]
; CHECK: strh [[REG_3:w[0-9]+]], [sp]
- %0 = load float* %in, align 4
+ %0 = load float, float* %in, align 4
%call = tail call i32 @args_f32(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6,
i32 7, i32 8, float 1.0, float 2.0, float 3.0, float 4.0, float 5.0,
float 6.0, float 7.0, float 8.0, i16 signext 3, float %0,
@@ -194,7 +194,7 @@ entry:
; CHECK: strb [[REG_2:w[0-9]+]], [sp, #8]
; CHECK: str [[REG_1:w[0-9]+]], [sp, #4]
; CHECK: strh [[REG_3:w[0-9]+]], [sp]
- %0 = load i32* %in, align 4
+ %0 = load i32, i32* %in, align 4
%call = tail call i32 @args_i32(i32 3, i32 %0, i32 %0, i32 %0, i32 %0, i32 %0,
i32 %0, i32 %0, i16 signext 3, i32 %0, i8 signext 4)
ret i32 %call
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-abi_align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-abi_align.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-abi_align.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-abi_align.ll Fri Feb 27 15:17:42 2015
@@ -59,8 +59,8 @@ entry:
; CHECK-LABEL: caller38
; CHECK: ldr x1,
; CHECK: ldr x2,
- %0 = load i64* bitcast (%struct.s38* @g38 to i64*), align 4
- %1 = load i64* bitcast (%struct.s38* @g38_2 to i64*), align 4
+ %0 = load i64, i64* bitcast (%struct.s38* @g38 to i64*), align 4
+ %1 = load i64, i64* bitcast (%struct.s38* @g38_2 to i64*), align 4
%call = tail call i32 @f38(i32 3, i64 %0, i64 %1) #5
ret i32 %call
}
@@ -76,8 +76,8 @@ entry:
; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #8]
; CHECK: movz w[[C:[0-9]+]], #0x9
; CHECK: str w[[C]], [sp]
- %0 = load i64* bitcast (%struct.s38* @g38 to i64*), align 4
- %1 = load i64* bitcast (%struct.s38* @g38_2 to i64*), align 4
+ %0 = load i64, i64* bitcast (%struct.s38* @g38 to i64*), align 4
+ %1 = load i64, i64* bitcast (%struct.s38* @g38_2 to i64*), align 4
%call = tail call i32 @f38_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6,
i32 7, i32 8, i32 9, i64 %0, i64 %1) #5
ret i32 %call
@@ -112,8 +112,8 @@ entry:
; CHECK-LABEL: caller39
; CHECK: ldp x1, x2,
; CHECK: ldp x3, x4,
- %0 = load i128* bitcast (%struct.s39* @g39 to i128*), align 16
- %1 = load i128* bitcast (%struct.s39* @g39_2 to i128*), align 16
+ %0 = load i128, i128* bitcast (%struct.s39* @g39 to i128*), align 16
+ %1 = load i128, i128* bitcast (%struct.s39* @g39_2 to i128*), align 16
%call = tail call i32 @f39(i32 3, i128 %0, i128 %1) #5
ret i32 %call
}
@@ -130,8 +130,8 @@ entry:
; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #16]
; CHECK: movz w[[C:[0-9]+]], #0x9
; CHECK: str w[[C]], [sp]
- %0 = load i128* bitcast (%struct.s39* @g39 to i128*), align 16
- %1 = load i128* bitcast (%struct.s39* @g39_2 to i128*), align 16
+ %0 = load i128, i128* bitcast (%struct.s39* @g39 to i128*), align 16
+ %1 = load i128, i128* bitcast (%struct.s39* @g39_2 to i128*), align 16
%call = tail call i32 @f39_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6,
i32 7, i32 8, i32 9, i128 %0, i128 %1) #5
ret i32 %call
@@ -168,8 +168,8 @@ entry:
; CHECK-LABEL: caller40
; CHECK: ldp x1, x2,
; CHECK: ldp x3, x4,
- %0 = load [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 4
- %1 = load [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 4
+ %0 = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 4
+ %1 = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 4
%call = tail call i32 @f40(i32 3, [2 x i64] %0, [2 x i64] %1) #5
ret i32 %call
}
@@ -186,8 +186,8 @@ entry:
; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #8]
; CHECK: movz w[[C:[0-9]+]], #0x9
; CHECK: str w[[C]], [sp]
- %0 = load [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 4
- %1 = load [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 4
+ %0 = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40 to [2 x i64]*), align 4
+ %1 = load [2 x i64], [2 x i64]* bitcast (%struct.s40* @g40_2 to [2 x i64]*), align 4
%call = tail call i32 @f40_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6,
i32 7, i32 8, i32 9, [2 x i64] %0, [2 x i64] %1) #5
ret i32 %call
@@ -222,8 +222,8 @@ entry:
; CHECK-LABEL: caller41
; CHECK: ldp x1, x2,
; CHECK: ldp x3, x4,
- %0 = load i128* bitcast (%struct.s41* @g41 to i128*), align 16
- %1 = load i128* bitcast (%struct.s41* @g41_2 to i128*), align 16
+ %0 = load i128, i128* bitcast (%struct.s41* @g41 to i128*), align 16
+ %1 = load i128, i128* bitcast (%struct.s41* @g41_2 to i128*), align 16
%call = tail call i32 @f41(i32 3, i128 %0, i128 %1) #5
ret i32 %call
}
@@ -240,8 +240,8 @@ entry:
; CHECK: stp {{x[0-9]+}}, {{x[0-9]+}}, [sp, #16]
; CHECK: movz w[[C:[0-9]+]], #0x9
; CHECK: str w[[C]], [sp]
- %0 = load i128* bitcast (%struct.s41* @g41 to i128*), align 16
- %1 = load i128* bitcast (%struct.s41* @g41_2 to i128*), align 16
+ %0 = load i128, i128* bitcast (%struct.s41* @g41 to i128*), align 16
+ %1 = load i128, i128* bitcast (%struct.s41* @g41_2 to i128*), align 16
%call = tail call i32 @f41_stack(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6,
i32 7, i32 8, i32 9, i128 %0, i128 %1) #5
ret i32 %call
@@ -261,14 +261,14 @@ entry:
; FAST: add w[[C:[0-9]+]], w[[A]], w0
; FAST: add {{w[0-9]+}}, w[[C]], w[[B]]
%i1 = getelementptr inbounds %struct.s42, %struct.s42* %s1, i64 0, i32 0
- %0 = load i32* %i1, align 4, !tbaa !0
+ %0 = load i32, i32* %i1, align 4, !tbaa !0
%i2 = getelementptr inbounds %struct.s42, %struct.s42* %s2, i64 0, i32 0
- %1 = load i32* %i2, align 4, !tbaa !0
+ %1 = load i32, i32* %i2, align 4, !tbaa !0
%s = getelementptr inbounds %struct.s42, %struct.s42* %s1, i64 0, i32 1
- %2 = load i16* %s, align 2, !tbaa !3
+ %2 = load i16, i16* %s, align 2, !tbaa !3
%conv = sext i16 %2 to i32
%s5 = getelementptr inbounds %struct.s42, %struct.s42* %s2, i64 0, i32 1
- %3 = load i16* %s5, align 2, !tbaa !3
+ %3 = load i16, i16* %s5, align 2, !tbaa !3
%conv6 = sext i16 %3 to i32
%add = add i32 %0, %i
%add3 = add i32 %add, %1
@@ -370,14 +370,14 @@ entry:
; FAST: add w[[C:[0-9]+]], w[[A]], w0
; FAST: add {{w[0-9]+}}, w[[C]], w[[B]]
%i1 = getelementptr inbounds %struct.s43, %struct.s43* %s1, i64 0, i32 0
- %0 = load i32* %i1, align 4, !tbaa !0
+ %0 = load i32, i32* %i1, align 4, !tbaa !0
%i2 = getelementptr inbounds %struct.s43, %struct.s43* %s2, i64 0, i32 0
- %1 = load i32* %i2, align 4, !tbaa !0
+ %1 = load i32, i32* %i2, align 4, !tbaa !0
%s = getelementptr inbounds %struct.s43, %struct.s43* %s1, i64 0, i32 1
- %2 = load i16* %s, align 2, !tbaa !3
+ %2 = load i16, i16* %s, align 2, !tbaa !3
%conv = sext i16 %2 to i32
%s5 = getelementptr inbounds %struct.s43, %struct.s43* %s2, i64 0, i32 1
- %3 = load i16* %s5, align 2, !tbaa !3
+ %3 = load i16, i16* %s5, align 2, !tbaa !3
%conv6 = sext i16 %3 to i32
%add = add i32 %0, %i
%add3 = add i32 %add, %1
@@ -493,7 +493,7 @@ entry:
; Load/Store opt is disabled with -O0, so the i128 is split.
; FAST: str {{x[0-9]+}}, [x[[ADDR]], #8]
; FAST: str {{x[0-9]+}}, [x[[ADDR]]]
- %0 = load i128* bitcast (%struct.s41* @g41 to i128*), align 16
+ %0 = load i128, i128* bitcast (%struct.s41* @g41 to i128*), align 16
%call = tail call i32 @callee_i128_split(i32 1, i32 2, i32 3, i32 4, i32 5,
i32 6, i32 7, i128 %0, i32 8) #5
ret i32 %call
@@ -514,7 +514,7 @@ entry:
; FAST: mov x[[R0:[0-9]+]], sp
; FAST: orr w[[R1:[0-9]+]], wzr, #0x8
; FAST: str w[[R1]], {{\[}}x[[R0]]{{\]}}
- %0 = load i64* bitcast (%struct.s41* @g41 to i64*), align 16
+ %0 = load i64, i64* bitcast (%struct.s41* @g41 to i64*), align 16
%call = tail call i32 @callee_i64(i32 1, i32 2, i32 3, i32 4, i32 5,
i32 6, i32 7, i64 %0, i32 8) #5
ret i32 %call
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-addr-mode-folding.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-addr-mode-folding.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-addr-mode-folding.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-addr-mode-folding.ll Fri Feb 27 15:17:42 2015
@@ -11,12 +11,12 @@ define i32 @fct(i32 %i1, i32 %i2) {
; _CHECK-NOT_: , sxtw]
entry:
%idxprom = sext i32 %i1 to i64
- %0 = load i8** @block, align 8
+ %0 = load i8*, i8** @block, align 8
%arrayidx = getelementptr inbounds i8, i8* %0, i64 %idxprom
- %1 = load i8* %arrayidx, align 1
+ %1 = load i8, i8* %arrayidx, align 1
%idxprom1 = sext i32 %i2 to i64
%arrayidx2 = getelementptr inbounds i8, i8* %0, i64 %idxprom1
- %2 = load i8* %arrayidx2, align 1
+ %2 = load i8, i8* %arrayidx2, align 1
%cmp = icmp eq i8 %1, %2
br i1 %cmp, label %if.end, label %if.then
@@ -30,10 +30,10 @@ if.end:
%inc9 = add nsw i32 %i2, 1
%idxprom10 = sext i32 %inc to i64
%arrayidx11 = getelementptr inbounds i8, i8* %0, i64 %idxprom10
- %3 = load i8* %arrayidx11, align 1
+ %3 = load i8, i8* %arrayidx11, align 1
%idxprom12 = sext i32 %inc9 to i64
%arrayidx13 = getelementptr inbounds i8, i8* %0, i64 %idxprom12
- %4 = load i8* %arrayidx13, align 1
+ %4 = load i8, i8* %arrayidx13, align 1
%cmp16 = icmp eq i8 %3, %4
br i1 %cmp16, label %if.end23, label %if.then18
@@ -47,10 +47,10 @@ if.end23:
%inc25 = add nsw i32 %i2, 2
%idxprom26 = sext i32 %inc24 to i64
%arrayidx27 = getelementptr inbounds i8, i8* %0, i64 %idxprom26
- %5 = load i8* %arrayidx27, align 1
+ %5 = load i8, i8* %arrayidx27, align 1
%idxprom28 = sext i32 %inc25 to i64
%arrayidx29 = getelementptr inbounds i8, i8* %0, i64 %idxprom28
- %6 = load i8* %arrayidx29, align 1
+ %6 = load i8, i8* %arrayidx29, align 1
%cmp32 = icmp eq i8 %5, %6
br i1 %cmp32, label %return, label %if.then34
@@ -71,12 +71,12 @@ define i32 @fct1(i32 %i1, i32 %i2) optsi
; CHECK: , sxtw]
entry:
%idxprom = sext i32 %i1 to i64
- %0 = load i8** @block, align 8
+ %0 = load i8*, i8** @block, align 8
%arrayidx = getelementptr inbounds i8, i8* %0, i64 %idxprom
- %1 = load i8* %arrayidx, align 1
+ %1 = load i8, i8* %arrayidx, align 1
%idxprom1 = sext i32 %i2 to i64
%arrayidx2 = getelementptr inbounds i8, i8* %0, i64 %idxprom1
- %2 = load i8* %arrayidx2, align 1
+ %2 = load i8, i8* %arrayidx2, align 1
%cmp = icmp eq i8 %1, %2
br i1 %cmp, label %if.end, label %if.then
@@ -90,10 +90,10 @@ if.end:
%inc9 = add nsw i32 %i2, 1
%idxprom10 = sext i32 %inc to i64
%arrayidx11 = getelementptr inbounds i8, i8* %0, i64 %idxprom10
- %3 = load i8* %arrayidx11, align 1
+ %3 = load i8, i8* %arrayidx11, align 1
%idxprom12 = sext i32 %inc9 to i64
%arrayidx13 = getelementptr inbounds i8, i8* %0, i64 %idxprom12
- %4 = load i8* %arrayidx13, align 1
+ %4 = load i8, i8* %arrayidx13, align 1
%cmp16 = icmp eq i8 %3, %4
br i1 %cmp16, label %if.end23, label %if.then18
@@ -107,10 +107,10 @@ if.end23:
%inc25 = add nsw i32 %i2, 2
%idxprom26 = sext i32 %inc24 to i64
%arrayidx27 = getelementptr inbounds i8, i8* %0, i64 %idxprom26
- %5 = load i8* %arrayidx27, align 1
+ %5 = load i8, i8* %arrayidx27, align 1
%idxprom28 = sext i32 %inc25 to i64
%arrayidx29 = getelementptr inbounds i8, i8* %0, i64 %idxprom28
- %6 = load i8* %arrayidx29, align 1
+ %6 = load i8, i8* %arrayidx29, align 1
%cmp32 = icmp eq i8 %5, %6
br i1 %cmp32, label %return, label %if.then34
@@ -136,8 +136,8 @@ entry:
if.then: ; preds = %entry
%idxprom = zext i8 %c to i64
%arrayidx = getelementptr inbounds i32, i32* %array, i64 %idxprom
- %0 = load volatile i32* %arrayidx, align 4
- %1 = load volatile i32* %arrayidx, align 4
+ %0 = load volatile i32, i32* %arrayidx, align 4
+ %1 = load volatile i32, i32* %arrayidx, align 4
%add3 = add nsw i32 %1, %0
br label %if.end
@@ -160,8 +160,8 @@ entry:
if.then: ; preds = %entry
%idxprom = zext i8 %c to i64
%arrayidx = getelementptr inbounds i32, i32* %array, i64 %idxprom
- %0 = load volatile i32* %arrayidx, align 4
- %1 = load volatile i32* %arrayidx, align 4
+ %0 = load volatile i32, i32* %arrayidx, align 4
+ %1 = load volatile i32, i32* %arrayidx, align 4
%add3 = add nsw i32 %1, %0
br label %if.end
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-addr-type-promotion.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-addr-type-promotion.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-addr-type-promotion.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-addr-type-promotion.ll Fri Feb 27 15:17:42 2015
@@ -28,12 +28,12 @@ define zeroext i8 @fullGtU(i32 %i1, i32
; CHECK-NEXT: cmp [[LOADEDVAL3]], [[LOADEDVAL4]]
entry:
%idxprom = sext i32 %i1 to i64
- %tmp = load i8** @block, align 8
+ %tmp = load i8*, i8** @block, align 8
%arrayidx = getelementptr inbounds i8, i8* %tmp, i64 %idxprom
- %tmp1 = load i8* %arrayidx, align 1
+ %tmp1 = load i8, i8* %arrayidx, align 1
%idxprom1 = sext i32 %i2 to i64
%arrayidx2 = getelementptr inbounds i8, i8* %tmp, i64 %idxprom1
- %tmp2 = load i8* %arrayidx2, align 1
+ %tmp2 = load i8, i8* %arrayidx2, align 1
%cmp = icmp eq i8 %tmp1, %tmp2
br i1 %cmp, label %if.end, label %if.then
@@ -47,10 +47,10 @@ if.end:
%inc10 = add nsw i32 %i2, 1
%idxprom11 = sext i32 %inc to i64
%arrayidx12 = getelementptr inbounds i8, i8* %tmp, i64 %idxprom11
- %tmp3 = load i8* %arrayidx12, align 1
+ %tmp3 = load i8, i8* %arrayidx12, align 1
%idxprom13 = sext i32 %inc10 to i64
%arrayidx14 = getelementptr inbounds i8, i8* %tmp, i64 %idxprom13
- %tmp4 = load i8* %arrayidx14, align 1
+ %tmp4 = load i8, i8* %arrayidx14, align 1
%cmp17 = icmp eq i8 %tmp3, %tmp4
br i1 %cmp17, label %if.end25, label %if.then19
@@ -64,10 +64,10 @@ if.end25:
%inc27 = add nsw i32 %i2, 2
%idxprom28 = sext i32 %inc26 to i64
%arrayidx29 = getelementptr inbounds i8, i8* %tmp, i64 %idxprom28
- %tmp5 = load i8* %arrayidx29, align 1
+ %tmp5 = load i8, i8* %arrayidx29, align 1
%idxprom30 = sext i32 %inc27 to i64
%arrayidx31 = getelementptr inbounds i8, i8* %tmp, i64 %idxprom30
- %tmp6 = load i8* %arrayidx31, align 1
+ %tmp6 = load i8, i8* %arrayidx31, align 1
%cmp34 = icmp eq i8 %tmp5, %tmp6
br i1 %cmp34, label %return, label %if.then36
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-addrmode.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-addrmode.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-addrmode.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-addrmode.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@
; CHECK: ret
define void @t1() {
%incdec.ptr = getelementptr inbounds i64, i64* @object, i64 1
- %tmp = load volatile i64* %incdec.ptr, align 8
+ %tmp = load volatile i64, i64* %incdec.ptr, align 8
ret void
}
@@ -21,7 +21,7 @@ define void @t1() {
; CHECK: ret
define void @t2() {
%incdec.ptr = getelementptr inbounds i64, i64* @object, i64 -33
- %tmp = load volatile i64* %incdec.ptr, align 8
+ %tmp = load volatile i64, i64* %incdec.ptr, align 8
ret void
}
@@ -31,7 +31,7 @@ define void @t2() {
; CHECK: ret
define void @t3() {
%incdec.ptr = getelementptr inbounds i64, i64* @object, i64 4095
- %tmp = load volatile i64* %incdec.ptr, align 8
+ %tmp = load volatile i64, i64* %incdec.ptr, align 8
ret void
}
@@ -42,7 +42,7 @@ define void @t3() {
; CHECK: ret
define void @t4() {
%incdec.ptr = getelementptr inbounds i64, i64* @object, i64 4096
- %tmp = load volatile i64* %incdec.ptr, align 8
+ %tmp = load volatile i64, i64* %incdec.ptr, align 8
ret void
}
@@ -52,7 +52,7 @@ define void @t4() {
; CHECK: ret
define void @t5(i64 %a) {
%incdec.ptr = getelementptr inbounds i64, i64* @object, i64 %a
- %tmp = load volatile i64* %incdec.ptr, align 8
+ %tmp = load volatile i64, i64* %incdec.ptr, align 8
ret void
}
@@ -65,7 +65,7 @@ define void @t5(i64 %a) {
define void @t6(i64 %a) {
%tmp1 = getelementptr inbounds i64, i64* @object, i64 %a
%incdec.ptr = getelementptr inbounds i64, i64* %tmp1, i64 4096
- %tmp = load volatile i64* %incdec.ptr, align 8
+ %tmp = load volatile i64, i64* %incdec.ptr, align 8
ret void
}
@@ -76,7 +76,7 @@ define void @t7(i64 %a) {
; CHECK-NEXT: ldr xzr, [x0, x[[NUM]]]
%1 = add i64 %a, 65535 ;0xffff
%2 = inttoptr i64 %1 to i64*
- %3 = load volatile i64* %2, align 8
+ %3 = load volatile i64, i64* %2, align 8
ret void
}
@@ -86,7 +86,7 @@ define void @t8(i64 %a) {
; CHECK-NEXT: ldr xzr, [x0, [[REG]]]
%1 = sub i64 %a, 4662 ;-4662 is 0xffffffffffffedca
%2 = inttoptr i64 %1 to i64*
- %3 = load volatile i64* %2, align 8
+ %3 = load volatile i64, i64* %2, align 8
ret void
}
@@ -96,7 +96,7 @@ define void @t9(i64 %a) {
; CHECK-NEXT: ldr xzr, [x0, [[REG]]]
%1 = add i64 -305463297, %a ;-305463297 is 0xffffffffedcaffff
%2 = inttoptr i64 %1 to i64*
- %3 = load volatile i64* %2, align 8
+ %3 = load volatile i64, i64* %2, align 8
ret void
}
@@ -106,7 +106,7 @@ define void @t10(i64 %a) {
; CHECK-NEXT: ldr xzr, [x0, [[REG]]]
%1 = add i64 %a, 81909218222800896 ;0x123000000000000
%2 = inttoptr i64 %1 to i64*
- %3 = load volatile i64* %2, align 8
+ %3 = load volatile i64, i64* %2, align 8
ret void
}
@@ -117,7 +117,7 @@ define void @t11(i64 %a) {
; CHECK-NEXT: ldr xzr, [x0, x[[NUM]]]
%1 = add i64 %a, 19088743 ;0x1234567
%2 = inttoptr i64 %1 to i64*
- %3 = load volatile i64* %2, align 8
+ %3 = load volatile i64, i64* %2, align 8
ret void
}
@@ -128,7 +128,7 @@ define void @t12(i64 %a) {
; CHECK-NEXT: ldr xzr, {{\[}}[[REG]]]
%1 = add i64 %a, 4095 ;0xfff
%2 = inttoptr i64 %1 to i64*
- %3 = load volatile i64* %2, align 8
+ %3 = load volatile i64, i64* %2, align 8
ret void
}
@@ -138,7 +138,7 @@ define void @t13(i64 %a) {
; CHECK-NEXT: ldr xzr, {{\[}}[[REG]]]
%1 = add i64 %a, -4095 ;-0xfff
%2 = inttoptr i64 %1 to i64*
- %3 = load volatile i64* %2, align 8
+ %3 = load volatile i64, i64* %2, align 8
ret void
}
@@ -148,7 +148,7 @@ define void @t14(i64 %a) {
; CHECK-NEXT: ldr xzr, {{\[}}[[REG]]]
%1 = add i64 %a, 1191936 ;0x123000
%2 = inttoptr i64 %1 to i64*
- %3 = load volatile i64* %2, align 8
+ %3 = load volatile i64, i64* %2, align 8
ret void
}
@@ -158,7 +158,7 @@ define void @t15(i64 %a) {
; CHECK-NEXT: ldr xzr, {{\[}}[[REG]]]
%1 = add i64 %a, -1191936 ;0xFFFFFFFFFFEDD000
%2 = inttoptr i64 %1 to i64*
- %3 = load volatile i64* %2, align 8
+ %3 = load volatile i64, i64* %2, align 8
ret void
}
@@ -167,7 +167,7 @@ define void @t16(i64 %a) {
; CHECK: ldr xzr, [x0, #28672]
%1 = add i64 %a, 28672 ;0x7000
%2 = inttoptr i64 %1 to i64*
- %3 = load volatile i64* %2, align 8
+ %3 = load volatile i64, i64* %2, align 8
ret void
}
@@ -176,6 +176,6 @@ define void @t17(i64 %a) {
; CHECK: ldur xzr, [x0, #-256]
%1 = add i64 %a, -256 ;-0x100
%2 = inttoptr i64 %1 to i64*
- %3 = load volatile i64* %2, align 8
+ %3 = load volatile i64, i64* %2, align 8
ret void
}
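
(A note on the pattern repeated throughout these hunks: the rewrite is purely syntactic. The value type a load produces, previously implied by the pointee type of its pointer operand, is now spelled explicitly as a first parameter; the volatile/atomic markers, ordering, and alignment are unchanged. A minimal before/after sketch, using a hypothetical %p of type i32*:

  %v = load i32* %p, align 4                      ; old form: result type implied by i32*
  %v = load i32, i32* %p, align 4                 ; new form: explicit result type, then pointer
  %v = load volatile i32, i32* %p, align 4        ; volatile stays between "load" and the type list
  %r = load atomic i32, i32* %p seq_cst, align 4  ; atomic ordering still trails the pointer
)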
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-alloc-no-stack-realign.ll Fri Feb 27 15:17:42 2015
@@ -13,9 +13,9 @@ entry:
; CHECK: stp [[Q1:q[0-9]+]], [[Q2:q[0-9]+]], {{\[}}[[BASE:x[0-9]+]], #32]
; CHECK: stp [[Q1:q[0-9]+]], [[Q2:q[0-9]+]], {{\[}}[[BASE]]]
%retval = alloca <16 x float>, align 16
- %0 = load <16 x float>* @T3_retval, align 16
+ %0 = load <16 x float>, <16 x float>* @T3_retval, align 16
store <16 x float> %0, <16 x float>* %retval
- %1 = load <16 x float>* %retval
+ %1 = load <16 x float>, <16 x float>* %retval
store <16 x float> %1, <16 x float>* %agg.result, align 16
ret void
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-alloca-frame-pointer-offset.ll Fri Feb 27 15:17:42 2015
@@ -13,17 +13,17 @@ define i32 @foo(i32 %a) nounwind {
%arr2 = alloca [32 x i32], align 4
%j = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
- %tmp = load i32* %a.addr, align 4
+ %tmp = load i32, i32* %a.addr, align 4
%tmp1 = zext i32 %tmp to i64
%v = mul i64 4, %tmp1
%vla = alloca i8, i64 %v, align 4
%tmp2 = bitcast i8* %vla to i32*
- %tmp3 = load i32* %a.addr, align 4
+ %tmp3 = load i32, i32* %a.addr, align 4
store i32 %tmp3, i32* %i, align 4
- %tmp4 = load i32* %a.addr, align 4
+ %tmp4 = load i32, i32* %a.addr, align 4
store i32 %tmp4, i32* %j, align 4
- %tmp5 = load i32* %j, align 4
+ %tmp5 = load i32, i32* %j, align 4
store i32 %tmp5, i32* %retval
- %x = load i32* %retval
+ %x = load i32, i32* %retval
ret i32 %x
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-andCmpBrToTBZ.ll Fri Feb 27 15:17:42 2015
@@ -29,7 +29,7 @@ _ZNK7WebCore4Node10hasTagNameERKNS_13Qua
br i1 %cmp.i.i.i.i, label %if.then3, label %if.end5
if.then3: ; preds = %_ZNK7WebCore4Node10hasTagNameERKNS_13QualifiedNameE.exit, %land.rhs.i
- %tmp11 = load i8* %str14, align 8
+ %tmp11 = load i8, i8* %str14, align 8
%tmp12 = and i8 %tmp11, 2
%tmp13 = icmp ne i8 %tmp12, 0
br label %return
@@ -55,7 +55,7 @@ if.then7:
br i1 %isTextField, label %if.then9, label %if.end12
if.then9: ; preds = %if.then7
- %tmp23 = load i8* %str5, align 8
+ %tmp23 = load i8, i8* %str5, align 8
%tmp24 = and i8 %tmp23, 2
%tmp25 = icmp ne i8 %tmp24, 0
br label %return
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-atomic-128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-atomic-128.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-atomic-128.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-atomic-128.ll Fri Feb 27 15:17:42 2015
@@ -169,7 +169,7 @@ define i128 @atomic_load_seq_cst(i128* %
; CHECK-NOT: dmb
; CHECK-LABEL: ldaxp
; CHECK-NOT: dmb
- %r = load atomic i128* %p seq_cst, align 16
+ %r = load atomic i128, i128* %p seq_cst, align 16
ret i128 %r
}
@@ -178,7 +178,7 @@ define i128 @atomic_load_relaxed(i128* %
; CHECK-NOT: dmb
; CHECK: ldxp [[LO:x[0-9]+]], [[HI:x[0-9]+]], [x0]
; CHECK-NOT: dmb
- %r = load atomic i128* %p monotonic, align 16
+ %r = load atomic i128, i128* %p monotonic, align 16
ret i128 %r
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-atomic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-atomic.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-atomic.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-atomic.ll Fri Feb 27 15:17:42 2015
@@ -107,7 +107,7 @@ define void @seq_cst_fence() {
}
define i32 @atomic_load(i32* %p) {
- %r = load atomic i32* %p seq_cst, align 4
+ %r = load atomic i32, i32* %p seq_cst, align 4
ret i32 %r
; CHECK-LABEL: atomic_load:
; CHECK: ldar
@@ -116,21 +116,21 @@ define i32 @atomic_load(i32* %p) {
define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) {
; CHECK-LABEL: atomic_load_relaxed_8:
%ptr_unsigned = getelementptr i8, i8* %p, i32 4095
- %val_unsigned = load atomic i8* %ptr_unsigned monotonic, align 1
+ %val_unsigned = load atomic i8, i8* %ptr_unsigned monotonic, align 1
; CHECK: ldrb {{w[0-9]+}}, [x0, #4095]
%ptr_regoff = getelementptr i8, i8* %p, i32 %off32
- %val_regoff = load atomic i8* %ptr_regoff unordered, align 1
+ %val_regoff = load atomic i8, i8* %ptr_regoff unordered, align 1
%tot1 = add i8 %val_unsigned, %val_regoff
; CHECK: ldrb {{w[0-9]+}}, [x0, w1, sxtw]
%ptr_unscaled = getelementptr i8, i8* %p, i32 -256
- %val_unscaled = load atomic i8* %ptr_unscaled monotonic, align 1
+ %val_unscaled = load atomic i8, i8* %ptr_unscaled monotonic, align 1
%tot2 = add i8 %tot1, %val_unscaled
; CHECK: ldurb {{w[0-9]+}}, [x0, #-256]
%ptr_random = getelementptr i8, i8* %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
- %val_random = load atomic i8* %ptr_random unordered, align 1
+ %val_random = load atomic i8, i8* %ptr_random unordered, align 1
%tot3 = add i8 %tot2, %val_random
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
; CHECK: ldrb {{w[0-9]+}}, [x[[ADDR]]]
@@ -141,21 +141,21 @@ define i8 @atomic_load_relaxed_8(i8* %p,
define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) {
; CHECK-LABEL: atomic_load_relaxed_16:
%ptr_unsigned = getelementptr i16, i16* %p, i32 4095
- %val_unsigned = load atomic i16* %ptr_unsigned monotonic, align 2
+ %val_unsigned = load atomic i16, i16* %ptr_unsigned monotonic, align 2
; CHECK: ldrh {{w[0-9]+}}, [x0, #8190]
%ptr_regoff = getelementptr i16, i16* %p, i32 %off32
- %val_regoff = load atomic i16* %ptr_regoff unordered, align 2
+ %val_regoff = load atomic i16, i16* %ptr_regoff unordered, align 2
%tot1 = add i16 %val_unsigned, %val_regoff
; CHECK: ldrh {{w[0-9]+}}, [x0, w1, sxtw #1]
%ptr_unscaled = getelementptr i16, i16* %p, i32 -128
- %val_unscaled = load atomic i16* %ptr_unscaled monotonic, align 2
+ %val_unscaled = load atomic i16, i16* %ptr_unscaled monotonic, align 2
%tot2 = add i16 %tot1, %val_unscaled
; CHECK: ldurh {{w[0-9]+}}, [x0, #-256]
%ptr_random = getelementptr i16, i16* %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
- %val_random = load atomic i16* %ptr_random unordered, align 2
+ %val_random = load atomic i16, i16* %ptr_random unordered, align 2
%tot3 = add i16 %tot2, %val_random
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
; CHECK: ldrh {{w[0-9]+}}, [x[[ADDR]]]
@@ -166,21 +166,21 @@ define i16 @atomic_load_relaxed_16(i16*
define i32 @atomic_load_relaxed_32(i32* %p, i32 %off32) {
; CHECK-LABEL: atomic_load_relaxed_32:
%ptr_unsigned = getelementptr i32, i32* %p, i32 4095
- %val_unsigned = load atomic i32* %ptr_unsigned monotonic, align 4
+ %val_unsigned = load atomic i32, i32* %ptr_unsigned monotonic, align 4
; CHECK: ldr {{w[0-9]+}}, [x0, #16380]
%ptr_regoff = getelementptr i32, i32* %p, i32 %off32
- %val_regoff = load atomic i32* %ptr_regoff unordered, align 4
+ %val_regoff = load atomic i32, i32* %ptr_regoff unordered, align 4
%tot1 = add i32 %val_unsigned, %val_regoff
; CHECK: ldr {{w[0-9]+}}, [x0, w1, sxtw #2]
%ptr_unscaled = getelementptr i32, i32* %p, i32 -64
- %val_unscaled = load atomic i32* %ptr_unscaled monotonic, align 4
+ %val_unscaled = load atomic i32, i32* %ptr_unscaled monotonic, align 4
%tot2 = add i32 %tot1, %val_unscaled
; CHECK: ldur {{w[0-9]+}}, [x0, #-256]
%ptr_random = getelementptr i32, i32* %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
- %val_random = load atomic i32* %ptr_random unordered, align 4
+ %val_random = load atomic i32, i32* %ptr_random unordered, align 4
%tot3 = add i32 %tot2, %val_random
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
; CHECK: ldr {{w[0-9]+}}, [x[[ADDR]]]
@@ -191,21 +191,21 @@ define i32 @atomic_load_relaxed_32(i32*
define i64 @atomic_load_relaxed_64(i64* %p, i32 %off32) {
; CHECK-LABEL: atomic_load_relaxed_64:
%ptr_unsigned = getelementptr i64, i64* %p, i32 4095
- %val_unsigned = load atomic i64* %ptr_unsigned monotonic, align 8
+ %val_unsigned = load atomic i64, i64* %ptr_unsigned monotonic, align 8
; CHECK: ldr {{x[0-9]+}}, [x0, #32760]
%ptr_regoff = getelementptr i64, i64* %p, i32 %off32
- %val_regoff = load atomic i64* %ptr_regoff unordered, align 8
+ %val_regoff = load atomic i64, i64* %ptr_regoff unordered, align 8
%tot1 = add i64 %val_unsigned, %val_regoff
; CHECK: ldr {{x[0-9]+}}, [x0, w1, sxtw #3]
%ptr_unscaled = getelementptr i64, i64* %p, i32 -32
- %val_unscaled = load atomic i64* %ptr_unscaled monotonic, align 8
+ %val_unscaled = load atomic i64, i64* %ptr_unscaled monotonic, align 8
%tot2 = add i64 %tot1, %val_unscaled
; CHECK: ldur {{x[0-9]+}}, [x0, #-256]
%ptr_random = getelementptr i64, i64* %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
- %val_random = load atomic i64* %ptr_random unordered, align 8
+ %val_random = load atomic i64, i64* %ptr_random unordered, align 8
%tot3 = add i64 %tot2, %val_random
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
; CHECK: ldr {{x[0-9]+}}, [x[[ADDR]]]
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-basic-pic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-basic-pic.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-basic-pic.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-basic-pic.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
define i32 @get_globalvar() {
; CHECK-LABEL: get_globalvar:
- %val = load i32* @var
+ %val = load i32, i32* @var
; CHECK: adrp x[[GOTHI:[0-9]+]], :got:var
; CHECK: ldr x[[GOTLOC:[0-9]+]], [x[[GOTHI]], :got_lo12:var]
; CHECK: ldr w0, [x[[GOTLOC]]]
@@ -16,7 +16,7 @@ define i32 @get_globalvar() {
define i32* @get_globalvaraddr() {
; CHECK-LABEL: get_globalvaraddr:
- %val = load i32* @var
+ %val = load i32, i32* @var
; CHECK: adrp x[[GOTHI:[0-9]+]], :got:var
; CHECK: ldr x0, [x[[GOTHI]], :got_lo12:var]
@@ -28,7 +28,7 @@ define i32* @get_globalvaraddr() {
define i32 @get_hiddenvar() {
; CHECK-LABEL: get_hiddenvar:
- %val = load i32* @hiddenvar
+ %val = load i32, i32* @hiddenvar
; CHECK: adrp x[[HI:[0-9]+]], hiddenvar
; CHECK: ldr w0, [x[[HI]], :lo12:hiddenvar]
@@ -38,7 +38,7 @@ define i32 @get_hiddenvar() {
define i32* @get_hiddenvaraddr() {
; CHECK-LABEL: get_hiddenvaraddr:
- %val = load i32* @hiddenvar
+ %val = load i32, i32* @hiddenvar
; CHECK: adrp [[HI:x[0-9]+]], hiddenvar
; CHECK: add x0, [[HI]], :lo12:hiddenvar
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-bcc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-bcc.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-bcc.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-bcc.ll Fri Feb 27 15:17:42 2015
@@ -28,9 +28,9 @@ define { i64, i1 } @foo(i64* , %Sstruct*
entry:
%.sroa.0 = alloca i72, align 16
%.count.value = getelementptr inbounds %Sstruct, %Sstruct* %1, i64 0, i32 0, i32 0
- %4 = load i64* %.count.value, align 8
+ %4 = load i64, i64* %.count.value, align 8
%.repeatedValue.value = getelementptr inbounds %Sstruct, %Sstruct* %1, i64 0, i32 1, i32 0
- %5 = load i32* %.repeatedValue.value, align 8
+ %5 = load i32, i32* %.repeatedValue.value, align 8
%6 = icmp eq i64 %4, 0
br label %7
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-big-endian-bitconverts.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
define void @test_i64_f64(double* %p, i64* %q) {
; CHECK: ldr
; CHECK: str
- %1 = load double* %p
+ %1 = load double, double* %p
%2 = fadd double %1, %1
%3 = bitcast double %2 to i64
%4 = add i64 %3, %3
@@ -17,7 +17,7 @@ define void @test_i64_f64(double* %p, i6
define void @test_i64_v1i64(<1 x i64>* %p, i64* %q) {
; CHECK: ldr
; CHECK: str
- %1 = load <1 x i64>* %p
+ %1 = load <1 x i64>, <1 x i64>* %p
%2 = add <1 x i64> %1, %1
%3 = bitcast <1 x i64> %2 to i64
%4 = add i64 %3, %3
@@ -30,7 +30,7 @@ define void @test_i64_v2f32(<2 x float>*
; CHECK: ld1 { v{{[0-9]+}}.2s }
; CHECK: rev64 v{{[0-9]+}}.2s
; CHECK: str
- %1 = load <2 x float>* %p
+ %1 = load <2 x float>, <2 x float>* %p
%2 = fadd <2 x float> %1, %1
%3 = bitcast <2 x float> %2 to i64
%4 = add i64 %3, %3
@@ -43,7 +43,7 @@ define void @test_i64_v2i32(<2 x i32>* %
; CHECK: ld1 { v{{[0-9]+}}.2s }
; CHECK: rev64 v{{[0-9]+}}.2s
; CHECK: str
- %1 = load <2 x i32>* %p
+ %1 = load <2 x i32>, <2 x i32>* %p
%2 = add <2 x i32> %1, %1
%3 = bitcast <2 x i32> %2 to i64
%4 = add i64 %3, %3
@@ -56,7 +56,7 @@ define void @test_i64_v4i16(<4 x i16>* %
; CHECK: ld1 { v{{[0-9]+}}.4h }
; CHECK: rev64 v{{[0-9]+}}.4h
; CHECK: str
- %1 = load <4 x i16>* %p
+ %1 = load <4 x i16>, <4 x i16>* %p
%2 = add <4 x i16> %1, %1
%3 = bitcast <4 x i16> %2 to i64
%4 = add i64 %3, %3
@@ -69,7 +69,7 @@ define void @test_i64_v8i8(<8 x i8>* %p,
; CHECK: ld1 { v{{[0-9]+}}.8b }
; CHECK: rev64 v{{[0-9]+}}.8b
; CHECK: str
- %1 = load <8 x i8>* %p
+ %1 = load <8 x i8>, <8 x i8>* %p
%2 = add <8 x i8> %1, %1
%3 = bitcast <8 x i8> %2 to i64
%4 = add i64 %3, %3
@@ -81,7 +81,7 @@ define void @test_i64_v8i8(<8 x i8>* %p,
define void @test_f64_i64(i64* %p, double* %q) {
; CHECK: ldr
; CHECK: str
- %1 = load i64* %p
+ %1 = load i64, i64* %p
%2 = add i64 %1, %1
%3 = bitcast i64 %2 to double
%4 = fadd double %3, %3
@@ -93,7 +93,7 @@ define void @test_f64_i64(i64* %p, doubl
define void @test_f64_v1i64(<1 x i64>* %p, double* %q) {
; CHECK: ldr
; CHECK: str
- %1 = load <1 x i64>* %p
+ %1 = load <1 x i64>, <1 x i64>* %p
%2 = add <1 x i64> %1, %1
%3 = bitcast <1 x i64> %2 to double
%4 = fadd double %3, %3
@@ -106,7 +106,7 @@ define void @test_f64_v2f32(<2 x float>*
; CHECK: ld1 { v{{[0-9]+}}.2s }
; CHECK: rev64 v{{[0-9]+}}.2s
; CHECK: str
- %1 = load <2 x float>* %p
+ %1 = load <2 x float>, <2 x float>* %p
%2 = fadd <2 x float> %1, %1
%3 = bitcast <2 x float> %2 to double
%4 = fadd double %3, %3
@@ -119,7 +119,7 @@ define void @test_f64_v2i32(<2 x i32>* %
; CHECK: ld1 { v{{[0-9]+}}.2s }
; CHECK: rev64 v{{[0-9]+}}.2s
; CHECK: str
- %1 = load <2 x i32>* %p
+ %1 = load <2 x i32>, <2 x i32>* %p
%2 = add <2 x i32> %1, %1
%3 = bitcast <2 x i32> %2 to double
%4 = fadd double %3, %3
@@ -132,7 +132,7 @@ define void @test_f64_v4i16(<4 x i16>* %
; CHECK: ld1 { v{{[0-9]+}}.4h }
; CHECK: rev64 v{{[0-9]+}}.4h
; CHECK: str
- %1 = load <4 x i16>* %p
+ %1 = load <4 x i16>, <4 x i16>* %p
%2 = add <4 x i16> %1, %1
%3 = bitcast <4 x i16> %2 to double
%4 = fadd double %3, %3
@@ -145,7 +145,7 @@ define void @test_f64_v8i8(<8 x i8>* %p,
; CHECK: ld1 { v{{[0-9]+}}.8b }
; CHECK: rev64 v{{[0-9]+}}.8b
; CHECK: str
- %1 = load <8 x i8>* %p
+ %1 = load <8 x i8>, <8 x i8>* %p
%2 = add <8 x i8> %1, %1
%3 = bitcast <8 x i8> %2 to double
%4 = fadd double %3, %3
@@ -157,7 +157,7 @@ define void @test_f64_v8i8(<8 x i8>* %p,
define void @test_v1i64_i64(i64* %p, <1 x i64>* %q) {
; CHECK: ldr
; CHECK: str
- %1 = load i64* %p
+ %1 = load i64, i64* %p
%2 = add i64 %1, %1
%3 = bitcast i64 %2 to <1 x i64>
%4 = add <1 x i64> %3, %3
@@ -169,7 +169,7 @@ define void @test_v1i64_i64(i64* %p, <1
define void @test_v1i64_f64(double* %p, <1 x i64>* %q) {
; CHECK: ldr
; CHECK: str
- %1 = load double* %p
+ %1 = load double, double* %p
%2 = fadd double %1, %1
%3 = bitcast double %2 to <1 x i64>
%4 = add <1 x i64> %3, %3
@@ -182,7 +182,7 @@ define void @test_v1i64_v2f32(<2 x float
; CHECK: ld1 { v{{[0-9]+}}.2s }
; CHECK: rev64 v{{[0-9]+}}.2s
; CHECK: str
- %1 = load <2 x float>* %p
+ %1 = load <2 x float>, <2 x float>* %p
%2 = fadd <2 x float> %1, %1
%3 = bitcast <2 x float> %2 to <1 x i64>
%4 = add <1 x i64> %3, %3
@@ -195,7 +195,7 @@ define void @test_v1i64_v2i32(<2 x i32>*
; CHECK: ld1 { v{{[0-9]+}}.2s }
; CHECK: rev64 v{{[0-9]+}}.2s
; CHECK: str
- %1 = load <2 x i32>* %p
+ %1 = load <2 x i32>, <2 x i32>* %p
%2 = add <2 x i32> %1, %1
%3 = bitcast <2 x i32> %2 to <1 x i64>
%4 = add <1 x i64> %3, %3
@@ -208,7 +208,7 @@ define void @test_v1i64_v4i16(<4 x i16>*
; CHECK: ld1 { v{{[0-9]+}}.4h }
; CHECK: rev64 v{{[0-9]+}}.4h
; CHECK: str
- %1 = load <4 x i16>* %p
+ %1 = load <4 x i16>, <4 x i16>* %p
%2 = add <4 x i16> %1, %1
%3 = bitcast <4 x i16> %2 to <1 x i64>
%4 = add <1 x i64> %3, %3
@@ -221,7 +221,7 @@ define void @test_v1i64_v8i8(<8 x i8>* %
; CHECK: ld1 { v{{[0-9]+}}.8b }
; CHECK: rev64 v{{[0-9]+}}.8b
; CHECK: str
- %1 = load <8 x i8>* %p
+ %1 = load <8 x i8>, <8 x i8>* %p
%2 = add <8 x i8> %1, %1
%3 = bitcast <8 x i8> %2 to <1 x i64>
%4 = add <1 x i64> %3, %3
@@ -234,7 +234,7 @@ define void @test_v2f32_i64(i64* %p, <2
; CHECK: ldr
; CHECK: rev64 v{{[0-9]+}}.2s
; CHECK: st1 { v{{[0-9]+}}.2s }
- %1 = load i64* %p
+ %1 = load i64, i64* %p
%2 = add i64 %1, %1
%3 = bitcast i64 %2 to <2 x float>
%4 = fadd <2 x float> %3, %3
@@ -247,7 +247,7 @@ define void @test_v2f32_f64(double* %p,
; CHECK: ldr
; CHECK: rev64 v{{[0-9]+}}.2s
; CHECK: st1 { v{{[0-9]+}}.2s }
- %1 = load double* %p
+ %1 = load double, double* %p
%2 = fadd double %1, %1
%3 = bitcast double %2 to <2 x float>
%4 = fadd <2 x float> %3, %3
@@ -260,7 +260,7 @@ define void @test_v2f32_v1i64(<1 x i64>*
; CHECK: ldr
; CHECK: rev64 v{{[0-9]+}}.2s
; CHECK: st1 { v{{[0-9]+}}.2s }
- %1 = load <1 x i64>* %p
+ %1 = load <1 x i64>, <1 x i64>* %p
%2 = add <1 x i64> %1, %1
%3 = bitcast <1 x i64> %2 to <2 x float>
%4 = fadd <2 x float> %3, %3
@@ -272,7 +272,7 @@ define void @test_v2f32_v1i64(<1 x i64>*
define void @test_v2f32_v2i32(<2 x i32>* %p, <2 x float>* %q) {
; CHECK: ld1 { v{{[0-9]+}}.2s }
; CHECK: st1 { v{{[0-9]+}}.2s }
- %1 = load <2 x i32>* %p
+ %1 = load <2 x i32>, <2 x i32>* %p
%2 = add <2 x i32> %1, %1
%3 = bitcast <2 x i32> %2 to <2 x float>
%4 = fadd <2 x float> %3, %3
@@ -285,7 +285,7 @@ define void @test_v2f32_v4i16(<4 x i16>*
; CHECK: ld1 { v{{[0-9]+}}.4h }
; CHECK: rev32 v{{[0-9]+}}.4h
; CHECK: st1 { v{{[0-9]+}}.2s }
- %1 = load <4 x i16>* %p
+ %1 = load <4 x i16>, <4 x i16>* %p
%2 = add <4 x i16> %1, %1
%3 = bitcast <4 x i16> %2 to <2 x float>
%4 = fadd <2 x float> %3, %3
@@ -298,7 +298,7 @@ define void @test_v2f32_v8i8(<8 x i8>* %
; CHECK: ld1 { v{{[0-9]+}}.8b }
; CHECK: rev32 v{{[0-9]+}}.8b
; CHECK: st1 { v{{[0-9]+}}.2s }
- %1 = load <8 x i8>* %p
+ %1 = load <8 x i8>, <8 x i8>* %p
%2 = add <8 x i8> %1, %1
%3 = bitcast <8 x i8> %2 to <2 x float>
%4 = fadd <2 x float> %3, %3
@@ -311,7 +311,7 @@ define void @test_v2i32_i64(i64* %p, <2
; CHECK: ldr
; CHECK: rev64 v{{[0-9]+}}.2s
; CHECK: st1 { v{{[0-9]+}}.2s }
- %1 = load i64* %p
+ %1 = load i64, i64* %p
%2 = add i64 %1, %1
%3 = bitcast i64 %2 to <2 x i32>
%4 = add <2 x i32> %3, %3
@@ -324,7 +324,7 @@ define void @test_v2i32_f64(double* %p,
; CHECK: ldr
; CHECK: rev64 v{{[0-9]+}}.2s
; CHECK: st1 { v{{[0-9]+}}.2s }
- %1 = load double* %p
+ %1 = load double, double* %p
%2 = fadd double %1, %1
%3 = bitcast double %2 to <2 x i32>
%4 = add <2 x i32> %3, %3
@@ -337,7 +337,7 @@ define void @test_v2i32_v1i64(<1 x i64>*
; CHECK: ldr
; CHECK: rev64 v{{[0-9]+}}.2s
; CHECK: st1 { v{{[0-9]+}}.2s }
- %1 = load <1 x i64>* %p
+ %1 = load <1 x i64>, <1 x i64>* %p
%2 = add <1 x i64> %1, %1
%3 = bitcast <1 x i64> %2 to <2 x i32>
%4 = add <2 x i32> %3, %3
@@ -349,7 +349,7 @@ define void @test_v2i32_v1i64(<1 x i64>*
define void @test_v2i32_v2f32(<2 x float>* %p, <2 x i32>* %q) {
; CHECK: ld1 { v{{[0-9]+}}.2s }
; CHECK: st1 { v{{[0-9]+}}.2s }
- %1 = load <2 x float>* %p
+ %1 = load <2 x float>, <2 x float>* %p
%2 = fadd <2 x float> %1, %1
%3 = bitcast <2 x float> %2 to <2 x i32>
%4 = add <2 x i32> %3, %3
@@ -362,7 +362,7 @@ define void @test_v2i32_v4i16(<4 x i16>*
; CHECK: ld1 { v{{[0-9]+}}.4h }
; CHECK: rev32 v{{[0-9]+}}.4h
; CHECK: st1 { v{{[0-9]+}}.2s }
- %1 = load <4 x i16>* %p
+ %1 = load <4 x i16>, <4 x i16>* %p
%2 = add <4 x i16> %1, %1
%3 = bitcast <4 x i16> %2 to <2 x i32>
%4 = add <2 x i32> %3, %3
@@ -375,7 +375,7 @@ define void @test_v2i32_v8i8(<8 x i8>* %
; CHECK: ld1 { v{{[0-9]+}}.8b }
; CHECK: rev32 v{{[0-9]+}}.8b
; CHECK: st1 { v{{[0-9]+}}.2s }
- %1 = load <8 x i8>* %p
+ %1 = load <8 x i8>, <8 x i8>* %p
%2 = add <8 x i8> %1, %1
%3 = bitcast <8 x i8> %2 to <2 x i32>
%4 = add <2 x i32> %3, %3
@@ -388,7 +388,7 @@ define void @test_v4i16_i64(i64* %p, <4
; CHECK: ldr
; CHECK: rev64 v{{[0-9]+}}.4h
; CHECK: st1 { v{{[0-9]+}}.4h }
- %1 = load i64* %p
+ %1 = load i64, i64* %p
%2 = add i64 %1, %1
%3 = bitcast i64 %2 to <4 x i16>
%4 = add <4 x i16> %3, %3
@@ -401,7 +401,7 @@ define void @test_v4i16_f64(double* %p,
; CHECK: ldr
; CHECK: rev64 v{{[0-9]+}}.4h
; CHECK: st1 { v{{[0-9]+}}.4h }
- %1 = load double* %p
+ %1 = load double, double* %p
%2 = fadd double %1, %1
%3 = bitcast double %2 to <4 x i16>
%4 = add <4 x i16> %3, %3
@@ -414,7 +414,7 @@ define void @test_v4i16_v1i64(<1 x i64>*
; CHECK: ldr
; CHECK: rev64 v{{[0-9]+}}.4h
; CHECK: st1 { v{{[0-9]+}}.4h }
- %1 = load <1 x i64>* %p
+ %1 = load <1 x i64>, <1 x i64>* %p
%2 = add <1 x i64> %1, %1
%3 = bitcast <1 x i64> %2 to <4 x i16>
%4 = add <4 x i16> %3, %3
@@ -427,7 +427,7 @@ define void @test_v4i16_v2f32(<2 x float
; CHECK: ld1 { v{{[0-9]+}}.2s }
; CHECK: rev32 v{{[0-9]+}}.4h
; CHECK: st1 { v{{[0-9]+}}.4h }
- %1 = load <2 x float>* %p
+ %1 = load <2 x float>, <2 x float>* %p
%2 = fadd <2 x float> %1, %1
%3 = bitcast <2 x float> %2 to <4 x i16>
%4 = add <4 x i16> %3, %3
@@ -440,7 +440,7 @@ define void @test_v4i16_v2i32(<2 x i32>*
; CHECK: ld1 { v{{[0-9]+}}.2s }
; CHECK: rev32 v{{[0-9]+}}.4h
; CHECK: st1 { v{{[0-9]+}}.4h }
- %1 = load <2 x i32>* %p
+ %1 = load <2 x i32>, <2 x i32>* %p
%2 = add <2 x i32> %1, %1
%3 = bitcast <2 x i32> %2 to <4 x i16>
%4 = add <4 x i16> %3, %3
@@ -453,7 +453,7 @@ define void @test_v4i16_v8i8(<8 x i8>* %
; CHECK: ld1 { v{{[0-9]+}}.8b }
; CHECK: rev16 v{{[0-9]+}}.8b
; CHECK: st1 { v{{[0-9]+}}.4h }
- %1 = load <8 x i8>* %p
+ %1 = load <8 x i8>, <8 x i8>* %p
%2 = add <8 x i8> %1, %1
%3 = bitcast <8 x i8> %2 to <4 x i16>
%4 = add <4 x i16> %3, %3
@@ -466,7 +466,7 @@ define void @test_v8i8_i64(i64* %p, <8 x
; CHECK: ldr
; CHECK: rev64 v{{[0-9]+}}.8b
; CHECK: st1 { v{{[0-9]+}}.8b }
- %1 = load i64* %p
+ %1 = load i64, i64* %p
%2 = add i64 %1, %1
%3 = bitcast i64 %2 to <8 x i8>
%4 = add <8 x i8> %3, %3
@@ -479,7 +479,7 @@ define void @test_v8i8_f64(double* %p, <
; CHECK: ldr
; CHECK: rev64 v{{[0-9]+}}.8b
; CHECK: st1 { v{{[0-9]+}}.8b }
- %1 = load double* %p
+ %1 = load double, double* %p
%2 = fadd double %1, %1
%3 = bitcast double %2 to <8 x i8>
%4 = add <8 x i8> %3, %3
@@ -492,7 +492,7 @@ define void @test_v8i8_v1i64(<1 x i64>*
; CHECK: ldr
; CHECK: rev64 v{{[0-9]+}}.8b
; CHECK: st1 { v{{[0-9]+}}.8b }
- %1 = load <1 x i64>* %p
+ %1 = load <1 x i64>, <1 x i64>* %p
%2 = add <1 x i64> %1, %1
%3 = bitcast <1 x i64> %2 to <8 x i8>
%4 = add <8 x i8> %3, %3
@@ -505,7 +505,7 @@ define void @test_v8i8_v2f32(<2 x float>
; CHECK: ld1 { v{{[0-9]+}}.2s }
; CHECK: rev32 v{{[0-9]+}}.8b
; CHECK: st1 { v{{[0-9]+}}.8b }
- %1 = load <2 x float>* %p
+ %1 = load <2 x float>, <2 x float>* %p
%2 = fadd <2 x float> %1, %1
%3 = bitcast <2 x float> %2 to <8 x i8>
%4 = add <8 x i8> %3, %3
@@ -518,7 +518,7 @@ define void @test_v8i8_v2i32(<2 x i32>*
; CHECK: ld1 { v{{[0-9]+}}.2s }
; CHECK: rev32 v{{[0-9]+}}.8b
; CHECK: st1 { v{{[0-9]+}}.8b }
- %1 = load <2 x i32>* %p
+ %1 = load <2 x i32>, <2 x i32>* %p
%2 = add <2 x i32> %1, %1
%3 = bitcast <2 x i32> %2 to <8 x i8>
%4 = add <8 x i8> %3, %3
@@ -531,7 +531,7 @@ define void @test_v8i8_v4i16(<4 x i16>*
; CHECK: ld1 { v{{[0-9]+}}.4h }
; CHECK: rev16 v{{[0-9]+}}.8b
; CHECK: st1 { v{{[0-9]+}}.8b }
- %1 = load <4 x i16>* %p
+ %1 = load <4 x i16>, <4 x i16>* %p
%2 = add <4 x i16> %1, %1
%3 = bitcast <4 x i16> %2 to <8 x i8>
%4 = add <8 x i8> %3, %3
@@ -544,7 +544,7 @@ define void @test_f128_v2f64(<2 x double
; CHECK: ld1 { v{{[0-9]+}}.2d }
; CHECK: ext
; CHECK: str
- %1 = load <2 x double>* %p
+ %1 = load <2 x double>, <2 x double>* %p
%2 = fadd <2 x double> %1, %1
%3 = bitcast <2 x double> %2 to fp128
%4 = fadd fp128 %3, %3
@@ -557,7 +557,7 @@ define void @test_f128_v2i64(<2 x i64>*
; CHECK: ld1 { v{{[0-9]+}}.2d }
; CHECK: ext
; CHECK: str
- %1 = load <2 x i64>* %p
+ %1 = load <2 x i64>, <2 x i64>* %p
%2 = add <2 x i64> %1, %1
%3 = bitcast <2 x i64> %2 to fp128
%4 = fadd fp128 %3, %3
@@ -572,7 +572,7 @@ define void @test_f128_v4f32(<4 x float>
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: ext
; CHECK: str q
- %1 = load <4 x float>* %p
+ %1 = load <4 x float>, <4 x float>* %p
%2 = fadd <4 x float> %1, %1
%3 = bitcast <4 x float> %2 to fp128
%4 = fadd fp128 %3, %3
@@ -586,7 +586,7 @@ define void @test_f128_v4i32(<4 x i32>*
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: ext
; CHECK: str
- %1 = load <4 x i32>* %p
+ %1 = load <4 x i32>, <4 x i32>* %p
%2 = add <4 x i32> %1, %1
%3 = bitcast <4 x i32> %2 to fp128
%4 = fadd fp128 %3, %3
@@ -600,7 +600,7 @@ define void @test_f128_v8i16(<8 x i16>*
; CHECK: rev64 v{{[0-9]+}}.8h
; CHECK: ext
; CHECK: str
- %1 = load <8 x i16>* %p
+ %1 = load <8 x i16>, <8 x i16>* %p
%2 = add <8 x i16> %1, %1
%3 = bitcast <8 x i16> %2 to fp128
%4 = fadd fp128 %3, %3
@@ -613,7 +613,7 @@ define void @test_f128_v16i8(<16 x i8>*
; CHECK: ld1 { v{{[0-9]+}}.16b }
; CHECK: ext
; CHECK: str q
- %1 = load <16 x i8>* %p
+ %1 = load <16 x i8>, <16 x i8>* %p
%2 = add <16 x i8> %1, %1
%3 = bitcast <16 x i8> %2 to fp128
%4 = fadd fp128 %3, %3
@@ -626,7 +626,7 @@ define void @test_v2f64_f128(fp128* %p,
; CHECK: ldr
; CHECK: ext
; CHECK: st1 { v{{[0-9]+}}.2d }
- %1 = load fp128* %p
+ %1 = load fp128, fp128* %p
%2 = fadd fp128 %1, %1
%3 = bitcast fp128 %2 to <2 x double>
%4 = fadd <2 x double> %3, %3
@@ -638,7 +638,7 @@ define void @test_v2f64_f128(fp128* %p,
define void @test_v2f64_v2i64(<2 x i64>* %p, <2 x double>* %q) {
; CHECK: ld1 { v{{[0-9]+}}.2d }
; CHECK: st1 { v{{[0-9]+}}.2d }
- %1 = load <2 x i64>* %p
+ %1 = load <2 x i64>, <2 x i64>* %p
%2 = add <2 x i64> %1, %1
%3 = bitcast <2 x i64> %2 to <2 x double>
%4 = fadd <2 x double> %3, %3
@@ -652,7 +652,7 @@ define void @test_v2f64_v4f32(<4 x float
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: st1 { v{{[0-9]+}}.2d }
- %1 = load <4 x float>* %p
+ %1 = load <4 x float>, <4 x float>* %p
%2 = fadd <4 x float> %1, %1
%3 = bitcast <4 x float> %2 to <2 x double>
%4 = fadd <2 x double> %3, %3
@@ -665,7 +665,7 @@ define void @test_v2f64_v4i32(<4 x i32>*
; CHECK: ld1 { v{{[0-9]+}}.4s }
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: st1 { v{{[0-9]+}}.2d }
- %1 = load <4 x i32>* %p
+ %1 = load <4 x i32>, <4 x i32>* %p
%2 = add <4 x i32> %1, %1
%3 = bitcast <4 x i32> %2 to <2 x double>
%4 = fadd <2 x double> %3, %3
@@ -678,7 +678,7 @@ define void @test_v2f64_v8i16(<8 x i16>*
; CHECK: ld1 { v{{[0-9]+}}.8h }
; CHECK: rev64 v{{[0-9]+}}.8h
; CHECK: st1 { v{{[0-9]+}}.2d }
- %1 = load <8 x i16>* %p
+ %1 = load <8 x i16>, <8 x i16>* %p
%2 = add <8 x i16> %1, %1
%3 = bitcast <8 x i16> %2 to <2 x double>
%4 = fadd <2 x double> %3, %3
@@ -691,7 +691,7 @@ define void @test_v2f64_v16i8(<16 x i8>*
; CHECK: ld1 { v{{[0-9]+}}.16b }
; CHECK: rev64 v{{[0-9]+}}.16b
; CHECK: st1 { v{{[0-9]+}}.2d }
- %1 = load <16 x i8>* %p
+ %1 = load <16 x i8>, <16 x i8>* %p
%2 = add <16 x i8> %1, %1
%3 = bitcast <16 x i8> %2 to <2 x double>
%4 = fadd <2 x double> %3, %3
@@ -704,7 +704,7 @@ define void @test_v2i64_f128(fp128* %p,
; CHECK: ldr
; CHECK: ext
; CHECK: st1 { v{{[0-9]+}}.2d }
- %1 = load fp128* %p
+ %1 = load fp128, fp128* %p
%2 = fadd fp128 %1, %1
%3 = bitcast fp128 %2 to <2 x i64>
%4 = add <2 x i64> %3, %3
@@ -716,7 +716,7 @@ define void @test_v2i64_f128(fp128* %p,
define void @test_v2i64_v2f64(<2 x double>* %p, <2 x i64>* %q) {
; CHECK: ld1 { v{{[0-9]+}}.2d }
; CHECK: st1 { v{{[0-9]+}}.2d }
- %1 = load <2 x double>* %p
+ %1 = load <2 x double>, <2 x double>* %p
%2 = fadd <2 x double> %1, %1
%3 = bitcast <2 x double> %2 to <2 x i64>
%4 = add <2 x i64> %3, %3
@@ -730,7 +730,7 @@ define void @test_v2i64_v4f32(<4 x float
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: st1 { v{{[0-9]+}}.2d }
- %1 = load <4 x float>* %p
+ %1 = load <4 x float>, <4 x float>* %p
%2 = fadd <4 x float> %1, %1
%3 = bitcast <4 x float> %2 to <2 x i64>
%4 = add <2 x i64> %3, %3
@@ -743,7 +743,7 @@ define void @test_v2i64_v4i32(<4 x i32>*
; CHECK: ld1 { v{{[0-9]+}}.4s }
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: st1 { v{{[0-9]+}}.2d }
- %1 = load <4 x i32>* %p
+ %1 = load <4 x i32>, <4 x i32>* %p
%2 = add <4 x i32> %1, %1
%3 = bitcast <4 x i32> %2 to <2 x i64>
%4 = add <2 x i64> %3, %3
@@ -756,7 +756,7 @@ define void @test_v2i64_v8i16(<8 x i16>*
; CHECK: ld1 { v{{[0-9]+}}.8h }
; CHECK: rev64 v{{[0-9]+}}.8h
; CHECK: st1 { v{{[0-9]+}}.2d }
- %1 = load <8 x i16>* %p
+ %1 = load <8 x i16>, <8 x i16>* %p
%2 = add <8 x i16> %1, %1
%3 = bitcast <8 x i16> %2 to <2 x i64>
%4 = add <2 x i64> %3, %3
@@ -769,7 +769,7 @@ define void @test_v2i64_v16i8(<16 x i8>*
; CHECK: ld1 { v{{[0-9]+}}.16b }
; CHECK: rev64 v{{[0-9]+}}.16b
; CHECK: st1 { v{{[0-9]+}}.2d }
- %1 = load <16 x i8>* %p
+ %1 = load <16 x i8>, <16 x i8>* %p
%2 = add <16 x i8> %1, %1
%3 = bitcast <16 x i8> %2 to <2 x i64>
%4 = add <2 x i64> %3, %3
@@ -784,7 +784,7 @@ define void @test_v4f32_f128(fp128* %p,
; CHECK: ext
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: st1 { v{{[0-9]+}}.2d }
- %1 = load fp128* %p
+ %1 = load fp128, fp128* %p
%2 = fadd fp128 %1, %1
%3 = bitcast fp128 %2 to <4 x float>
%4 = fadd <4 x float> %3, %3
@@ -798,7 +798,7 @@ define void @test_v4f32_v2f64(<2 x doubl
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: st1 { v{{[0-9]+}}.2d }
- %1 = load <2 x double>* %p
+ %1 = load <2 x double>, <2 x double>* %p
%2 = fadd <2 x double> %1, %1
%3 = bitcast <2 x double> %2 to <4 x float>
%4 = fadd <4 x float> %3, %3
@@ -812,7 +812,7 @@ define void @test_v4f32_v2i64(<2 x i64>*
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: st1 { v{{[0-9]+}}.2d }
- %1 = load <2 x i64>* %p
+ %1 = load <2 x i64>, <2 x i64>* %p
%2 = add <2 x i64> %1, %1
%3 = bitcast <2 x i64> %2 to <4 x float>
%4 = fadd <4 x float> %3, %3
@@ -825,7 +825,7 @@ define void @test_v4f32_v4i32(<4 x i32>*
; CHECK: ld1 { v{{[0-9]+}}.4s }
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: st1 { v{{[0-9]+}}.2d }
- %1 = load <4 x i32>* %p
+ %1 = load <4 x i32>, <4 x i32>* %p
%2 = add <4 x i32> %1, %1
%3 = bitcast <4 x i32> %2 to <4 x float>
%4 = fadd <4 x float> %3, %3
@@ -839,7 +839,7 @@ define void @test_v4f32_v8i16(<8 x i16>*
; CHECK: rev32 v{{[0-9]+}}.8h
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: st1 { v{{[0-9]+}}.2d }
- %1 = load <8 x i16>* %p
+ %1 = load <8 x i16>, <8 x i16>* %p
%2 = add <8 x i16> %1, %1
%3 = bitcast <8 x i16> %2 to <4 x float>
%4 = fadd <4 x float> %3, %3
@@ -853,7 +853,7 @@ define void @test_v4f32_v16i8(<16 x i8>*
; CHECK: rev32 v{{[0-9]+}}.16b
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: st1 { v{{[0-9]+}}.2d }
- %1 = load <16 x i8>* %p
+ %1 = load <16 x i8>, <16 x i8>* %p
%2 = add <16 x i8> %1, %1
%3 = bitcast <16 x i8> %2 to <4 x float>
%4 = fadd <4 x float> %3, %3
@@ -867,7 +867,7 @@ define void @test_v4i32_f128(fp128* %p,
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: ext
; CHECK: st1 { v{{[0-9]+}}.4s }
- %1 = load fp128* %p
+ %1 = load fp128, fp128* %p
%2 = fadd fp128 %1, %1
%3 = bitcast fp128 %2 to <4 x i32>
%4 = add <4 x i32> %3, %3
@@ -880,7 +880,7 @@ define void @test_v4i32_v2f64(<2 x doubl
; CHECK: ld1 { v{{[0-9]+}}.2d }
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: st1 { v{{[0-9]+}}.4s }
- %1 = load <2 x double>* %p
+ %1 = load <2 x double>, <2 x double>* %p
%2 = fadd <2 x double> %1, %1
%3 = bitcast <2 x double> %2 to <4 x i32>
%4 = add <4 x i32> %3, %3
@@ -893,7 +893,7 @@ define void @test_v4i32_v2i64(<2 x i64>*
; CHECK: ld1 { v{{[0-9]+}}.2d }
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: st1 { v{{[0-9]+}}.4s }
- %1 = load <2 x i64>* %p
+ %1 = load <2 x i64>, <2 x i64>* %p
%2 = add <2 x i64> %1, %1
%3 = bitcast <2 x i64> %2 to <4 x i32>
%4 = add <4 x i32> %3, %3
@@ -906,7 +906,7 @@ define void @test_v4i32_v4f32(<4 x float
; CHECK: ld1 { v{{[0-9]+}}.2d }
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: st1 { v{{[0-9]+}}.4s }
- %1 = load <4 x float>* %p
+ %1 = load <4 x float>, <4 x float>* %p
%2 = fadd <4 x float> %1, %1
%3 = bitcast <4 x float> %2 to <4 x i32>
%4 = add <4 x i32> %3, %3
@@ -919,7 +919,7 @@ define void @test_v4i32_v8i16(<8 x i16>*
; CHECK: ld1 { v{{[0-9]+}}.8h }
; CHECK: rev32 v{{[0-9]+}}.8h
; CHECK: st1 { v{{[0-9]+}}.4s }
- %1 = load <8 x i16>* %p
+ %1 = load <8 x i16>, <8 x i16>* %p
%2 = add <8 x i16> %1, %1
%3 = bitcast <8 x i16> %2 to <4 x i32>
%4 = add <4 x i32> %3, %3
@@ -932,7 +932,7 @@ define void @test_v4i32_v16i8(<16 x i8>*
; CHECK: ld1 { v{{[0-9]+}}.16b }
; CHECK: rev32 v{{[0-9]+}}.16b
; CHECK: st1 { v{{[0-9]+}}.4s }
- %1 = load <16 x i8>* %p
+ %1 = load <16 x i8>, <16 x i8>* %p
%2 = add <16 x i8> %1, %1
%3 = bitcast <16 x i8> %2 to <4 x i32>
%4 = add <4 x i32> %3, %3
@@ -946,7 +946,7 @@ define void @test_v8i16_f128(fp128* %p,
; CHECK: rev64 v{{[0-9]+}}.8h
; CHECK: ext
; CHECK: st1 { v{{[0-9]+}}.8h }
- %1 = load fp128* %p
+ %1 = load fp128, fp128* %p
%2 = fadd fp128 %1, %1
%3 = bitcast fp128 %2 to <8 x i16>
%4 = add <8 x i16> %3, %3
@@ -959,7 +959,7 @@ define void @test_v8i16_v2f64(<2 x doubl
; CHECK: ld1 { v{{[0-9]+}}.2d }
; CHECK: rev64 v{{[0-9]+}}.8h
; CHECK: st1 { v{{[0-9]+}}.8h }
- %1 = load <2 x double>* %p
+ %1 = load <2 x double>, <2 x double>* %p
%2 = fadd <2 x double> %1, %1
%3 = bitcast <2 x double> %2 to <8 x i16>
%4 = add <8 x i16> %3, %3
@@ -972,7 +972,7 @@ define void @test_v8i16_v2i64(<2 x i64>*
; CHECK: ld1 { v{{[0-9]+}}.2d }
; CHECK: rev64 v{{[0-9]+}}.8h
; CHECK: st1 { v{{[0-9]+}}.8h }
- %1 = load <2 x i64>* %p
+ %1 = load <2 x i64>, <2 x i64>* %p
%2 = add <2 x i64> %1, %1
%3 = bitcast <2 x i64> %2 to <8 x i16>
%4 = add <8 x i16> %3, %3
@@ -986,7 +986,7 @@ define void @test_v8i16_v4f32(<4 x float
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: rev32 v{{[0-9]+}}.8h
; CHECK: st1 { v{{[0-9]+}}.8h }
- %1 = load <4 x float>* %p
+ %1 = load <4 x float>, <4 x float>* %p
%2 = fadd <4 x float> %1, %1
%3 = bitcast <4 x float> %2 to <8 x i16>
%4 = add <8 x i16> %3, %3
@@ -999,7 +999,7 @@ define void @test_v8i16_v4i32(<4 x i32>*
; CHECK: ld1 { v{{[0-9]+}}.4s }
; CHECK: rev32 v{{[0-9]+}}.8h
; CHECK: st1 { v{{[0-9]+}}.8h }
- %1 = load <4 x i32>* %p
+ %1 = load <4 x i32>, <4 x i32>* %p
%2 = add <4 x i32> %1, %1
%3 = bitcast <4 x i32> %2 to <8 x i16>
%4 = add <8 x i16> %3, %3
@@ -1012,7 +1012,7 @@ define void @test_v8i16_v16i8(<16 x i8>*
; CHECK: ld1 { v{{[0-9]+}}.16b }
; CHECK: rev16 v{{[0-9]+}}.16b
; CHECK: st1 { v{{[0-9]+}}.8h }
- %1 = load <16 x i8>* %p
+ %1 = load <16 x i8>, <16 x i8>* %p
%2 = add <16 x i8> %1, %1
%3 = bitcast <16 x i8> %2 to <8 x i16>
%4 = add <8 x i16> %3, %3
@@ -1026,7 +1026,7 @@ define void @test_v16i8_f128(fp128* %p,
; CHECK: rev64 v{{[0-9]+}}.16b
; CHECK: ext
; CHECK: st1 { v{{[0-9]+}}.16b }
- %1 = load fp128* %p
+ %1 = load fp128, fp128* %p
%2 = fadd fp128 %1, %1
%3 = bitcast fp128 %2 to <16 x i8>
%4 = add <16 x i8> %3, %3
@@ -1039,7 +1039,7 @@ define void @test_v16i8_v2f64(<2 x doubl
; CHECK: ld1 { v{{[0-9]+}}.2d }
; CHECK: rev64 v{{[0-9]+}}.16b
; CHECK: st1 { v{{[0-9]+}}.16b }
- %1 = load <2 x double>* %p
+ %1 = load <2 x double>, <2 x double>* %p
%2 = fadd <2 x double> %1, %1
%3 = bitcast <2 x double> %2 to <16 x i8>
%4 = add <16 x i8> %3, %3
@@ -1052,7 +1052,7 @@ define void @test_v16i8_v2i64(<2 x i64>*
; CHECK: ld1 { v{{[0-9]+}}.2d }
; CHECK: rev64 v{{[0-9]+}}.16b
; CHECK: st1 { v{{[0-9]+}}.16b }
- %1 = load <2 x i64>* %p
+ %1 = load <2 x i64>, <2 x i64>* %p
%2 = add <2 x i64> %1, %1
%3 = bitcast <2 x i64> %2 to <16 x i8>
%4 = add <16 x i8> %3, %3
@@ -1066,7 +1066,7 @@ define void @test_v16i8_v4f32(<4 x float
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: rev32 v{{[0-9]+}}.16b
; CHECK: st1 { v{{[0-9]+}}.16b }
- %1 = load <4 x float>* %p
+ %1 = load <4 x float>, <4 x float>* %p
%2 = fadd <4 x float> %1, %1
%3 = bitcast <4 x float> %2 to <16 x i8>
%4 = add <16 x i8> %3, %3
@@ -1079,7 +1079,7 @@ define void @test_v16i8_v4i32(<4 x i32>*
; CHECK: ld1 { v{{[0-9]+}}.4s }
; CHECK: rev32 v{{[0-9]+}}.16b
; CHECK: st1 { v{{[0-9]+}}.16b }
- %1 = load <4 x i32>* %p
+ %1 = load <4 x i32>, <4 x i32>* %p
%2 = add <4 x i32> %1, %1
%3 = bitcast <4 x i32> %2 to <16 x i8>
%4 = add <16 x i8> %3, %3
@@ -1092,7 +1092,7 @@ define void @test_v16i8_v8i16(<8 x i16>*
; CHECK: ld1 { v{{[0-9]+}}.8h }
; CHECK: rev16 v{{[0-9]+}}.16b
; CHECK: st1 { v{{[0-9]+}}.16b }
- %1 = load <8 x i16>* %p
+ %1 = load <8 x i16>, <8 x i16>* %p
%2 = add <8 x i16> %1, %1
%3 = bitcast <8 x i16> %2 to <16 x i8>
%4 = add <16 x i8> %3, %3
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-big-endian-varargs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-big-endian-varargs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-big-endian-varargs.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-big-endian-varargs.ll Fri Feb 27 15:17:42 2015
@@ -22,7 +22,7 @@ entry:
%vl1 = bitcast %struct.__va_list* %vl to i8*
call void @llvm.va_start(i8* %vl1)
%vr_offs_p = getelementptr inbounds %struct.__va_list, %struct.__va_list* %vl, i64 0, i32 4
- %vr_offs = load i32* %vr_offs_p, align 4
+ %vr_offs = load i32, i32* %vr_offs_p, align 4
%0 = icmp sgt i32 %vr_offs, -1
br i1 %0, label %vaarg.on_stack, label %vaarg.maybe_reg
@@ -34,7 +34,7 @@ vaarg.maybe_reg:
vaarg.in_reg: ; preds = %vaarg.maybe_reg
%reg_top_p = getelementptr inbounds %struct.__va_list, %struct.__va_list* %vl, i64 0, i32 2
- %reg_top = load i8** %reg_top_p, align 8
+ %reg_top = load i8*, i8** %reg_top_p, align 8
%1 = sext i32 %vr_offs to i64
%2 = getelementptr i8, i8* %reg_top, i64 %1
%3 = ptrtoint i8* %2 to i64
@@ -44,7 +44,7 @@ vaarg.in_reg:
vaarg.on_stack: ; preds = %vaarg.maybe_reg, %entry
%stack_p = getelementptr inbounds %struct.__va_list, %struct.__va_list* %vl, i64 0, i32 0
- %stack = load i8** %stack_p, align 8
+ %stack = load i8*, i8** %stack_p, align 8
%new_stack = getelementptr i8, i8* %stack, i64 8
store i8* %new_stack, i8** %stack_p, align 8
br label %vaarg.end
@@ -52,7 +52,7 @@ vaarg.on_stack:
vaarg.end: ; preds = %vaarg.on_stack, %vaarg.in_reg
%.sink = phi i8* [ %4, %vaarg.in_reg ], [ %stack, %vaarg.on_stack ]
%5 = bitcast i8* %.sink to double*
- %6 = load double* %5, align 8
+ %6 = load double, double* %5, align 8
call void @llvm.va_end(i8* %vl1)
ret double %6
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-big-endian-vector-caller.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
declare i64 @test_i64_f64_helper(double %p)
define void @test_i64_f64(double* %p, i64* %q) {
; CHECK-NOT: rev
- %1 = load double* %p
+ %1 = load double, double* %p
%2 = fadd double %1, %1
%3 = call i64 @test_i64_f64_helper(double %2)
%4 = add i64 %3, %3
@@ -17,7 +17,7 @@ define void @test_i64_f64(double* %p, i6
declare i64 @test_i64_v1i64_helper(<1 x i64> %p)
define void @test_i64_v1i64(<1 x i64>* %p, i64* %q) {
; CHECK-NOT: rev
- %1 = load <1 x i64>* %p
+ %1 = load <1 x i64>, <1 x i64>* %p
%2 = add <1 x i64> %1, %1
%3 = call i64 @test_i64_v1i64_helper(<1 x i64> %2)
%4 = add i64 %3, %3
@@ -29,7 +29,7 @@ define void @test_i64_v1i64(<1 x i64>* %
declare i64 @test_i64_v2f32_helper(<2 x float> %p)
define void @test_i64_v2f32(<2 x float>* %p, i64* %q) {
; CHECK: rev64 v{{[0-9]+}}.2s
- %1 = load <2 x float>* %p
+ %1 = load <2 x float>, <2 x float>* %p
%2 = fadd <2 x float> %1, %1
%3 = call i64 @test_i64_v2f32_helper(<2 x float> %2)
%4 = add i64 %3, %3
@@ -41,7 +41,7 @@ define void @test_i64_v2f32(<2 x float>*
declare i64 @test_i64_v2i32_helper(<2 x i32> %p)
define void @test_i64_v2i32(<2 x i32>* %p, i64* %q) {
; CHECK: rev64 v{{[0-9]+}}.2s
- %1 = load <2 x i32>* %p
+ %1 = load <2 x i32>, <2 x i32>* %p
%2 = add <2 x i32> %1, %1
%3 = call i64 @test_i64_v2i32_helper(<2 x i32> %2)
%4 = add i64 %3, %3
@@ -53,7 +53,7 @@ define void @test_i64_v2i32(<2 x i32>* %
declare i64 @test_i64_v4i16_helper(<4 x i16> %p)
define void @test_i64_v4i16(<4 x i16>* %p, i64* %q) {
; CHECK: rev64 v{{[0-9]+}}.4h
- %1 = load <4 x i16>* %p
+ %1 = load <4 x i16>, <4 x i16>* %p
%2 = add <4 x i16> %1, %1
%3 = call i64 @test_i64_v4i16_helper(<4 x i16> %2)
%4 = add i64 %3, %3
@@ -65,7 +65,7 @@ define void @test_i64_v4i16(<4 x i16>* %
declare i64 @test_i64_v8i8_helper(<8 x i8> %p)
define void @test_i64_v8i8(<8 x i8>* %p, i64* %q) {
; CHECK: rev64 v{{[0-9]+}}.8b
- %1 = load <8 x i8>* %p
+ %1 = load <8 x i8>, <8 x i8>* %p
%2 = add <8 x i8> %1, %1
%3 = call i64 @test_i64_v8i8_helper(<8 x i8> %2)
%4 = add i64 %3, %3
@@ -77,7 +77,7 @@ define void @test_i64_v8i8(<8 x i8>* %p,
declare double @test_f64_i64_helper(i64 %p)
define void @test_f64_i64(i64* %p, double* %q) {
; CHECK-NOT: rev
- %1 = load i64* %p
+ %1 = load i64, i64* %p
%2 = add i64 %1, %1
%3 = call double @test_f64_i64_helper(i64 %2)
%4 = fadd double %3, %3
@@ -89,7 +89,7 @@ define void @test_f64_i64(i64* %p, doubl
declare double @test_f64_v1i64_helper(<1 x i64> %p)
define void @test_f64_v1i64(<1 x i64>* %p, double* %q) {
; CHECK-NOT: rev
- %1 = load <1 x i64>* %p
+ %1 = load <1 x i64>, <1 x i64>* %p
%2 = add <1 x i64> %1, %1
%3 = call double @test_f64_v1i64_helper(<1 x i64> %2)
%4 = fadd double %3, %3
@@ -101,7 +101,7 @@ define void @test_f64_v1i64(<1 x i64>* %
declare double @test_f64_v2f32_helper(<2 x float> %p)
define void @test_f64_v2f32(<2 x float>* %p, double* %q) {
; CHECK: rev64 v{{[0-9]+}}.2s
- %1 = load <2 x float>* %p
+ %1 = load <2 x float>, <2 x float>* %p
%2 = fadd <2 x float> %1, %1
%3 = call double @test_f64_v2f32_helper(<2 x float> %2)
%4 = fadd double %3, %3
@@ -113,7 +113,7 @@ define void @test_f64_v2f32(<2 x float>*
declare double @test_f64_v2i32_helper(<2 x i32> %p)
define void @test_f64_v2i32(<2 x i32>* %p, double* %q) {
; CHECK: rev64 v{{[0-9]+}}.2s
- %1 = load <2 x i32>* %p
+ %1 = load <2 x i32>, <2 x i32>* %p
%2 = add <2 x i32> %1, %1
%3 = call double @test_f64_v2i32_helper(<2 x i32> %2)
%4 = fadd double %3, %3
@@ -125,7 +125,7 @@ define void @test_f64_v2i32(<2 x i32>* %
declare double @test_f64_v4i16_helper(<4 x i16> %p)
define void @test_f64_v4i16(<4 x i16>* %p, double* %q) {
; CHECK: rev64 v{{[0-9]+}}.4h
- %1 = load <4 x i16>* %p
+ %1 = load <4 x i16>, <4 x i16>* %p
%2 = add <4 x i16> %1, %1
%3 = call double @test_f64_v4i16_helper(<4 x i16> %2)
%4 = fadd double %3, %3
@@ -137,7 +137,7 @@ define void @test_f64_v4i16(<4 x i16>* %
declare double @test_f64_v8i8_helper(<8 x i8> %p)
define void @test_f64_v8i8(<8 x i8>* %p, double* %q) {
; CHECK: rev64 v{{[0-9]+}}.8b
- %1 = load <8 x i8>* %p
+ %1 = load <8 x i8>, <8 x i8>* %p
%2 = add <8 x i8> %1, %1
%3 = call double @test_f64_v8i8_helper(<8 x i8> %2)
%4 = fadd double %3, %3
@@ -149,7 +149,7 @@ define void @test_f64_v8i8(<8 x i8>* %p,
declare <1 x i64> @test_v1i64_i64_helper(i64 %p)
define void @test_v1i64_i64(i64* %p, <1 x i64>* %q) {
; CHECK-NOT: rev
- %1 = load i64* %p
+ %1 = load i64, i64* %p
%2 = add i64 %1, %1
%3 = call <1 x i64> @test_v1i64_i64_helper(i64 %2)
%4 = add <1 x i64> %3, %3
@@ -161,7 +161,7 @@ define void @test_v1i64_i64(i64* %p, <1
declare <1 x i64> @test_v1i64_f64_helper(double %p)
define void @test_v1i64_f64(double* %p, <1 x i64>* %q) {
; CHECK-NOT: rev
- %1 = load double* %p
+ %1 = load double, double* %p
%2 = fadd double %1, %1
%3 = call <1 x i64> @test_v1i64_f64_helper(double %2)
%4 = add <1 x i64> %3, %3
@@ -173,7 +173,7 @@ define void @test_v1i64_f64(double* %p,
declare <1 x i64> @test_v1i64_v2f32_helper(<2 x float> %p)
define void @test_v1i64_v2f32(<2 x float>* %p, <1 x i64>* %q) {
; CHECK: rev64 v{{[0-9]+}}.2s
- %1 = load <2 x float>* %p
+ %1 = load <2 x float>, <2 x float>* %p
%2 = fadd <2 x float> %1, %1
%3 = call <1 x i64> @test_v1i64_v2f32_helper(<2 x float> %2)
%4 = add <1 x i64> %3, %3
@@ -185,7 +185,7 @@ define void @test_v1i64_v2f32(<2 x float
declare <1 x i64> @test_v1i64_v2i32_helper(<2 x i32> %p)
define void @test_v1i64_v2i32(<2 x i32>* %p, <1 x i64>* %q) {
; CHECK: rev64 v{{[0-9]+}}.2s
- %1 = load <2 x i32>* %p
+ %1 = load <2 x i32>, <2 x i32>* %p
%2 = add <2 x i32> %1, %1
%3 = call <1 x i64> @test_v1i64_v2i32_helper(<2 x i32> %2)
%4 = add <1 x i64> %3, %3
@@ -197,7 +197,7 @@ define void @test_v1i64_v2i32(<2 x i32>*
declare <1 x i64> @test_v1i64_v4i16_helper(<4 x i16> %p)
define void @test_v1i64_v4i16(<4 x i16>* %p, <1 x i64>* %q) {
; CHECK: rev64 v{{[0-9]+}}.4h
- %1 = load <4 x i16>* %p
+ %1 = load <4 x i16>, <4 x i16>* %p
%2 = add <4 x i16> %1, %1
%3 = call <1 x i64> @test_v1i64_v4i16_helper(<4 x i16> %2)
%4 = add <1 x i64> %3, %3
@@ -209,7 +209,7 @@ define void @test_v1i64_v4i16(<4 x i16>*
declare <1 x i64> @test_v1i64_v8i8_helper(<8 x i8> %p)
define void @test_v1i64_v8i8(<8 x i8>* %p, <1 x i64>* %q) {
; CHECK: rev64 v{{[0-9]+}}.8b
- %1 = load <8 x i8>* %p
+ %1 = load <8 x i8>, <8 x i8>* %p
%2 = add <8 x i8> %1, %1
%3 = call <1 x i64> @test_v1i64_v8i8_helper(<8 x i8> %2)
%4 = add <1 x i64> %3, %3
@@ -221,7 +221,7 @@ define void @test_v1i64_v8i8(<8 x i8>* %
declare <2 x float> @test_v2f32_i64_helper(i64 %p)
define void @test_v2f32_i64(i64* %p, <2 x float>* %q) {
; CHECK: rev64 v{{[0-9]+}}.2s
- %1 = load i64* %p
+ %1 = load i64, i64* %p
%2 = add i64 %1, %1
%3 = call <2 x float> @test_v2f32_i64_helper(i64 %2)
%4 = fadd <2 x float> %3, %3
@@ -233,7 +233,7 @@ define void @test_v2f32_i64(i64* %p, <2
declare <2 x float> @test_v2f32_f64_helper(double %p)
define void @test_v2f32_f64(double* %p, <2 x float>* %q) {
; CHECK: rev64 v{{[0-9]+}}.2s
- %1 = load double* %p
+ %1 = load double, double* %p
%2 = fadd double %1, %1
%3 = call <2 x float> @test_v2f32_f64_helper(double %2)
%4 = fadd <2 x float> %3, %3
@@ -245,7 +245,7 @@ define void @test_v2f32_f64(double* %p,
declare <2 x float> @test_v2f32_v1i64_helper(<1 x i64> %p)
define void @test_v2f32_v1i64(<1 x i64>* %p, <2 x float>* %q) {
; CHECK: rev64 v{{[0-9]+}}.2s
- %1 = load <1 x i64>* %p
+ %1 = load <1 x i64>, <1 x i64>* %p
%2 = add <1 x i64> %1, %1
%3 = call <2 x float> @test_v2f32_v1i64_helper(<1 x i64> %2)
%4 = fadd <2 x float> %3, %3
@@ -258,7 +258,7 @@ declare <2 x float> @test_v2f32_v2i32_he
define void @test_v2f32_v2i32(<2 x i32>* %p, <2 x float>* %q) {
; CHECK: rev64 v{{[0-9]+}}.2s
; CHECK: rev64 v{{[0-9]+}}.2s
- %1 = load <2 x i32>* %p
+ %1 = load <2 x i32>, <2 x i32>* %p
%2 = add <2 x i32> %1, %1
%3 = call <2 x float> @test_v2f32_v2i32_helper(<2 x i32> %2)
%4 = fadd <2 x float> %3, %3
@@ -271,7 +271,7 @@ declare <2 x float> @test_v2f32_v4i16_he
define void @test_v2f32_v4i16(<4 x i16>* %p, <2 x float>* %q) {
; CHECK: rev64 v{{[0-9]+}}.4h
; CHECK: rev64 v{{[0-9]+}}.2s
- %1 = load <4 x i16>* %p
+ %1 = load <4 x i16>, <4 x i16>* %p
%2 = add <4 x i16> %1, %1
%3 = call <2 x float> @test_v2f32_v4i16_helper(<4 x i16> %2)
%4 = fadd <2 x float> %3, %3
@@ -284,7 +284,7 @@ declare <2 x float> @test_v2f32_v8i8_hel
define void @test_v2f32_v8i8(<8 x i8>* %p, <2 x float>* %q) {
; CHECK: rev64 v{{[0-9]+}}.8b
; CHECK: rev64 v{{[0-9]+}}.2s
- %1 = load <8 x i8>* %p
+ %1 = load <8 x i8>, <8 x i8>* %p
%2 = add <8 x i8> %1, %1
%3 = call <2 x float> @test_v2f32_v8i8_helper(<8 x i8> %2)
%4 = fadd <2 x float> %3, %3
@@ -296,7 +296,7 @@ define void @test_v2f32_v8i8(<8 x i8>* %
declare <2 x i32> @test_v2i32_i64_helper(i64 %p)
define void @test_v2i32_i64(i64* %p, <2 x i32>* %q) {
; CHECK: rev64 v{{[0-9]+}}.2s
- %1 = load i64* %p
+ %1 = load i64, i64* %p
%2 = add i64 %1, %1
%3 = call <2 x i32> @test_v2i32_i64_helper(i64 %2)
%4 = add <2 x i32> %3, %3
@@ -308,7 +308,7 @@ define void @test_v2i32_i64(i64* %p, <2
declare <2 x i32> @test_v2i32_f64_helper(double %p)
define void @test_v2i32_f64(double* %p, <2 x i32>* %q) {
; CHECK: rev64 v{{[0-9]+}}.2s
- %1 = load double* %p
+ %1 = load double, double* %p
%2 = fadd double %1, %1
%3 = call <2 x i32> @test_v2i32_f64_helper(double %2)
%4 = add <2 x i32> %3, %3
@@ -320,7 +320,7 @@ define void @test_v2i32_f64(double* %p,
declare <2 x i32> @test_v2i32_v1i64_helper(<1 x i64> %p)
define void @test_v2i32_v1i64(<1 x i64>* %p, <2 x i32>* %q) {
; CHECK: rev64 v{{[0-9]+}}.2s
- %1 = load <1 x i64>* %p
+ %1 = load <1 x i64>, <1 x i64>* %p
%2 = add <1 x i64> %1, %1
%3 = call <2 x i32> @test_v2i32_v1i64_helper(<1 x i64> %2)
%4 = add <2 x i32> %3, %3
@@ -333,7 +333,7 @@ declare <2 x i32> @test_v2i32_v2f32_help
define void @test_v2i32_v2f32(<2 x float>* %p, <2 x i32>* %q) {
; CHECK: rev64 v{{[0-9]+}}.2s
; CHECK: rev64 v{{[0-9]+}}.2s
- %1 = load <2 x float>* %p
+ %1 = load <2 x float>, <2 x float>* %p
%2 = fadd <2 x float> %1, %1
%3 = call <2 x i32> @test_v2i32_v2f32_helper(<2 x float> %2)
%4 = add <2 x i32> %3, %3
@@ -346,7 +346,7 @@ declare <2 x i32> @test_v2i32_v4i16_help
define void @test_v2i32_v4i16(<4 x i16>* %p, <2 x i32>* %q) {
; CHECK: rev64 v{{[0-9]+}}.4h
; CHECK: rev64 v{{[0-9]+}}.2s
- %1 = load <4 x i16>* %p
+ %1 = load <4 x i16>, <4 x i16>* %p
%2 = add <4 x i16> %1, %1
%3 = call <2 x i32> @test_v2i32_v4i16_helper(<4 x i16> %2)
%4 = add <2 x i32> %3, %3
@@ -359,7 +359,7 @@ declare <2 x i32> @test_v2i32_v8i8_helpe
define void @test_v2i32_v8i8(<8 x i8>* %p, <2 x i32>* %q) {
; CHECK: rev64 v{{[0-9]+}}.8b
; CHECK: rev64 v{{[0-9]+}}.2s
- %1 = load <8 x i8>* %p
+ %1 = load <8 x i8>, <8 x i8>* %p
%2 = add <8 x i8> %1, %1
%3 = call <2 x i32> @test_v2i32_v8i8_helper(<8 x i8> %2)
%4 = add <2 x i32> %3, %3
@@ -371,7 +371,7 @@ define void @test_v2i32_v8i8(<8 x i8>* %
declare <4 x i16> @test_v4i16_i64_helper(i64 %p)
define void @test_v4i16_i64(i64* %p, <4 x i16>* %q) {
; CHECK: rev64 v{{[0-9]+}}.4h
- %1 = load i64* %p
+ %1 = load i64, i64* %p
%2 = add i64 %1, %1
%3 = call <4 x i16> @test_v4i16_i64_helper(i64 %2)
%4 = add <4 x i16> %3, %3
@@ -383,7 +383,7 @@ define void @test_v4i16_i64(i64* %p, <4
declare <4 x i16> @test_v4i16_f64_helper(double %p)
define void @test_v4i16_f64(double* %p, <4 x i16>* %q) {
; CHECK: rev64 v{{[0-9]+}}.4h
- %1 = load double* %p
+ %1 = load double, double* %p
%2 = fadd double %1, %1
%3 = call <4 x i16> @test_v4i16_f64_helper(double %2)
%4 = add <4 x i16> %3, %3
@@ -395,7 +395,7 @@ define void @test_v4i16_f64(double* %p,
declare <4 x i16> @test_v4i16_v1i64_helper(<1 x i64> %p)
define void @test_v4i16_v1i64(<1 x i64>* %p, <4 x i16>* %q) {
; CHECK: rev64 v{{[0-9]+}}.4h
- %1 = load <1 x i64>* %p
+ %1 = load <1 x i64>, <1 x i64>* %p
%2 = add <1 x i64> %1, %1
%3 = call <4 x i16> @test_v4i16_v1i64_helper(<1 x i64> %2)
%4 = add <4 x i16> %3, %3
@@ -408,7 +408,7 @@ declare <4 x i16> @test_v4i16_v2f32_help
define void @test_v4i16_v2f32(<2 x float>* %p, <4 x i16>* %q) {
; CHECK: rev64 v{{[0-9]+}}.2s
; CHECK: rev64 v{{[0-9]+}}.4h
- %1 = load <2 x float>* %p
+ %1 = load <2 x float>, <2 x float>* %p
%2 = fadd <2 x float> %1, %1
%3 = call <4 x i16> @test_v4i16_v2f32_helper(<2 x float> %2)
%4 = add <4 x i16> %3, %3
@@ -421,7 +421,7 @@ declare <4 x i16> @test_v4i16_v2i32_help
define void @test_v4i16_v2i32(<2 x i32>* %p, <4 x i16>* %q) {
; CHECK: rev64 v{{[0-9]+}}.2s
; CHECK: rev64 v{{[0-9]+}}.4h
- %1 = load <2 x i32>* %p
+ %1 = load <2 x i32>, <2 x i32>* %p
%2 = add <2 x i32> %1, %1
%3 = call <4 x i16> @test_v4i16_v2i32_helper(<2 x i32> %2)
%4 = add <4 x i16> %3, %3
@@ -434,7 +434,7 @@ declare <4 x i16> @test_v4i16_v8i8_helpe
define void @test_v4i16_v8i8(<8 x i8>* %p, <4 x i16>* %q) {
; CHECK: rev64 v{{[0-9]+}}.8b
; CHECK: rev64 v{{[0-9]+}}.4h
- %1 = load <8 x i8>* %p
+ %1 = load <8 x i8>, <8 x i8>* %p
%2 = add <8 x i8> %1, %1
%3 = call <4 x i16> @test_v4i16_v8i8_helper(<8 x i8> %2)
%4 = add <4 x i16> %3, %3
@@ -446,7 +446,7 @@ define void @test_v4i16_v8i8(<8 x i8>* %
declare <8 x i8> @test_v8i8_i64_helper(i64 %p)
define void @test_v8i8_i64(i64* %p, <8 x i8>* %q) {
; CHECK: rev64 v{{[0-9]+}}.8b
- %1 = load i64* %p
+ %1 = load i64, i64* %p
%2 = add i64 %1, %1
%3 = call <8 x i8> @test_v8i8_i64_helper(i64 %2)
%4 = add <8 x i8> %3, %3
@@ -458,7 +458,7 @@ define void @test_v8i8_i64(i64* %p, <8 x
declare <8 x i8> @test_v8i8_f64_helper(double %p)
define void @test_v8i8_f64(double* %p, <8 x i8>* %q) {
; CHECK: rev64 v{{[0-9]+}}.8b
- %1 = load double* %p
+ %1 = load double, double* %p
%2 = fadd double %1, %1
%3 = call <8 x i8> @test_v8i8_f64_helper(double %2)
%4 = add <8 x i8> %3, %3
@@ -470,7 +470,7 @@ define void @test_v8i8_f64(double* %p, <
declare <8 x i8> @test_v8i8_v1i64_helper(<1 x i64> %p)
define void @test_v8i8_v1i64(<1 x i64>* %p, <8 x i8>* %q) {
; CHECK: rev64 v{{[0-9]+}}.8b
- %1 = load <1 x i64>* %p
+ %1 = load <1 x i64>, <1 x i64>* %p
%2 = add <1 x i64> %1, %1
%3 = call <8 x i8> @test_v8i8_v1i64_helper(<1 x i64> %2)
%4 = add <8 x i8> %3, %3
@@ -483,7 +483,7 @@ declare <8 x i8> @test_v8i8_v2f32_helper
define void @test_v8i8_v2f32(<2 x float>* %p, <8 x i8>* %q) {
; CHECK: rev64 v{{[0-9]+}}.2s
; CHECK: rev64 v{{[0-9]+}}.8b
- %1 = load <2 x float>* %p
+ %1 = load <2 x float>, <2 x float>* %p
%2 = fadd <2 x float> %1, %1
%3 = call <8 x i8> @test_v8i8_v2f32_helper(<2 x float> %2)
%4 = add <8 x i8> %3, %3
@@ -496,7 +496,7 @@ declare <8 x i8> @test_v8i8_v2i32_helper
define void @test_v8i8_v2i32(<2 x i32>* %p, <8 x i8>* %q) {
; CHECK: rev64 v{{[0-9]+}}.2s
; CHECK: rev64 v{{[0-9]+}}.8b
- %1 = load <2 x i32>* %p
+ %1 = load <2 x i32>, <2 x i32>* %p
%2 = add <2 x i32> %1, %1
%3 = call <8 x i8> @test_v8i8_v2i32_helper(<2 x i32> %2)
%4 = add <8 x i8> %3, %3
@@ -509,7 +509,7 @@ declare <8 x i8> @test_v8i8_v4i16_helper
define void @test_v8i8_v4i16(<4 x i16>* %p, <8 x i8>* %q) {
; CHECK: rev64 v{{[0-9]+}}.4h
; CHECK: rev64 v{{[0-9]+}}.8b
- %1 = load <4 x i16>* %p
+ %1 = load <4 x i16>, <4 x i16>* %p
%2 = add <4 x i16> %1, %1
%3 = call <8 x i8> @test_v8i8_v4i16_helper(<4 x i16> %2)
%4 = add <8 x i8> %3, %3
@@ -521,7 +521,7 @@ define void @test_v8i8_v4i16(<4 x i16>*
declare fp128 @test_f128_v2f64_helper(<2 x double> %p)
define void @test_f128_v2f64(<2 x double>* %p, fp128* %q) {
; CHECK: ext
- %1 = load <2 x double>* %p
+ %1 = load <2 x double>, <2 x double>* %p
%2 = fadd <2 x double> %1, %1
%3 = call fp128 @test_f128_v2f64_helper(<2 x double> %2)
%4 = fadd fp128 %3, %3
@@ -533,7 +533,7 @@ define void @test_f128_v2f64(<2 x double
declare fp128 @test_f128_v2i64_helper(<2 x i64> %p)
define void @test_f128_v2i64(<2 x i64>* %p, fp128* %q) {
; CHECK: ext
- %1 = load <2 x i64>* %p
+ %1 = load <2 x i64>, <2 x i64>* %p
%2 = add <2 x i64> %1, %1
%3 = call fp128 @test_f128_v2i64_helper(<2 x i64> %2)
%4 = fadd fp128 %3, %3
@@ -546,7 +546,7 @@ declare fp128 @test_f128_v4f32_helper(<4
define void @test_f128_v4f32(<4 x float>* %p, fp128* %q) {
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: ext
- %1 = load <4 x float>* %p
+ %1 = load <4 x float>, <4 x float>* %p
%2 = fadd <4 x float> %1, %1
%3 = call fp128 @test_f128_v4f32_helper(<4 x float> %2)
%4 = fadd fp128 %3, %3
@@ -559,7 +559,7 @@ declare fp128 @test_f128_v4i32_helper(<4
define void @test_f128_v4i32(<4 x i32>* %p, fp128* %q) {
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: ext
- %1 = load <4 x i32>* %p
+ %1 = load <4 x i32>, <4 x i32>* %p
%2 = add <4 x i32> %1, %1
%3 = call fp128 @test_f128_v4i32_helper(<4 x i32> %2)
%4 = fadd fp128 %3, %3
@@ -572,7 +572,7 @@ declare fp128 @test_f128_v8i16_helper(<8
define void @test_f128_v8i16(<8 x i16>* %p, fp128* %q) {
; CHECK: rev64 v{{[0-9]+}}.8h
; CHECK: ext
- %1 = load <8 x i16>* %p
+ %1 = load <8 x i16>, <8 x i16>* %p
%2 = add <8 x i16> %1, %1
%3 = call fp128 @test_f128_v8i16_helper(<8 x i16> %2)
%4 = fadd fp128 %3, %3
@@ -585,7 +585,7 @@ declare fp128 @test_f128_v16i8_helper(<1
define void @test_f128_v16i8(<16 x i8>* %p, fp128* %q) {
; CHECK: rev64 v{{[0-9]+}}.16b
; CHECK: ext
- %1 = load <16 x i8>* %p
+ %1 = load <16 x i8>, <16 x i8>* %p
%2 = add <16 x i8> %1, %1
%3 = call fp128 @test_f128_v16i8_helper(<16 x i8> %2)
%4 = fadd fp128 %3, %3
@@ -597,7 +597,7 @@ define void @test_f128_v16i8(<16 x i8>*
declare <2 x double> @test_v2f64_f128_helper(fp128 %p)
define void @test_v2f64_f128(fp128* %p, <2 x double>* %q) {
; CHECK: ext
- %1 = load fp128* %p
+ %1 = load fp128, fp128* %p
%2 = fadd fp128 %1, %1
%3 = call <2 x double> @test_v2f64_f128_helper(fp128 %2)
%4 = fadd <2 x double> %3, %3
@@ -610,7 +610,7 @@ declare <2 x double> @test_v2f64_v2i64_h
define void @test_v2f64_v2i64(<2 x i64>* %p, <2 x double>* %q) {
; CHECK: ext
; CHECK: ext
- %1 = load <2 x i64>* %p
+ %1 = load <2 x i64>, <2 x i64>* %p
%2 = add <2 x i64> %1, %1
%3 = call <2 x double> @test_v2f64_v2i64_helper(<2 x i64> %2)
%4 = fadd <2 x double> %3, %3
@@ -624,7 +624,7 @@ define void @test_v2f64_v4f32(<4 x float
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: ext
; CHECK: ext
- %1 = load <4 x float>* %p
+ %1 = load <4 x float>, <4 x float>* %p
%2 = fadd <4 x float> %1, %1
%3 = call <2 x double> @test_v2f64_v4f32_helper(<4 x float> %2)
%4 = fadd <2 x double> %3, %3
@@ -638,7 +638,7 @@ define void @test_v2f64_v4i32(<4 x i32>*
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: ext
; CHECK: ext
- %1 = load <4 x i32>* %p
+ %1 = load <4 x i32>, <4 x i32>* %p
%2 = add <4 x i32> %1, %1
%3 = call <2 x double> @test_v2f64_v4i32_helper(<4 x i32> %2)
%4 = fadd <2 x double> %3, %3
@@ -652,7 +652,7 @@ define void @test_v2f64_v8i16(<8 x i16>*
; CHECK: rev64 v{{[0-9]+}}.8h
; CHECK: ext
; CHECK: ext
- %1 = load <8 x i16>* %p
+ %1 = load <8 x i16>, <8 x i16>* %p
%2 = add <8 x i16> %1, %1
%3 = call <2 x double> @test_v2f64_v8i16_helper(<8 x i16> %2)
%4 = fadd <2 x double> %3, %3
@@ -666,7 +666,7 @@ define void @test_v2f64_v16i8(<16 x i8>*
; CHECK: rev64 v{{[0-9]+}}.16b
; CHECK: ext
; CHECK: ext
- %1 = load <16 x i8>* %p
+ %1 = load <16 x i8>, <16 x i8>* %p
%2 = add <16 x i8> %1, %1
%3 = call <2 x double> @test_v2f64_v16i8_helper(<16 x i8> %2)
%4 = fadd <2 x double> %3, %3
@@ -678,7 +678,7 @@ define void @test_v2f64_v16i8(<16 x i8>*
declare <2 x i64> @test_v2i64_f128_helper(fp128 %p)
define void @test_v2i64_f128(fp128* %p, <2 x i64>* %q) {
; CHECK: ext
- %1 = load fp128* %p
+ %1 = load fp128, fp128* %p
%2 = fadd fp128 %1, %1
%3 = call <2 x i64> @test_v2i64_f128_helper(fp128 %2)
%4 = add <2 x i64> %3, %3
@@ -691,7 +691,7 @@ declare <2 x i64> @test_v2i64_v2f64_help
define void @test_v2i64_v2f64(<2 x double>* %p, <2 x i64>* %q) {
; CHECK: ext
; CHECK: ext
- %1 = load <2 x double>* %p
+ %1 = load <2 x double>, <2 x double>* %p
%2 = fadd <2 x double> %1, %1
%3 = call <2 x i64> @test_v2i64_v2f64_helper(<2 x double> %2)
%4 = add <2 x i64> %3, %3
@@ -705,7 +705,7 @@ define void @test_v2i64_v4f32(<4 x float
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: ext
; CHECK: ext
- %1 = load <4 x float>* %p
+ %1 = load <4 x float>, <4 x float>* %p
%2 = fadd <4 x float> %1, %1
%3 = call <2 x i64> @test_v2i64_v4f32_helper(<4 x float> %2)
%4 = add <2 x i64> %3, %3
@@ -719,7 +719,7 @@ define void @test_v2i64_v4i32(<4 x i32>*
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: ext
; CHECK: ext
- %1 = load <4 x i32>* %p
+ %1 = load <4 x i32>, <4 x i32>* %p
%2 = add <4 x i32> %1, %1
%3 = call <2 x i64> @test_v2i64_v4i32_helper(<4 x i32> %2)
%4 = add <2 x i64> %3, %3
@@ -733,7 +733,7 @@ define void @test_v2i64_v8i16(<8 x i16>*
; CHECK: rev64 v{{[0-9]+}}.8h
; CHECK: ext
; CHECK: ext
- %1 = load <8 x i16>* %p
+ %1 = load <8 x i16>, <8 x i16>* %p
%2 = add <8 x i16> %1, %1
%3 = call <2 x i64> @test_v2i64_v8i16_helper(<8 x i16> %2)
%4 = add <2 x i64> %3, %3
@@ -747,7 +747,7 @@ define void @test_v2i64_v16i8(<16 x i8>*
; CHECK: rev64 v{{[0-9]+}}.16b
; CHECK: ext
; CHECK: ext
- %1 = load <16 x i8>* %p
+ %1 = load <16 x i8>, <16 x i8>* %p
%2 = add <16 x i8> %1, %1
%3 = call <2 x i64> @test_v2i64_v16i8_helper(<16 x i8> %2)
%4 = add <2 x i64> %3, %3
@@ -760,7 +760,7 @@ declare <4 x float> @test_v4f32_f128_hel
define void @test_v4f32_f128(fp128* %p, <4 x float>* %q) {
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: ext
- %1 = load fp128* %p
+ %1 = load fp128, fp128* %p
%2 = fadd fp128 %1, %1
%3 = call <4 x float> @test_v4f32_f128_helper(fp128 %2)
%4 = fadd <4 x float> %3, %3
@@ -774,7 +774,7 @@ define void @test_v4f32_v2f64(<2 x doubl
; CHECK: ext
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: ext
- %1 = load <2 x double>* %p
+ %1 = load <2 x double>, <2 x double>* %p
%2 = fadd <2 x double> %1, %1
%3 = call <4 x float> @test_v4f32_v2f64_helper(<2 x double> %2)
%4 = fadd <4 x float> %3, %3
@@ -788,7 +788,7 @@ define void @test_v4f32_v2i64(<2 x i64>*
; CHECK: ext
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: ext
- %1 = load <2 x i64>* %p
+ %1 = load <2 x i64>, <2 x i64>* %p
%2 = add <2 x i64> %1, %1
%3 = call <4 x float> @test_v4f32_v2i64_helper(<2 x i64> %2)
%4 = fadd <4 x float> %3, %3
@@ -803,7 +803,7 @@ define void @test_v4f32_v4i32(<4 x i32>*
; CHECK: ext
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: ext
- %1 = load <4 x i32>* %p
+ %1 = load <4 x i32>, <4 x i32>* %p
%2 = add <4 x i32> %1, %1
%3 = call <4 x float> @test_v4f32_v4i32_helper(<4 x i32> %2)
%4 = fadd <4 x float> %3, %3
@@ -818,7 +818,7 @@ define void @test_v4f32_v8i16(<8 x i16>*
; CHECK: ext
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: ext
- %1 = load <8 x i16>* %p
+ %1 = load <8 x i16>, <8 x i16>* %p
%2 = add <8 x i16> %1, %1
%3 = call <4 x float> @test_v4f32_v8i16_helper(<8 x i16> %2)
%4 = fadd <4 x float> %3, %3
@@ -833,7 +833,7 @@ define void @test_v4f32_v16i8(<16 x i8>*
; CHECK: ext
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: ext
- %1 = load <16 x i8>* %p
+ %1 = load <16 x i8>, <16 x i8>* %p
%2 = add <16 x i8> %1, %1
%3 = call <4 x float> @test_v4f32_v16i8_helper(<16 x i8> %2)
%4 = fadd <4 x float> %3, %3
@@ -846,7 +846,7 @@ declare <4 x i32> @test_v4i32_f128_helpe
define void @test_v4i32_f128(fp128* %p, <4 x i32>* %q) {
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: ext
- %1 = load fp128* %p
+ %1 = load fp128, fp128* %p
%2 = fadd fp128 %1, %1
%3 = call <4 x i32> @test_v4i32_f128_helper(fp128 %2)
%4 = add <4 x i32> %3, %3
@@ -860,7 +860,7 @@ define void @test_v4i32_v2f64(<2 x doubl
; CHECK: ext
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: ext
- %1 = load <2 x double>* %p
+ %1 = load <2 x double>, <2 x double>* %p
%2 = fadd <2 x double> %1, %1
%3 = call <4 x i32> @test_v4i32_v2f64_helper(<2 x double> %2)
%4 = add <4 x i32> %3, %3
@@ -874,7 +874,7 @@ define void @test_v4i32_v2i64(<2 x i64>*
; CHECK: ext
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: ext
- %1 = load <2 x i64>* %p
+ %1 = load <2 x i64>, <2 x i64>* %p
%2 = add <2 x i64> %1, %1
%3 = call <4 x i32> @test_v4i32_v2i64_helper(<2 x i64> %2)
%4 = add <4 x i32> %3, %3
@@ -889,7 +889,7 @@ define void @test_v4i32_v4f32(<4 x float
; CHECK: ext
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: ext
- %1 = load <4 x float>* %p
+ %1 = load <4 x float>, <4 x float>* %p
%2 = fadd <4 x float> %1, %1
%3 = call <4 x i32> @test_v4i32_v4f32_helper(<4 x float> %2)
%4 = add <4 x i32> %3, %3
@@ -904,7 +904,7 @@ define void @test_v4i32_v8i16(<8 x i16>*
; CHECK: ext
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: ext
- %1 = load <8 x i16>* %p
+ %1 = load <8 x i16>, <8 x i16>* %p
%2 = add <8 x i16> %1, %1
%3 = call <4 x i32> @test_v4i32_v8i16_helper(<8 x i16> %2)
%4 = add <4 x i32> %3, %3
@@ -919,7 +919,7 @@ define void @test_v4i32_v16i8(<16 x i8>*
; CHECK: ext
; CHECK: rev64 v{{[0-9]+}}.4s
; CHECK: ext
- %1 = load <16 x i8>* %p
+ %1 = load <16 x i8>, <16 x i8>* %p
%2 = add <16 x i8> %1, %1
%3 = call <4 x i32> @test_v4i32_v16i8_helper(<16 x i8> %2)
%4 = add <4 x i32> %3, %3
@@ -932,7 +932,7 @@ declare <8 x i16> @test_v8i16_f128_helpe
define void @test_v8i16_f128(fp128* %p, <8 x i16>* %q) {
; CHECK: rev64 v{{[0-9]+}}.8h
; CHECK: ext
- %1 = load fp128* %p
+ %1 = load fp128, fp128* %p
%2 = fadd fp128 %1, %1
%3 = call <8 x i16> @test_v8i16_f128_helper(fp128 %2)
%4 = add <8 x i16> %3, %3
@@ -946,7 +946,7 @@ define void @test_v8i16_v2f64(<2 x doubl
; CHECK: ext
; CHECK: rev64 v{{[0-9]+}}.8h
; CHECK: ext
- %1 = load <2 x double>* %p
+ %1 = load <2 x double>, <2 x double>* %p
%2 = fadd <2 x double> %1, %1
%3 = call <8 x i16> @test_v8i16_v2f64_helper(<2 x double> %2)
%4 = add <8 x i16> %3, %3
@@ -960,7 +960,7 @@ define void @test_v8i16_v2i64(<2 x i64>*
; CHECK: ext
; CHECK: rev64 v{{[0-9]+}}.8h
; CHECK: ext
- %1 = load <2 x i64>* %p
+ %1 = load <2 x i64>, <2 x i64>* %p
%2 = add <2 x i64> %1, %1
%3 = call <8 x i16> @test_v8i16_v2i64_helper(<2 x i64> %2)
%4 = add <8 x i16> %3, %3
@@ -975,7 +975,7 @@ define void @test_v8i16_v4f32(<4 x float
; CHECK: ext
; CHECK: rev64 v{{[0-9]+}}.8h
; CHECK: ext
- %1 = load <4 x float>* %p
+ %1 = load <4 x float>, <4 x float>* %p
%2 = fadd <4 x float> %1, %1
%3 = call <8 x i16> @test_v8i16_v4f32_helper(<4 x float> %2)
%4 = add <8 x i16> %3, %3
@@ -990,7 +990,7 @@ define void @test_v8i16_v4i32(<4 x i32>*
; CHECK: ext
; CHECK: rev64 v{{[0-9]+}}.8h
; CHECK: ext
- %1 = load <4 x i32>* %p
+ %1 = load <4 x i32>, <4 x i32>* %p
%2 = add <4 x i32> %1, %1
%3 = call <8 x i16> @test_v8i16_v4i32_helper(<4 x i32> %2)
%4 = add <8 x i16> %3, %3
@@ -1005,7 +1005,7 @@ define void @test_v8i16_v16i8(<16 x i8>*
; CHECK: ext
; CHECK: rev64 v{{[0-9]+}}.8h
; CHECK: ext
- %1 = load <16 x i8>* %p
+ %1 = load <16 x i8>, <16 x i8>* %p
%2 = add <16 x i8> %1, %1
%3 = call <8 x i16> @test_v8i16_v16i8_helper(<16 x i8> %2)
%4 = add <8 x i16> %3, %3
@@ -1018,7 +1018,7 @@ declare <16 x i8> @test_v16i8_f128_helpe
define void @test_v16i8_f128(fp128* %p, <16 x i8>* %q) {
; CHECK: rev64 v{{[0-9]+}}.16b
; CHECK: ext
- %1 = load fp128* %p
+ %1 = load fp128, fp128* %p
%2 = fadd fp128 %1, %1
%3 = call <16 x i8> @test_v16i8_f128_helper(fp128 %2)
%4 = add <16 x i8> %3, %3
@@ -1032,7 +1032,7 @@ define void @test_v16i8_v2f64(<2 x doubl
; CHECK: ext
; CHECK: rev64 v{{[0-9]+}}.16b
; CHECK: ext
- %1 = load <2 x double>* %p
+ %1 = load <2 x double>, <2 x double>* %p
%2 = fadd <2 x double> %1, %1
%3 = call <16 x i8> @test_v16i8_v2f64_helper(<2 x double> %2)
%4 = add <16 x i8> %3, %3
@@ -1046,7 +1046,7 @@ define void @test_v16i8_v2i64(<2 x i64>*
; CHECK: ext
; CHECK: rev64 v{{[0-9]+}}.16b
; CHECK: ext
- %1 = load <2 x i64>* %p
+ %1 = load <2 x i64>, <2 x i64>* %p
%2 = add <2 x i64> %1, %1
%3 = call <16 x i8> @test_v16i8_v2i64_helper(<2 x i64> %2)
%4 = add <16 x i8> %3, %3
@@ -1061,7 +1061,7 @@ define void @test_v16i8_v4f32(<4 x float
; CHECK: ext
; CHECK: rev64 v{{[0-9]+}}.16b
; CHECK: ext
- %1 = load <4 x float>* %p
+ %1 = load <4 x float>, <4 x float>* %p
%2 = fadd <4 x float> %1, %1
%3 = call <16 x i8> @test_v16i8_v4f32_helper(<4 x float> %2)
%4 = add <16 x i8> %3, %3
@@ -1076,7 +1076,7 @@ define void @test_v16i8_v4i32(<4 x i32>*
; CHECK: ext
; CHECK: rev64 v{{[0-9]+}}.16b
; CHECK: ext
- %1 = load <4 x i32>* %p
+ %1 = load <4 x i32>, <4 x i32>* %p
%2 = add <4 x i32> %1, %1
%3 = call <16 x i8> @test_v16i8_v4i32_helper(<4 x i32> %2)
%4 = add <16 x i8> %3, %3
@@ -1091,7 +1091,7 @@ define void @test_v16i8_v8i16(<8 x i16>*
; CHECK: ext
; CHECK: rev64 v{{[0-9]+}}.16b
; CHECK: ext
- %1 = load <8 x i16>* %p
+ %1 = load <8 x i16>, <8 x i16>* %p
%2 = add <8 x i16> %1, %1
%3 = call <16 x i8> @test_v16i8_v8i16_helper(<8 x i16> %2)
%4 = add <16 x i8> %3, %3
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-bitfield-extract.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-bitfield-extract.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-bitfield-extract.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-bitfield-extract.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@ define void @foo(%struct.X* nocapture %x
; CHECK: ret
%tmp = bitcast %struct.X* %x to i32*
- %tmp1 = load i32* %tmp, align 4
+ %tmp1 = load i32, i32* %tmp, align 4
%b = getelementptr inbounds %struct.Y, %struct.Y* %y, i64 0, i32 1
%bf.clear = lshr i32 %tmp1, 3
%bf.clear.lobit = and i32 %bf.clear, 1
@@ -46,7 +46,7 @@ define void @fct1(%struct.Z* nocapture %
; CHECK: ret
%tmp = bitcast %struct.Z* %x to i64*
- %tmp1 = load i64* %tmp, align 4
+ %tmp1 = load i64, i64* %tmp, align 4
%b = getelementptr inbounds %struct.A, %struct.A* %y, i64 0, i32 0
%bf.clear = lshr i64 %tmp1, 3
%bf.clear.lobit = and i64 %bf.clear, 1
@@ -77,7 +77,7 @@ entry:
; CHECK-NEXT: bfxil [[REG1]], x1, #16, #24
; CHECK-NEXT: str [[REG1]],
; CHECK-NEXT: ret
- %0 = load i64* %y, align 8
+ %0 = load i64, i64* %y, align 8
%and = and i64 %0, -16777216
%shr = lshr i64 %x, 16
%and1 = and i64 %shr, 16777215
@@ -93,7 +93,7 @@ entry:
; CHECK-NEXT: bfxil [[REG1]], w1, #16, #3
; CHECK-NEXT: str [[REG1]],
; CHECK-NEXT: ret
- %0 = load i32* %y, align 8
+ %0 = load i32, i32* %y, align 8
%and = and i32 %0, -8
%shr = lshr i32 %x, 16
%and1 = and i32 %shr, 7
@@ -112,7 +112,7 @@ entry:
; CHECK-NEXT: lsr [[REG2:w[0-9]+]], [[REG1]], #2
; CHECK-NEXT: str [[REG2]],
; CHECK-NEXT: ret
- %0 = load i32* %y, align 8
+ %0 = load i32, i32* %y, align 8
%and = and i32 %0, -8
%shr = lshr i32 %x, 16
%and1 = and i32 %shr, 7
@@ -133,7 +133,7 @@ entry:
; CHECK-NEXT: lsl [[REG2:w[0-9]+]], [[REG1]], #2
; CHECK-NEXT: str [[REG2]],
; CHECK-NEXT: ret
- %0 = load i32* %y, align 8
+ %0 = load i32, i32* %y, align 8
%and = and i32 %0, -8
%shr = lshr i32 %x, 16
%and1 = and i32 %shr, 7
@@ -155,7 +155,7 @@ entry:
; CHECK-NEXT: lsr [[REG2:x[0-9]+]], [[REG1]], #2
; CHECK-NEXT: str [[REG2]],
; CHECK-NEXT: ret
- %0 = load i64* %y, align 8
+ %0 = load i64, i64* %y, align 8
%and = and i64 %0, -8
%shr = lshr i64 %x, 16
%and1 = and i64 %shr, 7
@@ -177,7 +177,7 @@ entry:
; CHECK-NEXT: lsl [[REG2:x[0-9]+]], [[REG1]], #2
; CHECK-NEXT: str [[REG2]],
; CHECK-NEXT: ret
- %0 = load i64* %y, align 8
+ %0 = load i64, i64* %y, align 8
%and = and i64 %0, -8
%shr = lshr i64 %x, 16
%and1 = and i64 %shr, 7
@@ -198,7 +198,7 @@ entry:
; CHECK-NEXT: lsl [[REG2:w[0-9]+]], [[REG1]], #2
; CHECK-NEXT: str [[REG2]],
; CHECK-NEXT: ret
- %0 = load i32* %y, align 8
+ %0 = load i32, i32* %y, align 8
%and = and i32 %0, -8
%and1 = and i32 %x, 7
%or = or i32 %and, %and1
@@ -218,7 +218,7 @@ entry:
; CHECK-NEXT: lsl [[REG2:x[0-9]+]], [[REG1]], #2
; CHECK-NEXT: str [[REG2]],
; CHECK-NEXT: ret
- %0 = load i64* %y, align 8
+ %0 = load i64, i64* %y, align 8
%and = and i64 %0, -8
%and1 = and i64 %x, 7
%or = or i64 %and, %and1
@@ -247,7 +247,7 @@ entry:
; CHECK-NEXT: ubfx [[REG2:w[0-9]+]], [[REG1]], #2, #28
; CHECK-NEXT: str [[REG2]],
; CHECK-NEXT: ret
- %0 = load i32* %y, align 8
+ %0 = load i32, i32* %y, align 8
%and = and i32 %0, -8
%shr = lshr i32 %x, 16
%and1 = and i32 %shr, 7
@@ -270,7 +270,7 @@ entry:
; CHECK-NEXT: ubfx [[REG2:x[0-9]+]], [[REG1]], #2, #60
; CHECK-NEXT: str [[REG2]],
; CHECK-NEXT: ret
- %0 = load i64* %y, align 8
+ %0 = load i64, i64* %y, align 8
%and = and i64 %0, -8
%shr = lshr i64 %x, 16
%and1 = and i64 %shr, 7
@@ -296,7 +296,7 @@ entry:
; CHECK-NEXT: lsl [[REG3:w[0-9]+]], [[REG2]], #2
; CHECK-NEXT: str [[REG3]],
; CHECK-NEXT: ret
- %0 = load i32* %y, align 8
+ %0 = load i32, i32* %y, align 8
%and = and i32 %0, -256
%shr = lshr i32 %x, 16
%and1 = and i32 %shr, 255
@@ -326,7 +326,7 @@ entry:
; CHECK-NEXT: lsl [[REG3:x[0-9]+]], [[REG2]], #2
; CHECK-NEXT: str [[REG3]],
; CHECK-NEXT: ret
- %0 = load i64* %y, align 8
+ %0 = load i64, i64* %y, align 8
%and = and i64 %0, -256
%shr = lshr i64 %x, 16
%and1 = and i64 %shr, 255
@@ -357,7 +357,7 @@ entry:
; CHECK-NEXT: ubfx [[REG3:w[0-9]+]], [[REG2]], #2, #28
; CHECK-NEXT: str [[REG3]],
; CHECK-NEXT: ret
- %0 = load i32* %y, align 8
+ %0 = load i32, i32* %y, align 8
%and = and i32 %0, 1737056
%shr = lshr i32 %x, 16
%and1 = and i32 %shr, 7
@@ -386,7 +386,7 @@ entry:
; CHECK-NEXT: ubfx [[REG3:x[0-9]+]], [[REG2]], #2, #60
; CHECK-NEXT: str [[REG3]],
; CHECK-NEXT: ret
- %0 = load i64* %y, align 8
+ %0 = load i64, i64* %y, align 8
%and = and i64 %0, 1737056
%shr = lshr i64 %x, 16
%and1 = and i64 %shr, 7
@@ -422,7 +422,7 @@ entry:
if.then: ; preds = %entry
%arrayidx3 = getelementptr inbounds [65536 x i8], [65536 x i8]* @first_ones, i64 0, i64 %x.sroa.5.0.extract.shift
- %0 = load i8* %arrayidx3, align 1
+ %0 = load i8, i8* %arrayidx3, align 1
%conv = zext i8 %0 to i32
br label %return
@@ -444,7 +444,7 @@ if.then7:
; CHECK-NOT: ubfm
%idxprom10 = and i64 %x.sroa.3.0.extract.shift, 65535
%arrayidx11 = getelementptr inbounds [65536 x i8], [65536 x i8]* @first_ones, i64 0, i64 %idxprom10
- %1 = load i8* %arrayidx11, align 1
+ %1 = load i8, i8* %arrayidx11, align 1
%conv12 = zext i8 %1 to i32
%add = add nsw i32 %conv12, 16
br label %return
@@ -467,7 +467,7 @@ if.then17:
; CHECK-NOT: ubfm
%idxprom20 = and i64 %x.sroa.1.0.extract.shift, 65535
%arrayidx21 = getelementptr inbounds [65536 x i8], [65536 x i8]* @first_ones, i64 0, i64 %idxprom20
- %2 = load i8* %arrayidx21, align 1
+ %2 = load i8, i8* %arrayidx21, align 1
%conv22 = zext i8 %2 to i32
%add23 = add nsw i32 %conv22, 32
br label %return
@@ -510,7 +510,7 @@ entry:
%shr = lshr i64 %x, 4
%and = and i64 %shr, 15
%arrayidx = getelementptr inbounds [8 x [64 x i64]], [8 x [64 x i64]]* @arr, i64 0, i64 0, i64 %and
- %0 = load i64* %arrayidx, align 8
+ %0 = load i64, i64* %arrayidx, align 8
ret i64 %0
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-blockaddress.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-blockaddress.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-blockaddress.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-blockaddress.ll Fri Feb 27 15:17:42 2015
@@ -25,6 +25,6 @@ entry:
br label %mylabel
mylabel:
- %tmp = load volatile i64* %recover, align 8
+ %tmp = load volatile i64, i64* %recover, align 8
ret i64 %tmp
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-call-tailcalls.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-call-tailcalls.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-call-tailcalls.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-call-tailcalls.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ define void @t2() {
; CHECK: ldr x[[ADDR:[0-9]+]], [x[[GOTADDR]], _t@GOTPAGEOFF]
; CHECK: ldr x[[DEST:[0-9]+]], [x[[ADDR]]]
; CHECK: br x[[DEST]]
- %tmp = load i32 ()** @t
+ %tmp = load i32 ()*, i32 ()** @t
%tmp.upgrd.2 = tail call i32 %tmp()
ret void
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-cast-opt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-cast-opt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-cast-opt.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-cast-opt.ll Fri Feb 27 15:17:42 2015
@@ -11,12 +11,12 @@ define zeroext i8 @foo(i32 %i1, i32 %i2)
; CHECK-NOT: and
entry:
%idxprom = sext i32 %i1 to i64
- %0 = load i8** @block, align 8
+ %0 = load i8*, i8** @block, align 8
%arrayidx = getelementptr inbounds i8, i8* %0, i64 %idxprom
- %1 = load i8* %arrayidx, align 1
+ %1 = load i8, i8* %arrayidx, align 1
%idxprom1 = sext i32 %i2 to i64
%arrayidx2 = getelementptr inbounds i8, i8* %0, i64 %idxprom1
- %2 = load i8* %arrayidx2, align 1
+ %2 = load i8, i8* %arrayidx2, align 1
%cmp = icmp eq i8 %1, %2
br i1 %cmp, label %return, label %if.then
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-ccmp-heuristics.ll Fri Feb 27 15:17:42 2015
@@ -21,7 +21,7 @@ target triple = "arm64-apple-ios7.0.0"
; CHECK-NEXT b.cc
define i32 @Maze1() nounwind ssp {
entry:
- %0 = load i64* @channelColumns, align 8, !tbaa !0
+ %0 = load i64, i64* @channelColumns, align 8, !tbaa !0
%cmp90 = icmp eq i64 %0, 0
br i1 %cmp90, label %for.end, label %for.body
@@ -29,51 +29,51 @@ for.body:
%1 = phi i64 [ %0, %entry ], [ %37, %for.inc ]
%i.092 = phi i64 [ 1, %entry ], [ %inc53, %for.inc ]
%numLeft.091 = phi i32 [ 0, %entry ], [ %numLeft.1, %for.inc ]
- %2 = load i8** @mazeRoute, align 8, !tbaa !3
+ %2 = load i8*, i8** @mazeRoute, align 8, !tbaa !3
%arrayidx = getelementptr inbounds i8, i8* %2, i64 %i.092
- %3 = load i8* %arrayidx, align 1, !tbaa !1
+ %3 = load i8, i8* %arrayidx, align 1, !tbaa !1
%tobool = icmp eq i8 %3, 0
br i1 %tobool, label %for.inc, label %if.then
if.then: ; preds = %for.body
- %4 = load i64** @TOP, align 8, !tbaa !3
+ %4 = load i64*, i64** @TOP, align 8, !tbaa !3
%arrayidx1 = getelementptr inbounds i64, i64* %4, i64 %i.092
- %5 = load i64* %arrayidx1, align 8, !tbaa !0
- %6 = load i64** @netsAssign, align 8, !tbaa !3
+ %5 = load i64, i64* %arrayidx1, align 8, !tbaa !0
+ %6 = load i64*, i64** @netsAssign, align 8, !tbaa !3
%arrayidx2 = getelementptr inbounds i64, i64* %6, i64 %5
- %7 = load i64* %arrayidx2, align 8, !tbaa !0
- %8 = load i64** @BOT, align 8, !tbaa !3
+ %7 = load i64, i64* %arrayidx2, align 8, !tbaa !0
+ %8 = load i64*, i64** @BOT, align 8, !tbaa !3
%arrayidx3 = getelementptr inbounds i64, i64* %8, i64 %i.092
- %9 = load i64* %arrayidx3, align 8, !tbaa !0
+ %9 = load i64, i64* %arrayidx3, align 8, !tbaa !0
%arrayidx4 = getelementptr inbounds i64, i64* %6, i64 %9
- %10 = load i64* %arrayidx4, align 8, !tbaa !0
+ %10 = load i64, i64* %arrayidx4, align 8, !tbaa !0
%cmp5 = icmp ugt i64 %i.092, 1
%cmp6 = icmp ugt i64 %10, 1
%or.cond = and i1 %cmp5, %cmp6
br i1 %or.cond, label %land.lhs.true7, label %if.else
land.lhs.true7: ; preds = %if.then
- %11 = load i64* @channelTracks, align 8, !tbaa !0
+ %11 = load i64, i64* @channelTracks, align 8, !tbaa !0
%add = add i64 %11, 1
%call = tail call fastcc i32 @Maze1Mech(i64 %i.092, i64 %add, i64 %10, i64 0, i64 %7, i32 -1, i32 -1)
%tobool8 = icmp eq i32 %call, 0
br i1 %tobool8, label %land.lhs.true7.if.else_crit_edge, label %if.then9
land.lhs.true7.if.else_crit_edge: ; preds = %land.lhs.true7
- %.pre = load i64* @channelColumns, align 8, !tbaa !0
+ %.pre = load i64, i64* @channelColumns, align 8, !tbaa !0
br label %if.else
if.then9: ; preds = %land.lhs.true7
- %12 = load i8** @mazeRoute, align 8, !tbaa !3
+ %12 = load i8*, i8** @mazeRoute, align 8, !tbaa !3
%arrayidx10 = getelementptr inbounds i8, i8* %12, i64 %i.092
store i8 0, i8* %arrayidx10, align 1, !tbaa !1
- %13 = load i64** @TOP, align 8, !tbaa !3
+ %13 = load i64*, i64** @TOP, align 8, !tbaa !3
%arrayidx11 = getelementptr inbounds i64, i64* %13, i64 %i.092
- %14 = load i64* %arrayidx11, align 8, !tbaa !0
+ %14 = load i64, i64* %arrayidx11, align 8, !tbaa !0
tail call fastcc void @CleanNet(i64 %14)
- %15 = load i64** @BOT, align 8, !tbaa !3
+ %15 = load i64*, i64** @BOT, align 8, !tbaa !3
%arrayidx12 = getelementptr inbounds i64, i64* %15, i64 %i.092
- %16 = load i64* %arrayidx12, align 8, !tbaa !0
+ %16 = load i64, i64* %arrayidx12, align 8, !tbaa !0
tail call fastcc void @CleanNet(i64 %16)
br label %for.inc
@@ -84,23 +84,23 @@ if.else:
br i1 %or.cond89, label %land.lhs.true16, label %if.else24
land.lhs.true16: ; preds = %if.else
- %18 = load i64* @channelTracks, align 8, !tbaa !0
+ %18 = load i64, i64* @channelTracks, align 8, !tbaa !0
%add17 = add i64 %18, 1
%call18 = tail call fastcc i32 @Maze1Mech(i64 %i.092, i64 %add17, i64 %10, i64 0, i64 %7, i32 1, i32 -1)
%tobool19 = icmp eq i32 %call18, 0
br i1 %tobool19, label %if.else24, label %if.then20
if.then20: ; preds = %land.lhs.true16
- %19 = load i8** @mazeRoute, align 8, !tbaa !3
+ %19 = load i8*, i8** @mazeRoute, align 8, !tbaa !3
%arrayidx21 = getelementptr inbounds i8, i8* %19, i64 %i.092
store i8 0, i8* %arrayidx21, align 1, !tbaa !1
- %20 = load i64** @TOP, align 8, !tbaa !3
+ %20 = load i64*, i64** @TOP, align 8, !tbaa !3
%arrayidx22 = getelementptr inbounds i64, i64* %20, i64 %i.092
- %21 = load i64* %arrayidx22, align 8, !tbaa !0
+ %21 = load i64, i64* %arrayidx22, align 8, !tbaa !0
tail call fastcc void @CleanNet(i64 %21)
- %22 = load i64** @BOT, align 8, !tbaa !3
+ %22 = load i64*, i64** @BOT, align 8, !tbaa !3
%arrayidx23 = getelementptr inbounds i64, i64* %22, i64 %i.092
- %23 = load i64* %arrayidx23, align 8, !tbaa !0
+ %23 = load i64, i64* %arrayidx23, align 8, !tbaa !0
tail call fastcc void @CleanNet(i64 %23)
br label %for.inc
@@ -108,7 +108,7 @@ if.else24:
br i1 %cmp5, label %land.lhs.true26, label %if.else36
land.lhs.true26: ; preds = %if.else24
- %24 = load i64* @channelTracks, align 8, !tbaa !0
+ %24 = load i64, i64* @channelTracks, align 8, !tbaa !0
%cmp27 = icmp ult i64 %7, %24
br i1 %cmp27, label %land.lhs.true28, label %if.else36
@@ -119,26 +119,26 @@ land.lhs.true28:
br i1 %tobool31, label %if.else36, label %if.then32
if.then32: ; preds = %land.lhs.true28
- %25 = load i8** @mazeRoute, align 8, !tbaa !3
+ %25 = load i8*, i8** @mazeRoute, align 8, !tbaa !3
%arrayidx33 = getelementptr inbounds i8, i8* %25, i64 %i.092
store i8 0, i8* %arrayidx33, align 1, !tbaa !1
- %26 = load i64** @TOP, align 8, !tbaa !3
+ %26 = load i64*, i64** @TOP, align 8, !tbaa !3
%arrayidx34 = getelementptr inbounds i64, i64* %26, i64 %i.092
- %27 = load i64* %arrayidx34, align 8, !tbaa !0
+ %27 = load i64, i64* %arrayidx34, align 8, !tbaa !0
tail call fastcc void @CleanNet(i64 %27)
- %28 = load i64** @BOT, align 8, !tbaa !3
+ %28 = load i64*, i64** @BOT, align 8, !tbaa !3
%arrayidx35 = getelementptr inbounds i64, i64* %28, i64 %i.092
- %29 = load i64* %arrayidx35, align 8, !tbaa !0
+ %29 = load i64, i64* %arrayidx35, align 8, !tbaa !0
tail call fastcc void @CleanNet(i64 %29)
br label %for.inc
if.else36: ; preds = %land.lhs.true28, %land.lhs.true26, %if.else24
- %30 = load i64* @channelColumns, align 8, !tbaa !0
+ %30 = load i64, i64* @channelColumns, align 8, !tbaa !0
%cmp37 = icmp ult i64 %i.092, %30
br i1 %cmp37, label %land.lhs.true38, label %if.else48
land.lhs.true38: ; preds = %if.else36
- %31 = load i64* @channelTracks, align 8, !tbaa !0
+ %31 = load i64, i64* @channelTracks, align 8, !tbaa !0
%cmp39 = icmp ult i64 %7, %31
br i1 %cmp39, label %land.lhs.true40, label %if.else48
@@ -149,16 +149,16 @@ land.lhs.true40:
br i1 %tobool43, label %if.else48, label %if.then44
if.then44: ; preds = %land.lhs.true40
- %32 = load i8** @mazeRoute, align 8, !tbaa !3
+ %32 = load i8*, i8** @mazeRoute, align 8, !tbaa !3
%arrayidx45 = getelementptr inbounds i8, i8* %32, i64 %i.092
store i8 0, i8* %arrayidx45, align 1, !tbaa !1
- %33 = load i64** @TOP, align 8, !tbaa !3
+ %33 = load i64*, i64** @TOP, align 8, !tbaa !3
%arrayidx46 = getelementptr inbounds i64, i64* %33, i64 %i.092
- %34 = load i64* %arrayidx46, align 8, !tbaa !0
+ %34 = load i64, i64* %arrayidx46, align 8, !tbaa !0
tail call fastcc void @CleanNet(i64 %34)
- %35 = load i64** @BOT, align 8, !tbaa !3
+ %35 = load i64*, i64** @BOT, align 8, !tbaa !3
%arrayidx47 = getelementptr inbounds i64, i64* %35, i64 %i.092
- %36 = load i64* %arrayidx47, align 8, !tbaa !0
+ %36 = load i64, i64* %arrayidx47, align 8, !tbaa !0
tail call fastcc void @CleanNet(i64 %36)
br label %for.inc
@@ -169,7 +169,7 @@ if.else48:
for.inc: ; preds = %if.else48, %if.then44, %if.then32, %if.then20, %if.then9, %for.body
%numLeft.1 = phi i32 [ %numLeft.091, %if.then9 ], [ %numLeft.091, %if.then20 ], [ %numLeft.091, %if.then32 ], [ %numLeft.091, %if.then44 ], [ %inc, %if.else48 ], [ %numLeft.091, %for.body ]
%inc53 = add i64 %i.092, 1
- %37 = load i64* @channelColumns, align 8, !tbaa !0
+ %37 = load i64, i64* @channelColumns, align 8, !tbaa !0
%cmp = icmp ugt i64 %inc53, %37
br i1 %cmp, label %for.end, label %for.body
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-ccmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-ccmp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-ccmp.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-ccmp.ll Fri Feb 27 15:17:42 2015
@@ -283,7 +283,7 @@ sw.bb.i.i:
%ref.tr.i.i = phi %str1* [ %0, %sw.bb.i.i ], [ undef, %entry ]
%operands.i.i = getelementptr inbounds %str1, %str1* %ref.tr.i.i, i64 0, i32 0, i32 2
%arrayidx.i.i = bitcast i32* %operands.i.i to %str1**
- %0 = load %str1** %arrayidx.i.i, align 8
+ %0 = load %str1*, %str1** %arrayidx.i.i, align 8
%code1.i.i.phi.trans.insert = getelementptr inbounds %str1, %str1* %0, i64 0, i32 0, i32 0, i64 16
br label %sw.bb.i.i
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-code-model-large-abs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-code-model-large-abs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-code-model-large-abs.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-code-model-large-abs.ll Fri Feb 27 15:17:42 2015
@@ -18,7 +18,7 @@ define i8* @global_addr() {
define i8 @global_i8() {
; CHECK-LABEL: global_i8:
- %val = load i8* @var8
+ %val = load i8, i8* @var8
ret i8 %val
; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var8
; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var8
@@ -29,7 +29,7 @@ define i8 @global_i8() {
define i16 @global_i16() {
; CHECK-LABEL: global_i16:
- %val = load i16* @var16
+ %val = load i16, i16* @var16
ret i16 %val
; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var16
; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var16
@@ -40,7 +40,7 @@ define i16 @global_i16() {
define i32 @global_i32() {
; CHECK-LABEL: global_i32:
- %val = load i32* @var32
+ %val = load i32, i32* @var32
ret i32 %val
; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var32
; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var32
@@ -51,7 +51,7 @@ define i32 @global_i32() {
define i64 @global_i64() {
; CHECK-LABEL: global_i64:
- %val = load i64* @var64
+ %val = load i64, i64* @var64
ret i64 %val
; CHECK: movz x[[ADDR_REG:[0-9]+]], #:abs_g3:var64
; CHECK: movk x[[ADDR_REG]], #:abs_g2_nc:var64
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-collect-loh-garbage-crash.ll Fri Feb 27 15:17:42 2015
@@ -22,13 +22,13 @@ define void @foo() {
entry:
br label %if.then83
if.then83: ; preds = %if.end81
- %tmp = load %"class.H4ISP::H4ISPDevice"** @pH4ISPDevice, align 8
+ %tmp = load %"class.H4ISP::H4ISPDevice"*, %"class.H4ISP::H4ISPDevice"** @pH4ISPDevice, align 8
%call84 = call i32 @_ZN5H4ISP11H4ISPDevice32ISP_SelectBestMIPIFrequencyIndexEjPj(%"class.H4ISP::H4ISPDevice"* %tmp) #19
tail call void asm sideeffect "", "~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27}"()
- %tmp2 = load %"class.H4ISP::H4ISPDevice"** @pH4ISPDevice, align 8
+ %tmp2 = load %"class.H4ISP::H4ISPDevice"*, %"class.H4ISP::H4ISPDevice"** @pH4ISPDevice, align 8
tail call void asm sideeffect "", "~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x28}"()
%pCameraManager.i268 = getelementptr inbounds %"class.H4ISP::H4ISPDevice", %"class.H4ISP::H4ISPDevice"* %tmp2, i64 0, i32 3
- %tmp3 = load %"class.H4ISP::H4ISPCameraManager"** %pCameraManager.i268, align 8
+ %tmp3 = load %"class.H4ISP::H4ISPCameraManager"*, %"class.H4ISP::H4ISPCameraManager"** %pCameraManager.i268, align 8
%tobool.i269 = icmp eq %"class.H4ISP::H4ISPCameraManager"* %tmp3, null
br i1 %tobool.i269, label %if.then83, label %end
end:
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-collect-loh.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-collect-loh.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-collect-loh.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-collect-loh.ll Fri Feb 27 15:17:42 2015
@@ -12,7 +12,7 @@
; Function Attrs: noinline nounwind ssp
define void @foo(i32 %t) {
entry:
- %tmp = load i32* @a, align 4
+ %tmp = load i32, i32* @a, align 4
%add = add nsw i32 %tmp, %t
store i32 %add, i32* @a, align 4
ret void
@@ -32,22 +32,22 @@ entry:
br i1 %cmp, label %if.then, label %if.end4
if.then: ; preds = %entry
- %tmp = load i32* @a, align 4
+ %tmp = load i32, i32* @a, align 4
%add = add nsw i32 %tmp, %t
%cmp1 = icmp sgt i32 %add, 12
br i1 %cmp1, label %if.then2, label %if.end4
if.then2: ; preds = %if.then
tail call void @foo(i32 %add)
- %tmp1 = load i32* @a, align 4
+ %tmp1 = load i32, i32* @a, align 4
br label %if.end4
if.end4: ; preds = %if.then2, %if.then, %entry
%t.addr.0 = phi i32 [ %tmp1, %if.then2 ], [ %t, %if.then ], [ %t, %entry ]
- %tmp2 = load i32* @b, align 4
+ %tmp2 = load i32, i32* @b, align 4
%add5 = add nsw i32 %tmp2, %t.addr.0
tail call void @foo(i32 %add5)
- %tmp3 = load i32* @b, align 4
+ %tmp3 = load i32, i32* @b, align 4
%add6 = add nsw i32 %tmp3, %t.addr.0
ret i32 %add6
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-complex-copy-noneon.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-complex-copy-noneon.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-complex-copy-noneon.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-complex-copy-noneon.ll Fri Feb 27 15:17:42 2015
@@ -9,9 +9,9 @@ define void @store_combine() nounwind {
%dst = alloca { double, double }, align 8
%src.realp = getelementptr inbounds { double, double }, { double, double }* %src, i32 0, i32 0
- %src.real = load double* %src.realp
+ %src.real = load double, double* %src.realp
%src.imagp = getelementptr inbounds { double, double }, { double, double }* %src, i32 0, i32 1
- %src.imag = load double* %src.imagp
+ %src.imag = load double, double* %src.imagp
%dst.realp = getelementptr inbounds { double, double }, { double, double }* %dst, i32 0, i32 0
%dst.imagp = getelementptr inbounds { double, double }, { double, double }* %dst, i32 0, i32 1
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-const-addr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-const-addr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-const-addr.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-const-addr.ll Fri Feb 27 15:17:42 2015
@@ -11,12 +11,12 @@ define i32 @test1() nounwind {
; CHECK: ldr w8, [x8, #12]
%at = inttoptr i64 68141056 to %T*
%o1 = getelementptr %T, %T* %at, i32 0, i32 1
- %t1 = load i32* %o1
+ %t1 = load i32, i32* %o1
%o2 = getelementptr %T, %T* %at, i32 0, i32 2
- %t2 = load i32* %o2
+ %t2 = load i32, i32* %o2
%a1 = add i32 %t1, %t2
%o3 = getelementptr %T, %T* %at, i32 0, i32 3
- %t3 = load i32* %o3
+ %t3 = load i32, i32* %o3
%a2 = add i32 %a1, %t3
ret i32 %a2
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-convert-v4f64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-convert-v4f64.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-convert-v4f64.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-convert-v4f64.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ define <4 x i16> @fptosi_v4f64_to_v4i16(
; CHECK-DAG: xtn v[[LHS_NA:[0-9]+]].2s, v[[LHS]].2d
; CHECK-DAG: xtn v[[RHS_NA:[0-9]+]].2s, v[[RHS]].2d
; CHECK: uzp1 v0.4h, v[[RHS_NA]].4h, v[[LHS_NA]].4h
- %tmp1 = load <4 x double>* %ptr
+ %tmp1 = load <4 x double>, <4 x double>* %ptr
%tmp2 = fptosi <4 x double> %tmp1 to <4 x i16>
ret <4 x i16> %tmp2
}
@@ -26,7 +26,7 @@ define <8 x i8> @fptosi_v4f64_to_v4i8(<8
; CHECK-DAG: uzp1 v[[TMP1:[0-9]+]].4h, v[[CONV2]].4h, v[[CONV3]].4h
; CHECK-DAG: uzp1 v[[TMP2:[0-9]+]].4h, v[[CONV0]].4h, v[[CONV1]].4h
; CHECK: uzp1 v0.8b, v[[TMP2]].8b, v[[TMP1]].8b
- %tmp1 = load <8 x double>* %ptr
+ %tmp1 = load <8 x double>, <8 x double>* %ptr
%tmp2 = fptosi <8 x double> %tmp1 to <8 x i8>
ret <8 x i8> %tmp2
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-cse.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-cse.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-cse.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-cse.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@ entry:
; CHECK: sub
; CHECK-NOT: sub
; CHECK: ret
- %0 = load i32* %offset, align 4
+ %0 = load i32, i32* %offset, align 4
%cmp = icmp slt i32 %0, %size
%s = sub nsw i32 %0, %size
br i1 %cmp, label %return, label %if.end
@@ -43,7 +43,7 @@ entry:
; CHECK: b.lt
; CHECK-NOT: sub
; CHECK: ret
- %0 = load i32* %offset, align 4
+ %0 = load i32, i32* %offset, align 4
%cmp = icmp slt i32 %0, 1
br i1 %cmp, label %return, label %if.end
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-dagcombiner-dead-indexed-load.ll Fri Feb 27 15:17:42 2015
@@ -19,7 +19,7 @@ define void @test(%"struct.SU"* nocaptur
entry:
%r1 = getelementptr inbounds %"struct.SU", %"struct.SU"* %su, i64 1, i32 5
%r2 = bitcast %"struct.BO"* %r1 to i48*
- %r3 = load i48* %r2, align 8
+ %r3 = load i48, i48* %r2, align 8
%r4 = and i48 %r3, -4294967296
%r5 = or i48 0, %r4
store i48 %r5, i48* %r2, align 8
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-dagcombiner-load-slicing.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-dagcombiner-load-slicing.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-dagcombiner-load-slicing.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-dagcombiner-load-slicing.ll Fri Feb 27 15:17:42 2015
@@ -16,7 +16,7 @@ define void @test(%class.Complex* nocapt
entry:
%arrayidx = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %out_start
%0 = bitcast %class.Complex* %arrayidx to i64*
- %1 = load i64* %0, align 4
+ %1 = load i64, i64* %0, align 4
%t0.sroa.0.0.extract.trunc = trunc i64 %1 to i32
%2 = bitcast i32 %t0.sroa.0.0.extract.trunc to float
%t0.sroa.2.0.extract.shift = lshr i64 %1, 32
@@ -25,11 +25,11 @@ entry:
%add = add i64 %out_start, 8
%arrayidx2 = getelementptr inbounds %class.Complex, %class.Complex* %out, i64 %add
%i.i = getelementptr inbounds %class.Complex, %class.Complex* %arrayidx2, i64 0, i32 0
- %4 = load float* %i.i, align 4
+ %4 = load float, float* %i.i, align 4
%add.i = fadd float %4, %2
%retval.sroa.0.0.vec.insert.i = insertelement <2 x float> undef, float %add.i, i32 0
%r.i = getelementptr inbounds %class.Complex, %class.Complex* %arrayidx2, i64 0, i32 1
- %5 = load float* %r.i, align 4
+ %5 = load float, float* %r.i, align 4
%add5.i = fadd float %5, %3
%retval.sroa.0.4.vec.insert.i = insertelement <2 x float> %retval.sroa.0.0.vec.insert.i, float %add5.i, i32 1
%ref.tmp.sroa.0.0.cast = bitcast %class.Complex* %arrayidx to <2 x float>*
@@ -48,7 +48,7 @@ define void @test_int(%class.Complex_int
entry:
%arrayidx = getelementptr inbounds %class.Complex_int, %class.Complex_int* %out, i64 %out_start
%0 = bitcast %class.Complex_int* %arrayidx to i64*
- %1 = load i64* %0, align 4
+ %1 = load i64, i64* %0, align 4
%t0.sroa.0.0.extract.trunc = trunc i64 %1 to i32
%2 = bitcast i32 %t0.sroa.0.0.extract.trunc to i32
%t0.sroa.2.0.extract.shift = lshr i64 %1, 32
@@ -57,11 +57,11 @@ entry:
%add = add i64 %out_start, 8
%arrayidx2 = getelementptr inbounds %class.Complex_int, %class.Complex_int* %out, i64 %add
%i.i = getelementptr inbounds %class.Complex_int, %class.Complex_int* %arrayidx2, i64 0, i32 0
- %4 = load i32* %i.i, align 4
+ %4 = load i32, i32* %i.i, align 4
%add.i = add i32 %4, %2
%retval.sroa.0.0.vec.insert.i = insertelement <2 x i32> undef, i32 %add.i, i32 0
%r.i = getelementptr inbounds %class.Complex_int, %class.Complex_int* %arrayidx2, i64 0, i32 1
- %5 = load i32* %r.i, align 4
+ %5 = load i32, i32* %r.i, align 4
%add5.i = add i32 %5, %3
%retval.sroa.0.4.vec.insert.i = insertelement <2 x i32> %retval.sroa.0.0.vec.insert.i, i32 %add5.i, i32 1
%ref.tmp.sroa.0.0.cast = bitcast %class.Complex_int* %arrayidx to <2 x i32>*
@@ -80,7 +80,7 @@ define void @test_long(%class.Complex_lo
entry:
%arrayidx = getelementptr inbounds %class.Complex_long, %class.Complex_long* %out, i64 %out_start
%0 = bitcast %class.Complex_long* %arrayidx to i128*
- %1 = load i128* %0, align 4
+ %1 = load i128, i128* %0, align 4
%t0.sroa.0.0.extract.trunc = trunc i128 %1 to i64
%2 = bitcast i64 %t0.sroa.0.0.extract.trunc to i64
%t0.sroa.2.0.extract.shift = lshr i128 %1, 64
@@ -89,11 +89,11 @@ entry:
%add = add i64 %out_start, 8
%arrayidx2 = getelementptr inbounds %class.Complex_long, %class.Complex_long* %out, i64 %add
%i.i = getelementptr inbounds %class.Complex_long, %class.Complex_long* %arrayidx2, i32 0, i32 0
- %4 = load i64* %i.i, align 4
+ %4 = load i64, i64* %i.i, align 4
%add.i = add i64 %4, %2
%retval.sroa.0.0.vec.insert.i = insertelement <2 x i64> undef, i64 %add.i, i32 0
%r.i = getelementptr inbounds %class.Complex_long, %class.Complex_long* %arrayidx2, i32 0, i32 1
- %5 = load i64* %r.i, align 4
+ %5 = load i64, i64* %r.i, align 4
%add5.i = add i64 %5, %3
%retval.sroa.0.4.vec.insert.i = insertelement <2 x i64> %retval.sroa.0.0.vec.insert.i, i64 %add5.i, i32 1
%ref.tmp.sroa.0.0.cast = bitcast %class.Complex_long* %arrayidx to <2 x i64>*
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-dup.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-dup.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-dup.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-dup.ll Fri Feb 27 15:17:42 2015
@@ -165,7 +165,7 @@ define <4 x float> @v_shuffledupQfloat(f
define <8 x i8> @vduplane8(<8 x i8>* %A) nounwind {
;CHECK-LABEL: vduplane8:
;CHECK: dup.8b
- %tmp1 = load <8 x i8>* %A
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
ret <8 x i8> %tmp2
}
@@ -173,7 +173,7 @@ define <8 x i8> @vduplane8(<8 x i8>* %A)
define <4 x i16> @vduplane16(<4 x i16>* %A) nounwind {
;CHECK-LABEL: vduplane16:
;CHECK: dup.4h
- %tmp1 = load <4 x i16>* %A
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 >
ret <4 x i16> %tmp2
}
@@ -181,7 +181,7 @@ define <4 x i16> @vduplane16(<4 x i16>*
define <2 x i32> @vduplane32(<2 x i32>* %A) nounwind {
;CHECK-LABEL: vduplane32:
;CHECK: dup.2s
- %tmp1 = load <2 x i32>* %A
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> < i32 1, i32 1 >
ret <2 x i32> %tmp2
}
@@ -189,7 +189,7 @@ define <2 x i32> @vduplane32(<2 x i32>*
define <2 x float> @vduplanefloat(<2 x float>* %A) nounwind {
;CHECK-LABEL: vduplanefloat:
;CHECK: dup.2s
- %tmp1 = load <2 x float>* %A
+ %tmp1 = load <2 x float>, <2 x float>* %A
%tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> < i32 1, i32 1 >
ret <2 x float> %tmp2
}
@@ -197,7 +197,7 @@ define <2 x float> @vduplanefloat(<2 x f
define <16 x i8> @vduplaneQ8(<8 x i8>* %A) nounwind {
;CHECK-LABEL: vduplaneQ8:
;CHECK: dup.16b
- %tmp1 = load <8 x i8>* %A
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <16 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
ret <16 x i8> %tmp2
}
@@ -205,7 +205,7 @@ define <16 x i8> @vduplaneQ8(<8 x i8>* %
define <8 x i16> @vduplaneQ16(<4 x i16>* %A) nounwind {
;CHECK-LABEL: vduplaneQ16:
;CHECK: dup.8h
- %tmp1 = load <4 x i16>* %A
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <8 x i32> < i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1 >
ret <8 x i16> %tmp2
}
@@ -213,7 +213,7 @@ define <8 x i16> @vduplaneQ16(<4 x i16>*
define <4 x i32> @vduplaneQ32(<2 x i32>* %A) nounwind {
;CHECK-LABEL: vduplaneQ32:
;CHECK: dup.4s
- %tmp1 = load <2 x i32>* %A
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 >
ret <4 x i32> %tmp2
}
@@ -221,7 +221,7 @@ define <4 x i32> @vduplaneQ32(<2 x i32>*
define <4 x float> @vduplaneQfloat(<2 x float>* %A) nounwind {
;CHECK-LABEL: vduplaneQfloat:
;CHECK: dup.4s
- %tmp1 = load <2 x float>* %A
+ %tmp1 = load <2 x float>, <2 x float>* %A
%tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <4 x i32> < i32 1, i32 1, i32 1, i32 1 >
ret <4 x float> %tmp2
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-early-ifcvt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-early-ifcvt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-early-ifcvt.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-early-ifcvt.ll Fri Feb 27 15:17:42 2015
@@ -15,7 +15,7 @@ do.body:
%n.addr.0 = phi i32 [ %n, %entry ], [ %dec, %do.cond ]
%p.addr.0 = phi i32* [ %p, %entry ], [ %incdec.ptr, %do.cond ]
%incdec.ptr = getelementptr inbounds i32, i32* %p.addr.0, i64 1
- %0 = load i32* %p.addr.0, align 4
+ %0 = load i32, i32* %p.addr.0, align 4
%cmp = icmp sgt i32 %0, %max.0
br i1 %cmp, label %do.cond, label %if.else
@@ -400,7 +400,7 @@ entry:
br label %for.body
for.body:
- %x0 = load i32* undef, align 4
+ %x0 = load i32, i32* undef, align 4
br i1 undef, label %if.then.i146, label %is_sbox.exit155
if.then.i146:
@@ -413,7 +413,7 @@ is_sbox.exit155:
%seg_offset.0.i151 = phi i32 [ %add9.i145, %if.then.i146 ], [ undef, %for.body ]
%idxprom15.i152 = sext i32 %seg_offset.0.i151 to i64
%arrayidx18.i154 = getelementptr inbounds i32, i32* null, i64 %idxprom15.i152
- %x1 = load i32* %arrayidx18.i154, align 4
+ %x1 = load i32, i32* %arrayidx18.i154, align 4
br i1 undef, label %for.body51, label %for.body
for.body51: ; preds = %is_sbox.exit155
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-elf-globals.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-elf-globals.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-elf-globals.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-elf-globals.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@
@var64 = external global i64, align 8
define i8 @test_i8(i8 %new) {
- %val = load i8* @var8, align 1
+ %val = load i8, i8* @var8, align 1
store i8 %new, i8* @var8
ret i8 %val
; CHECK-LABEL: test_i8:
@@ -31,7 +31,7 @@ define i8 @test_i8(i8 %new) {
}
define i16 @test_i16(i16 %new) {
- %val = load i16* @var16, align 2
+ %val = load i16, i16* @var16, align 2
store i16 %new, i16* @var16
ret i16 %val
; CHECK-LABEL: test_i16:
@@ -44,7 +44,7 @@ define i16 @test_i16(i16 %new) {
}
define i32 @test_i32(i32 %new) {
- %val = load i32* @var32, align 4
+ %val = load i32, i32* @var32, align 4
store i32 %new, i32* @var32
ret i32 %val
; CHECK-LABEL: test_i32:
@@ -57,7 +57,7 @@ define i32 @test_i32(i32 %new) {
}
define i64 @test_i64(i64 %new) {
- %val = load i64* @var64, align 8
+ %val = load i64, i64* @var64, align 8
store i64 %new, i64* @var64
ret i64 %val
; CHECK-LABEL: test_i64:
@@ -83,8 +83,8 @@ define i64* @test_addr() {
@protectedvar = protected global i32 0, align 4
define i32 @test_vis() {
- %lhs = load i32* @hiddenvar, align 4
- %rhs = load i32* @protectedvar, align 4
+ %lhs = load i32, i32* @hiddenvar, align 4
+ %rhs = load i32, i32* @protectedvar, align 4
%ret = add i32 %lhs, %rhs
ret i32 %ret
; CHECK-PIC: adrp {{x[0-9]+}}, hiddenvar
@@ -97,7 +97,7 @@ define i32 @test_vis() {
define i32 @test_default_align() {
%addr = getelementptr [2 x i32], [2 x i32]* @var_default, i32 0, i32 0
- %val = load i32* %addr
+ %val = load i32, i32* %addr
ret i32 %val
; CHECK-LABEL: test_default_align:
; CHECK: adrp x[[HIREG:[0-9]+]], var_default
@@ -106,7 +106,7 @@ define i32 @test_default_align() {
define i64 @test_default_unaligned() {
%addr = bitcast [2 x i32]* @var_default to i64*
- %val = load i64* %addr
+ %val = load i64, i64* %addr
ret i64 %val
; CHECK-LABEL: test_default_unaligned:
; CHECK: adrp [[HIREG:x[0-9]+]], var_default
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-ext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-ext.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-ext.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-ext.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
define <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: test_vextd:
;CHECK: {{ext.8b.*#3}}
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
ret <8 x i8> %tmp3
}
@@ -12,8 +12,8 @@ define <8 x i8> @test_vextd(<8 x i8>* %A
define <8 x i8> @test_vextRd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: test_vextRd:
;CHECK: {{ext.8b.*#5}}
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4>
ret <8 x i8> %tmp3
}
@@ -21,8 +21,8 @@ define <8 x i8> @test_vextRd(<8 x i8>* %
define <16 x i8> @test_vextq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: test_vextq:
;CHECK: {{ext.16b.*3}}
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18>
ret <16 x i8> %tmp3
}
@@ -30,8 +30,8 @@ define <16 x i8> @test_vextq(<16 x i8>*
define <16 x i8> @test_vextRq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: test_vextRq:
;CHECK: {{ext.16b.*7}}
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6>
ret <16 x i8> %tmp3
}
@@ -39,8 +39,8 @@ define <16 x i8> @test_vextRq(<16 x i8>*
define <4 x i16> @test_vextd16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: test_vextd16:
;CHECK: {{ext.8b.*#6}}
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
ret <4 x i16> %tmp3
}
@@ -48,8 +48,8 @@ define <4 x i16> @test_vextd16(<4 x i16>
define <4 x i32> @test_vextq32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: test_vextq32:
;CHECK: {{ext.16b.*12}}
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i32>, <4 x i32>* %B
%tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
ret <4 x i32> %tmp3
}
@@ -59,8 +59,8 @@ define <4 x i32> @test_vextq32(<4 x i32>
define <8 x i8> @test_vextd_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: test_vextd_undef:
;CHECK: {{ext.8b.*}}
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 3, i32 undef, i32 undef, i32 6, i32 7, i32 8, i32 9, i32 10>
ret <8 x i8> %tmp3
}
@@ -68,8 +68,8 @@ define <8 x i8> @test_vextd_undef(<8 x i
define <8 x i8> @test_vextd_undef2(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: test_vextd_undef2:
;CHECK: {{ext.8b.*#6}}
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 2, i32 3, i32 4, i32 5>
ret <8 x i8> %tmp3
}
@@ -77,8 +77,8 @@ define <8 x i8> @test_vextd_undef2(<8 x
define <16 x i8> @test_vextRq_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: test_vextRq_undef:
;CHECK: {{ext.16b.*#7}}
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 23, i32 24, i32 25, i32 26, i32 undef, i32 undef, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 undef, i32 6>
ret <16 x i8> %tmp3
}
@@ -86,7 +86,7 @@ define <16 x i8> @test_vextRq_undef(<16
define <8 x i16> @test_vextRq_undef2(<8 x i16>* %A) nounwind {
;CHECK-LABEL: test_vextRq_undef2:
;CHECK: {{ext.16b.*#10}}
- %tmp1 = load <8 x i16>* %A
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
%vext = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 1, i32 2, i32 3, i32 4>
ret <8 x i16> %vext;
}
@@ -101,8 +101,8 @@ define <4 x i16> @test_interleaved(<8 x
;CHECK-LABEL: test_interleaved:
;CHECK: ext.8b
;CHECK: zip1.4h
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <4 x i32> <i32 3, i32 8, i32 5, i32 9>
ret <4 x i16> %tmp3
}
@@ -111,8 +111,8 @@ define <4 x i16> @test_interleaved(<8 x
define <4 x i16> @test_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: test_undef:
;CHECK: zip1.4h
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <4 x i32> <i32 undef, i32 8, i32 5, i32 9>
ret <4 x i16> %tmp3
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-extend.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-extend.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-extend.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-extend.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ define i64 @foo(i32 %i) {
; CHECK: ret
%idxprom = sext i32 %i to i64
%arrayidx = getelementptr inbounds [0 x i32], [0 x i32]* @array, i64 0, i64 %idxprom
- %tmp1 = load i32* %arrayidx, align 4
+ %tmp1 = load i32, i32* %arrayidx, align 4
%conv = sext i32 %tmp1 to i64
ret i64 %conv
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-extload-knownzero.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-extload-knownzero.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-extload-knownzero.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-extload-knownzero.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ entry:
bb1:
; CHECK: %bb1
; CHECK: ldrh [[REG:w[0-9]+]]
- %tmp2 = load i16* %ptr, align 2
+ %tmp2 = load i16, i16* %ptr, align 2
br label %bb2
bb2:
; CHECK: %bb2
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-addr-offset.ll Fri Feb 27 15:17:42 2015
@@ -13,7 +13,7 @@ entry:
; CHECK: add x[[REG3:[0-9]+]], x[[REG1]], x[[REG2]]
; CHECK: ldr w0, [x[[REG3]]]
; CHECK: ret
- %0 = load i32* getelementptr inbounds ([5001 x i32]* @sortlist, i32 0, i64 5000), align 4
+ %0 = load i32, i32* getelementptr inbounds ([5001 x i32]* @sortlist, i32 0, i64 5000), align 4
ret i32 %0
}
@@ -26,7 +26,7 @@ entry:
; CHECK: add x[[REG3:[0-9]+]], x[[REG1]], x[[REG2]]
; CHECK: ldr x0, [x[[REG3]]]
; CHECK: ret
- %0 = load i64* getelementptr inbounds ([5001 x i64]* @sortlist2, i32 0, i64 5000), align 4
+ %0 = load i64, i64* getelementptr inbounds ([5001 x i64]* @sortlist2, i32 0, i64 5000), align 4
ret i64 %0
}
@@ -40,8 +40,8 @@ entry:
; CHECK: movz x[[REG:[0-9]+]], #0xb3a, lsl #32
; CHECK: movk x[[REG]], #0x73ce, lsl #16
; CHECK: movk x[[REG]], #0x2ff2
- %0 = load i8** @pd2, align 8
+ %0 = load i8*, i8** @pd2, align 8
%arrayidx = getelementptr inbounds i8, i8* %0, i64 12345678901234
- %1 = load i8* %arrayidx, align 1
+ %1 = load i8, i8* %arrayidx, align 1
ret i8 %1
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-br.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-br.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-br.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-br.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
define void @branch1() nounwind uwtable ssp {
%x = alloca i32, align 4
store i32 0, i32* %x, align 4
- %1 = load i32* %x, align 4
+ %1 = load i32, i32* %x, align 4
%2 = icmp ne i32 %1, 0
br i1 %2, label %3, label %4
@@ -23,7 +23,7 @@ define void @branch2() nounwind uwtable
store i32 1, i32* %y, align 4
store i32 1, i32* %x, align 4
store i32 0, i32* %z, align 4
- %2 = load i32* %x, align 4
+ %2 = load i32, i32* %x, align 4
%3 = icmp ne i32 %2, 0
br i1 %3, label %4, label %5
@@ -32,12 +32,12 @@ define void @branch2() nounwind uwtable
br label %14
; <label>:5 ; preds = %0
- %6 = load i32* %y, align 4
+ %6 = load i32, i32* %y, align 4
%7 = icmp ne i32 %6, 0
br i1 %7, label %8, label %13
; <label>:8 ; preds = %5
- %9 = load i32* %z, align 4
+ %9 = load i32, i32* %z, align 4
%10 = icmp ne i32 %9, 0
br i1 %10, label %11, label %12
@@ -53,7 +53,7 @@ define void @branch2() nounwind uwtable
br label %14
; <label>:14 ; preds = %4, %11, %12, %13
- %15 = load i32* %1
+ %15 = load i32, i32* %1
ret void
}
@@ -93,7 +93,7 @@ entry:
store i16 %b, i16* %b.addr, align 2
store i32 %c, i32* %c.addr, align 4
store i64 %d, i64* %d.addr, align 8
- %0 = load i16* %b.addr, align 2
+ %0 = load i16, i16* %b.addr, align 2
; CHECK: and w0, w0, #0x1
; CHECK: cmp w0, #0
; CHECK: b.eq LBB4_2
@@ -105,7 +105,7 @@ if.then:
br label %if.end
if.end: ; preds = %if.then, %entry
- %1 = load i32* %c.addr, align 4
+ %1 = load i32, i32* %c.addr, align 4
; CHECK: and w[[REG:[0-9]+]], w{{[0-9]+}}, #0x1
; CHECK: cmp w[[REG]], #0
; CHECK: b.eq LBB4_4
@@ -117,7 +117,7 @@ if.then3:
br label %if.end4
if.end4: ; preds = %if.then3, %if.end
- %2 = load i64* %d.addr, align 8
+ %2 = load i64, i64* %d.addr, align 8
; CHECK: cmp w{{[0-9]+}}, #0
; CHECK: b.eq LBB4_6
%conv5 = trunc i64 %2 to i1
@@ -128,7 +128,7 @@ if.then7:
br label %if.end8
if.end8: ; preds = %if.then7, %if.end4
- %3 = load i8* %a.addr, align 1
+ %3 = load i8, i8* %a.addr, align 1
ret i8 %3
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-call.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-call.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-call.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-call.ll Fri Feb 27 15:17:42 2015
@@ -23,7 +23,7 @@ define i32 @call1(i32 %a) nounwind {
entry:
%a.addr = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
- %tmp = load i32* %a.addr, align 4
+ %tmp = load i32, i32* %a.addr, align 4
ret i32 %tmp
}
@@ -35,7 +35,7 @@ entry:
; CHECK-NEXT: bl _call1
%a.addr = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
- %tmp = load i32* %a.addr, align 4
+ %tmp = load i32, i32* %a.addr, align 4
%call = call i32 @call1(i32 %tmp)
ret i32 %call
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-conversion.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-conversion.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-conversion.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-conversion.ll Fri Feb 27 15:17:42 2015
@@ -27,16 +27,16 @@ entry:
store i16 %b, i16* %b.addr, align 2
store i32 %c, i32* %c.addr, align 4
store i64 %d, i64* %d.addr, align 8
- %tmp = load i64* %d.addr, align 8
+ %tmp = load i64, i64* %d.addr, align 8
%conv = trunc i64 %tmp to i32
store i32 %conv, i32* %c.addr, align 4
- %tmp1 = load i32* %c.addr, align 4
+ %tmp1 = load i32, i32* %c.addr, align 4
%conv2 = trunc i32 %tmp1 to i16
store i16 %conv2, i16* %b.addr, align 2
- %tmp3 = load i16* %b.addr, align 2
+ %tmp3 = load i16, i16* %b.addr, align 2
%conv4 = trunc i16 %tmp3 to i8
store i8 %conv4, i8* %a.addr, align 1
- %tmp5 = load i8* %a.addr, align 1
+ %tmp5 = load i8, i8* %a.addr, align 1
%conv6 = zext i8 %tmp5 to i32
ret i32 %conv6
}
@@ -66,16 +66,16 @@ entry:
store i16 %b, i16* %b.addr, align 2
store i32 %c, i32* %c.addr, align 4
store i64 %d, i64* %d.addr, align 8
- %tmp = load i8* %a.addr, align 1
+ %tmp = load i8, i8* %a.addr, align 1
%conv = zext i8 %tmp to i16
store i16 %conv, i16* %b.addr, align 2
- %tmp1 = load i16* %b.addr, align 2
+ %tmp1 = load i16, i16* %b.addr, align 2
%conv2 = zext i16 %tmp1 to i32
store i32 %conv2, i32* %c.addr, align 4
- %tmp3 = load i32* %c.addr, align 4
+ %tmp3 = load i32, i32* %c.addr, align 4
%conv4 = zext i32 %tmp3 to i64
store i64 %conv4, i64* %d.addr, align 8
- %tmp5 = load i64* %d.addr, align 8
+ %tmp5 = load i64, i64* %d.addr, align 8
ret i64 %tmp5
}
@@ -121,16 +121,16 @@ entry:
store i16 %b, i16* %b.addr, align 2
store i32 %c, i32* %c.addr, align 4
store i64 %d, i64* %d.addr, align 8
- %tmp = load i8* %a.addr, align 1
+ %tmp = load i8, i8* %a.addr, align 1
%conv = sext i8 %tmp to i16
store i16 %conv, i16* %b.addr, align 2
- %tmp1 = load i16* %b.addr, align 2
+ %tmp1 = load i16, i16* %b.addr, align 2
%conv2 = sext i16 %tmp1 to i32
store i32 %conv2, i32* %c.addr, align 4
- %tmp3 = load i32* %c.addr, align 4
+ %tmp3 = load i32, i32* %c.addr, align 4
%conv4 = sext i32 %tmp3 to i64
store i64 %conv4, i64* %d.addr, align 8
- %tmp5 = load i64* %d.addr, align 8
+ %tmp5 = load i64, i64* %d.addr, align 8
ret i64 %tmp5
}
@@ -409,7 +409,7 @@ define void @stack_trunc() nounwind {
; CHECK: add sp, sp, #16
%a = alloca i8, align 1
%b = alloca i64, align 8
- %c = load i64* %b, align 8
+ %c = load i64, i64* %b, align 8
%d = trunc i64 %c to i8
store i8 %d, i8* %a, align 1
ret void
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-gv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-gv.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-gv.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-gv.ll Fri Feb 27 15:17:42 2015
@@ -26,12 +26,12 @@ entry:
; CHECK: and [[REG8:x[0-9]+]], [[REG7]], #0xffff
; CHECK: str [[REG8]], {{\[}}[[REG1]]{{\]}}
; CHECK: ldr {{x[0-9]+}}, {{\[}}[[REG1]]{{\]}}
- %0 = load i64* @seed, align 8
+ %0 = load i64, i64* @seed, align 8
%mul = mul nsw i64 %0, 1309
%add = add nsw i64 %mul, 13849
%and = and i64 %add, 65535
store i64 %and, i64* @seed, align 8
- %1 = load i64* @seed, align 8
+ %1 = load i64, i64* @seed, align 8
%conv = trunc i64 %1 to i32
ret i32 %conv
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-indirectbr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-indirectbr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-indirectbr.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-indirectbr.ll Fri Feb 27 15:17:42 2015
@@ -8,10 +8,10 @@ entry:
%retval = alloca i32, align 4
%target.addr = alloca i32, align 4
store i32 %target, i32* %target.addr, align 4
- %0 = load i32* %target.addr, align 4
+ %0 = load i32, i32* %target.addr, align 4
%idxprom = zext i32 %0 to i64
%arrayidx = getelementptr inbounds [2 x i8*], [2 x i8*]* @fn.table, i32 0, i64 %idxprom
- %1 = load i8** %arrayidx, align 8
+ %1 = load i8*, i8** %arrayidx, align 8
br label %indirectgoto
ZERO: ; preds = %indirectgoto
@@ -25,7 +25,7 @@ ONE:
br label %return
return: ; preds = %ONE, %ZERO
- %2 = load i32* %retval
+ %2 = load i32, i32* %retval
ret i32 %2
indirectgoto: ; preds = %entry
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-ret.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-ret.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-ret.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel-ret.ll Fri Feb 27 15:17:42 2015
@@ -16,7 +16,7 @@ entry:
; CHECK: ret
%a.addr = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
- %tmp = load i32* %a.addr, align 4
+ %tmp = load i32, i32* %a.addr, align 4
ret i32 %tmp
}
@@ -28,7 +28,7 @@ entry:
; CHECK: ret
%a.addr = alloca i64, align 8
store i64 %a, i64* %a.addr, align 8
- %tmp = load i64* %a.addr, align 8
+ %tmp = load i64, i64* %a.addr, align 8
ret i64 %tmp
}
@@ -38,7 +38,7 @@ entry:
; CHECK: sxth w0, w0
%a.addr = alloca i16, align 1
store i16 %a, i16* %a.addr, align 1
- %0 = load i16* %a.addr, align 1
+ %0 = load i16, i16* %a.addr, align 1
ret i16 %0
}
@@ -48,7 +48,7 @@ entry:
; CHECK: sxtb w0, w0
%a.addr = alloca i8, align 1
store i8 %a, i8* %a.addr, align 1
- %0 = load i8* %a.addr, align 1
+ %0 = load i8, i8* %a.addr, align 1
ret i8 %0
}
@@ -58,6 +58,6 @@ entry:
; CHECK: and w0, w0, #0x1
%a.addr = alloca i1, align 1
store i1 %a, i1* %a.addr, align 1
- %0 = load i1* %a.addr, align 1
+ %0 = load i1, i1* %a.addr, align 1
ret i1 %0
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-fast-isel.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ entry:
; CHECK: ret
%a.addr = alloca i32, align 4
store i32 %a, i32* %a.addr
- %tmp = load i32* %a.addr
+ %tmp = load i32, i32* %a.addr
store i32 %tmp, i32* %a.addr
ret void
}
@@ -22,7 +22,7 @@ define void @t1(i64 %a) nounwind {
; CHECK: ret
%a.addr = alloca i64, align 4
store i64 %a, i64* %a.addr
- %tmp = load i64* %a.addr
+ %tmp = load i64, i64* %a.addr
store i64 %tmp, i64* %a.addr
ret void
}
@@ -39,7 +39,7 @@ entry:
; CHECK: ret
%a.addr = alloca i1, align 1
store i1 %a, i1* %a.addr, align 1
- %0 = load i1* %a.addr, align 1
+ %0 = load i1, i1* %a.addr, align 1
ret i1 %0
}
@@ -49,7 +49,7 @@ entry:
; CHECK: ldur w0, [x0, #-4]
; CHECK: ret
%0 = getelementptr i32, i32 *%ptr, i32 -1
- %1 = load i32* %0, align 4
+ %1 = load i32, i32* %0, align 4
ret i32 %1
}
@@ -59,7 +59,7 @@ entry:
; CHECK: ldur w0, [x0, #-256]
; CHECK: ret
%0 = getelementptr i32, i32 *%ptr, i32 -64
- %1 = load i32* %0, align 4
+ %1 = load i32, i32* %0, align 4
ret i32 %1
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-fastisel-gep-promote-before-add.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-fastisel-gep-promote-before-add.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-fastisel-gep-promote-before-add.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-fastisel-gep-promote-before-add.ll Fri Feb 27 15:17:42 2015
@@ -6,13 +6,13 @@ define zeroext i8 @gep_promotion(i8* %pt
entry:
%ptr.addr = alloca i8*, align 8
%add = add i8 64, 64 ; 0x40 + 0x40
- %0 = load i8** %ptr.addr, align 8
+ %0 = load i8*, i8** %ptr.addr, align 8
; CHECK-LABEL: _gep_promotion:
; CHECK: ldrb {{[a-z][0-9]+}}, {{\[[a-z][0-9]+\]}}
%arrayidx = getelementptr inbounds i8, i8* %0, i8 %add
- %1 = load i8* %arrayidx, align 1
+ %1 = load i8, i8* %arrayidx, align 1
ret i8 %1
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-fmuladd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-fmuladd.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-fmuladd.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-fmuladd.ll Fri Feb 27 15:17:42 2015
@@ -4,9 +4,9 @@ define float @test_f32(float* %A, float*
;CHECK-LABEL: test_f32:
;CHECK: fmadd
;CHECK-NOT: fmadd
- %tmp1 = load float* %A
- %tmp2 = load float* %B
- %tmp3 = load float* %C
+ %tmp1 = load float, float* %A
+ %tmp2 = load float, float* %B
+ %tmp3 = load float, float* %C
%tmp4 = call float @llvm.fmuladd.f32(float %tmp1, float %tmp2, float %tmp3)
ret float %tmp4
}
@@ -15,9 +15,9 @@ define <2 x float> @test_v2f32(<2 x floa
;CHECK-LABEL: test_v2f32:
;CHECK: fmla.2s
;CHECK-NOT: fmla.2s
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
- %tmp3 = load <2 x float>* %C
+ %tmp1 = load <2 x float>, <2 x float>* %A
+ %tmp2 = load <2 x float>, <2 x float>* %B
+ %tmp3 = load <2 x float>, <2 x float>* %C
%tmp4 = call <2 x float> @llvm.fmuladd.v2f32(<2 x float> %tmp1, <2 x float> %tmp2, <2 x float> %tmp3)
ret <2 x float> %tmp4
}
@@ -26,9 +26,9 @@ define <4 x float> @test_v4f32(<4 x floa
;CHECK-LABEL: test_v4f32:
;CHECK: fmla.4s
;CHECK-NOT: fmla.4s
- %tmp1 = load <4 x float>* %A
- %tmp2 = load <4 x float>* %B
- %tmp3 = load <4 x float>* %C
+ %tmp1 = load <4 x float>, <4 x float>* %A
+ %tmp2 = load <4 x float>, <4 x float>* %B
+ %tmp3 = load <4 x float>, <4 x float>* %C
%tmp4 = call <4 x float> @llvm.fmuladd.v4f32(<4 x float> %tmp1, <4 x float> %tmp2, <4 x float> %tmp3)
ret <4 x float> %tmp4
}
@@ -38,9 +38,9 @@ define <8 x float> @test_v8f32(<8 x floa
;CHECK: fmla.4s
;CHECK: fmla.4s
;CHECK-NOT: fmla.4s
- %tmp1 = load <8 x float>* %A
- %tmp2 = load <8 x float>* %B
- %tmp3 = load <8 x float>* %C
+ %tmp1 = load <8 x float>, <8 x float>* %A
+ %tmp2 = load <8 x float>, <8 x float>* %B
+ %tmp3 = load <8 x float>, <8 x float>* %C
%tmp4 = call <8 x float> @llvm.fmuladd.v8f32(<8 x float> %tmp1, <8 x float> %tmp2, <8 x float> %tmp3)
ret <8 x float> %tmp4
}
@@ -49,9 +49,9 @@ define double @test_f64(double* %A, doub
;CHECK-LABEL: test_f64:
;CHECK: fmadd
;CHECK-NOT: fmadd
- %tmp1 = load double* %A
- %tmp2 = load double* %B
- %tmp3 = load double* %C
+ %tmp1 = load double, double* %A
+ %tmp2 = load double, double* %B
+ %tmp3 = load double, double* %C
%tmp4 = call double @llvm.fmuladd.f64(double %tmp1, double %tmp2, double %tmp3)
ret double %tmp4
}
@@ -60,9 +60,9 @@ define <2 x double> @test_v2f64(<2 x dou
;CHECK-LABEL: test_v2f64:
;CHECK: fmla.2d
;CHECK-NOT: fmla.2d
- %tmp1 = load <2 x double>* %A
- %tmp2 = load <2 x double>* %B
- %tmp3 = load <2 x double>* %C
+ %tmp1 = load <2 x double>, <2 x double>* %A
+ %tmp2 = load <2 x double>, <2 x double>* %B
+ %tmp3 = load <2 x double>, <2 x double>* %C
%tmp4 = call <2 x double> @llvm.fmuladd.v2f64(<2 x double> %tmp1, <2 x double> %tmp2, <2 x double> %tmp3)
ret <2 x double> %tmp4
}
@@ -72,9 +72,9 @@ define <4 x double> @test_v4f64(<4 x dou
;CHECK: fmla.2d
;CHECK: fmla.2d
;CHECK-NOT: fmla.2d
- %tmp1 = load <4 x double>* %A
- %tmp2 = load <4 x double>* %B
- %tmp3 = load <4 x double>* %C
+ %tmp1 = load <4 x double>, <4 x double>* %A
+ %tmp2 = load <4 x double>, <4 x double>* %B
+ %tmp3 = load <4 x double>, <4 x double>* %C
%tmp4 = call <4 x double> @llvm.fmuladd.v4f64(<4 x double> %tmp1, <4 x double> %tmp2, <4 x double> %tmp3)
ret <4 x double> %tmp4
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-fold-address.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-fold-address.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-fold-address.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-fold-address.ll Fri Feb 27 15:17:42 2015
@@ -14,23 +14,23 @@ entry:
; CHECK: ldp d0, d1, [x[[REG]]]
; CHECK: ldp d2, d3, [x[[REG]], #16]
; CHECK: ret
- %ivar = load i64* @"OBJC_IVAR_$_UIScreen._bounds", align 8, !invariant.load !4
+ %ivar = load i64, i64* @"OBJC_IVAR_$_UIScreen._bounds", align 8, !invariant.load !4
%0 = bitcast %0* %self to i8*
%add.ptr = getelementptr inbounds i8, i8* %0, i64 %ivar
%add.ptr10.0 = bitcast i8* %add.ptr to double*
- %tmp11 = load double* %add.ptr10.0, align 8
+ %tmp11 = load double, double* %add.ptr10.0, align 8
%add.ptr.sum = add i64 %ivar, 8
%add.ptr10.1 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum
%1 = bitcast i8* %add.ptr10.1 to double*
- %tmp12 = load double* %1, align 8
+ %tmp12 = load double, double* %1, align 8
%add.ptr.sum17 = add i64 %ivar, 16
%add.ptr4.1 = getelementptr inbounds i8, i8* %0, i64 %add.ptr.sum17
%add.ptr4.1.0 = bitcast i8* %add.ptr4.1 to double*
- %tmp = load double* %add.ptr4.1.0, align 8
+ %tmp = load double, double* %add.ptr4.1.0, align 8
%add.ptr4.1.sum = add i64 %ivar, 24
%add.ptr4.1.1 = getelementptr inbounds i8, i8* %0, i64 %add.ptr4.1.sum
%2 = bitcast i8* %add.ptr4.1.1 to double*
- %tmp5 = load double* %2, align 8
+ %tmp5 = load double, double* %2, align 8
%insert14 = insertvalue %struct.CGPoint undef, double %tmp11, 0
%insert16 = insertvalue %struct.CGPoint %insert14, double %tmp12, 1
%insert = insertvalue %struct.CGRect undef, %struct.CGPoint %insert16, 0
@@ -46,20 +46,20 @@ entry:
; CHECK: ldr d0, [x0, x{{[0-9]+}}]
; CHECK-NOT: add x0, x0, x1
; CHECK: ret
- %ivar = load i64* @"OBJC_IVAR_$_UIScreen._bounds", align 8, !invariant.load !4
+ %ivar = load i64, i64* @"OBJC_IVAR_$_UIScreen._bounds", align 8, !invariant.load !4
%0 = bitcast %0* %self to i8*
%add.ptr = getelementptr inbounds i8, i8* %0, i64 %ivar
%add.ptr10.0 = bitcast i8* %add.ptr to double*
- %tmp11 = load double* %add.ptr10.0, align 8
+ %tmp11 = load double, double* %add.ptr10.0, align 8
%add.ptr10.1 = getelementptr inbounds i8, i8* %0, i64 %ivar
%1 = bitcast i8* %add.ptr10.1 to double*
- %tmp12 = load double* %1, align 8
+ %tmp12 = load double, double* %1, align 8
%add.ptr4.1 = getelementptr inbounds i8, i8* %0, i64 %ivar
%add.ptr4.1.0 = bitcast i8* %add.ptr4.1 to double*
- %tmp = load double* %add.ptr4.1.0, align 8
+ %tmp = load double, double* %add.ptr4.1.0, align 8
%add.ptr4.1.1 = getelementptr inbounds i8, i8* %0, i64 %ivar
%2 = bitcast i8* %add.ptr4.1.1 to double*
- %tmp5 = load double* %2, align 8
+ %tmp5 = load double, double* %2, align 8
%insert14 = insertvalue %struct.CGPoint undef, double %tmp11, 0
%insert16 = insertvalue %struct.CGPoint %insert14, double %tmp12, 1
%insert = insertvalue %struct.CGRect undef, %struct.CGPoint %insert16, 0
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-fold-lsl.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-fold-lsl.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-fold-lsl.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-fold-lsl.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@ define i16 @load_halfword(%struct.a* %ct
%conv82 = zext i32 %shr81 to i64
%idxprom83 = and i64 %conv82, 255
%arrayidx86 = getelementptr inbounds %struct.a, %struct.a* %ctx, i64 0, i64 %idxprom83
- %result = load i16* %arrayidx86, align 2
+ %result = load i16, i16* %arrayidx86, align 2
ret i16 %result
}
@@ -26,7 +26,7 @@ define i32 @load_word(%struct.b* %ctx, i
%conv82 = zext i32 %shr81 to i64
%idxprom83 = and i64 %conv82, 255
%arrayidx86 = getelementptr inbounds %struct.b, %struct.b* %ctx, i64 0, i64 %idxprom83
- %result = load i32* %arrayidx86, align 4
+ %result = load i32, i32* %arrayidx86, align 4
ret i32 %result
}
@@ -38,7 +38,7 @@ define i64 @load_doubleword(%struct.c* %
%conv82 = zext i32 %shr81 to i64
%idxprom83 = and i64 %conv82, 255
%arrayidx86 = getelementptr inbounds %struct.c, %struct.c* %ctx, i64 0, i64 %idxprom83
- %result = load i64* %arrayidx86, align 8
+ %result = load i64, i64* %arrayidx86, align 8
ret i64 %result
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-fp128-folding.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-fp128-folding.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-fp128-folding.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-fp128-folding.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@ define fp128 @test_folding() {
; CHECK-LABEL: test_folding:
%l = alloca i32
store i32 42, i32* %l
- %val = load i32* %l
+ %val = load i32, i32* %l
%fpval = sitofp i32 %val to fp128
; If the value is loaded from a constant pool into an fp128, it's been folded
; successfully.
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-fp128.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-fp128.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-fp128.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-fp128.ll Fri Feb 27 15:17:42 2015
@@ -6,8 +6,8 @@
define fp128 @test_add() {
; CHECK-LABEL: test_add:
- %lhs = load fp128* @lhs, align 16
- %rhs = load fp128* @rhs, align 16
+ %lhs = load fp128, fp128* @lhs, align 16
+ %rhs = load fp128, fp128* @rhs, align 16
; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]
@@ -19,8 +19,8 @@ define fp128 @test_add() {
define fp128 @test_sub() {
; CHECK-LABEL: test_sub:
- %lhs = load fp128* @lhs, align 16
- %rhs = load fp128* @rhs, align 16
+ %lhs = load fp128, fp128* @lhs, align 16
+ %rhs = load fp128, fp128* @rhs, align 16
; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]
@@ -32,8 +32,8 @@ define fp128 @test_sub() {
define fp128 @test_mul() {
; CHECK-LABEL: test_mul:
- %lhs = load fp128* @lhs, align 16
- %rhs = load fp128* @rhs, align 16
+ %lhs = load fp128, fp128* @lhs, align 16
+ %rhs = load fp128, fp128* @rhs, align 16
; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]
@@ -45,8 +45,8 @@ define fp128 @test_mul() {
define fp128 @test_div() {
; CHECK-LABEL: test_div:
- %lhs = load fp128* @lhs, align 16
- %rhs = load fp128* @rhs, align 16
+ %lhs = load fp128, fp128* @lhs, align 16
+ %rhs = load fp128, fp128* @rhs, align 16
; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]
@@ -60,7 +60,7 @@ define fp128 @test_div() {
define void @test_fptosi() {
; CHECK-LABEL: test_fptosi:
- %val = load fp128* @lhs, align 16
+ %val = load fp128, fp128* @lhs, align 16
%val32 = fptosi fp128 %val to i32
store i32 %val32, i32* @var32
@@ -75,7 +75,7 @@ define void @test_fptosi() {
define void @test_fptoui() {
; CHECK-LABEL: test_fptoui:
- %val = load fp128* @lhs, align 16
+ %val = load fp128, fp128* @lhs, align 16
%val32 = fptoui fp128 %val to i32
store i32 %val32, i32* @var32
@@ -91,12 +91,12 @@ define void @test_fptoui() {
define void @test_sitofp() {
; CHECK-LABEL: test_sitofp:
- %src32 = load i32* @var32
+ %src32 = load i32, i32* @var32
%val32 = sitofp i32 %src32 to fp128
store volatile fp128 %val32, fp128* @lhs
; CHECK: bl __floatsitf
- %src64 = load i64* @var64
+ %src64 = load i64, i64* @var64
%val64 = sitofp i64 %src64 to fp128
store volatile fp128 %val64, fp128* @lhs
; CHECK: bl __floatditf
@@ -107,12 +107,12 @@ define void @test_sitofp() {
define void @test_uitofp() {
; CHECK-LABEL: test_uitofp:
- %src32 = load i32* @var32
+ %src32 = load i32, i32* @var32
%val32 = uitofp i32 %src32 to fp128
store volatile fp128 %val32, fp128* @lhs
; CHECK: bl __floatunsitf
- %src64 = load i64* @var64
+ %src64 = load i64, i64* @var64
%val64 = uitofp i64 %src64 to fp128
store volatile fp128 %val64, fp128* @lhs
; CHECK: bl __floatunditf
@@ -123,8 +123,8 @@ define void @test_uitofp() {
define i1 @test_setcc1() {
; CHECK-LABEL: test_setcc1:
- %lhs = load fp128* @lhs, align 16
- %rhs = load fp128* @rhs, align 16
+ %lhs = load fp128, fp128* @lhs, align 16
+ %rhs = load fp128, fp128* @rhs, align 16
; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]
@@ -142,8 +142,8 @@ define i1 @test_setcc1() {
define i1 @test_setcc2() {
; CHECK-LABEL: test_setcc2:
- %lhs = load fp128* @lhs, align 16
- %rhs = load fp128* @rhs, align 16
+ %lhs = load fp128, fp128* @lhs, align 16
+ %rhs = load fp128, fp128* @rhs, align 16
; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]
@@ -164,8 +164,8 @@ define i1 @test_setcc2() {
define i32 @test_br_cc() {
; CHECK-LABEL: test_br_cc:
- %lhs = load fp128* @lhs, align 16
- %rhs = load fp128* @rhs, align 16
+ %lhs = load fp128, fp128* @lhs, align 16
+ %rhs = load fp128, fp128* @rhs, align 16
; CHECK: ldr q0, [{{x[0-9]+}}, :lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, :lo12:rhs]
@@ -218,7 +218,7 @@ define void @test_select(i1 %cond, fp128
define void @test_round() {
; CHECK-LABEL: test_round:
- %val = load fp128* @lhs, align 16
+ %val = load fp128, fp128* @lhs, align 16
%float = fptrunc fp128 %val to float
store float %float, float* @varfloat, align 4
@@ -236,15 +236,15 @@ define void @test_round() {
define void @test_extend() {
; CHECK-LABEL: test_extend:
- %val = load fp128* @lhs, align 16
+ %val = load fp128, fp128* @lhs, align 16
- %float = load float* @varfloat
+ %float = load float, float* @varfloat
%fromfloat = fpext float %float to fp128
store volatile fp128 %fromfloat, fp128* @lhs, align 16
; CHECK: bl __extendsftf2
; CHECK: str q0, [{{x[0-9]+}}, :lo12:lhs]
- %double = load double* @vardouble
+ %double = load double, double* @vardouble
%fromdouble = fpext double %double to fp128
store volatile fp128 %fromdouble, fp128* @lhs, align 16
; CHECK: bl __extenddftf2
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-i16-subreg-extract.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-i16-subreg-extract.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-i16-subreg-extract.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-i16-subreg-extract.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
define i32 @foo(<4 x i16>* %__a) nounwind {
; CHECK-LABEL: foo:
; CHECK: umov.h w{{[0-9]+}}, v{{[0-9]+}}[0]
- %tmp18 = load <4 x i16>* %__a, align 8
+ %tmp18 = load <4 x i16>, <4 x i16>* %__a, align 8
%vget_lane = extractelement <4 x i16> %tmp18, i32 0
%conv = zext i16 %vget_lane to i32
%mul = mul nsw i32 3, %conv
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-indexed-memory.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-indexed-memory.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-indexed-memory.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-indexed-memory.ll Fri Feb 27 15:17:42 2015
@@ -4,7 +4,7 @@ define void @store64(i64** nocapture %ou
; CHECK-LABEL: store64:
; CHECK: str x{{[0-9+]}}, [x{{[0-9+]}}], #8
; CHECK: ret
- %tmp = load i64** %out, align 8
+ %tmp = load i64*, i64** %out, align 8
%incdec.ptr = getelementptr inbounds i64, i64* %tmp, i64 1
store i64 %spacing, i64* %tmp, align 4
store i64* %incdec.ptr, i64** %out, align 8
@@ -15,7 +15,7 @@ define void @store32(i32** nocapture %ou
; CHECK-LABEL: store32:
; CHECK: str w{{[0-9+]}}, [x{{[0-9+]}}], #4
; CHECK: ret
- %tmp = load i32** %out, align 8
+ %tmp = load i32*, i32** %out, align 8
%incdec.ptr = getelementptr inbounds i32, i32* %tmp, i64 1
store i32 %spacing, i32* %tmp, align 4
store i32* %incdec.ptr, i32** %out, align 8
@@ -26,7 +26,7 @@ define void @store16(i16** nocapture %ou
; CHECK-LABEL: store16:
; CHECK: strh w{{[0-9+]}}, [x{{[0-9+]}}], #2
; CHECK: ret
- %tmp = load i16** %out, align 8
+ %tmp = load i16*, i16** %out, align 8
%incdec.ptr = getelementptr inbounds i16, i16* %tmp, i64 1
store i16 %spacing, i16* %tmp, align 4
store i16* %incdec.ptr, i16** %out, align 8
@@ -37,7 +37,7 @@ define void @store8(i8** nocapture %out,
; CHECK-LABEL: store8:
; CHECK: strb w{{[0-9+]}}, [x{{[0-9+]}}], #1
; CHECK: ret
- %tmp = load i8** %out, align 8
+ %tmp = load i8*, i8** %out, align 8
%incdec.ptr = getelementptr inbounds i8, i8* %tmp, i64 1
store i8 %spacing, i8* %tmp, align 4
store i8* %incdec.ptr, i8** %out, align 8
@@ -48,7 +48,7 @@ define void @truncst64to32(i32** nocaptu
; CHECK-LABEL: truncst64to32:
; CHECK: str w{{[0-9+]}}, [x{{[0-9+]}}], #4
; CHECK: ret
- %tmp = load i32** %out, align 8
+ %tmp = load i32*, i32** %out, align 8
%incdec.ptr = getelementptr inbounds i32, i32* %tmp, i64 1
%trunc = trunc i64 %spacing to i32
store i32 %trunc, i32* %tmp, align 4
@@ -60,7 +60,7 @@ define void @truncst64to16(i16** nocaptu
; CHECK-LABEL: truncst64to16:
; CHECK: strh w{{[0-9+]}}, [x{{[0-9+]}}], #2
; CHECK: ret
- %tmp = load i16** %out, align 8
+ %tmp = load i16*, i16** %out, align 8
%incdec.ptr = getelementptr inbounds i16, i16* %tmp, i64 1
%trunc = trunc i64 %spacing to i16
store i16 %trunc, i16* %tmp, align 4
@@ -72,7 +72,7 @@ define void @truncst64to8(i8** nocapture
; CHECK-LABEL: truncst64to8:
; CHECK: strb w{{[0-9+]}}, [x{{[0-9+]}}], #1
; CHECK: ret
- %tmp = load i8** %out, align 8
+ %tmp = load i8*, i8** %out, align 8
%incdec.ptr = getelementptr inbounds i8, i8* %tmp, i64 1
%trunc = trunc i64 %spacing to i8
store i8 %trunc, i8* %tmp, align 4
@@ -85,7 +85,7 @@ define void @storef32(float** nocapture
; CHECK-LABEL: storef32:
; CHECK: str s{{[0-9+]}}, [x{{[0-9+]}}], #4
; CHECK: ret
- %tmp = load float** %out, align 8
+ %tmp = load float*, float** %out, align 8
%incdec.ptr = getelementptr inbounds float, float* %tmp, i64 1
store float %spacing, float* %tmp, align 4
store float* %incdec.ptr, float** %out, align 8
@@ -96,7 +96,7 @@ define void @storef64(double** nocapture
; CHECK-LABEL: storef64:
; CHECK: str d{{[0-9+]}}, [x{{[0-9+]}}], #8
; CHECK: ret
- %tmp = load double** %out, align 8
+ %tmp = load double*, double** %out, align 8
%incdec.ptr = getelementptr inbounds double, double* %tmp, i64 1
store double %spacing, double* %tmp, align 4
store double* %incdec.ptr, double** %out, align 8
@@ -108,7 +108,7 @@ define double * @pref64(double** nocaptu
; CHECK: ldr x0, [x0]
; CHECK-NEXT: str d0, [x0, #32]!
; CHECK-NEXT: ret
- %tmp = load double** %out, align 8
+ %tmp = load double*, double** %out, align 8
%ptr = getelementptr inbounds double, double* %tmp, i64 4
store double %spacing, double* %ptr, align 4
ret double *%ptr
@@ -119,7 +119,7 @@ define float * @pref32(float** nocapture
; CHECK: ldr x0, [x0]
; CHECK-NEXT: str s0, [x0, #12]!
; CHECK-NEXT: ret
- %tmp = load float** %out, align 8
+ %tmp = load float*, float** %out, align 8
%ptr = getelementptr inbounds float, float* %tmp, i64 3
store float %spacing, float* %ptr, align 4
ret float *%ptr
@@ -130,7 +130,7 @@ define i64 * @pre64(i64** nocapture %out
; CHECK: ldr x0, [x0]
; CHECK-NEXT: str x1, [x0, #16]!
; CHECK-NEXT: ret
- %tmp = load i64** %out, align 8
+ %tmp = load i64*, i64** %out, align 8
%ptr = getelementptr inbounds i64, i64* %tmp, i64 2
store i64 %spacing, i64* %ptr, align 4
ret i64 *%ptr
@@ -141,7 +141,7 @@ define i32 * @pre32(i32** nocapture %out
; CHECK: ldr x0, [x0]
; CHECK-NEXT: str w1, [x0, #8]!
; CHECK-NEXT: ret
- %tmp = load i32** %out, align 8
+ %tmp = load i32*, i32** %out, align 8
%ptr = getelementptr inbounds i32, i32* %tmp, i64 2
store i32 %spacing, i32* %ptr, align 4
ret i32 *%ptr
@@ -152,7 +152,7 @@ define i16 * @pre16(i16** nocapture %out
; CHECK: ldr x0, [x0]
; CHECK-NEXT: strh w1, [x0, #4]!
; CHECK-NEXT: ret
- %tmp = load i16** %out, align 8
+ %tmp = load i16*, i16** %out, align 8
%ptr = getelementptr inbounds i16, i16* %tmp, i64 2
store i16 %spacing, i16* %ptr, align 4
ret i16 *%ptr
@@ -163,7 +163,7 @@ define i8 * @pre8(i8** nocapture %out, i
; CHECK: ldr x0, [x0]
; CHECK-NEXT: strb w1, [x0, #2]!
; CHECK-NEXT: ret
- %tmp = load i8** %out, align 8
+ %tmp = load i8*, i8** %out, align 8
%ptr = getelementptr inbounds i8, i8* %tmp, i64 2
store i8 %spacing, i8* %ptr, align 4
ret i8 *%ptr
@@ -174,7 +174,7 @@ define i32 * @pretrunc64to32(i32** nocap
; CHECK: ldr x0, [x0]
; CHECK-NEXT: str w1, [x0, #8]!
; CHECK-NEXT: ret
- %tmp = load i32** %out, align 8
+ %tmp = load i32*, i32** %out, align 8
%ptr = getelementptr inbounds i32, i32* %tmp, i64 2
%trunc = trunc i64 %spacing to i32
store i32 %trunc, i32* %ptr, align 4
@@ -186,7 +186,7 @@ define i16 * @pretrunc64to16(i16** nocap
; CHECK: ldr x0, [x0]
; CHECK-NEXT: strh w1, [x0, #4]!
; CHECK-NEXT: ret
- %tmp = load i16** %out, align 8
+ %tmp = load i16*, i16** %out, align 8
%ptr = getelementptr inbounds i16, i16* %tmp, i64 2
%trunc = trunc i64 %spacing to i16
store i16 %trunc, i16* %ptr, align 4
@@ -198,7 +198,7 @@ define i8 * @pretrunc64to8(i8** nocaptur
; CHECK: ldr x0, [x0]
; CHECK-NEXT: strb w1, [x0, #2]!
; CHECK-NEXT: ret
- %tmp = load i8** %out, align 8
+ %tmp = load i8*, i8** %out, align 8
%ptr = getelementptr inbounds i8, i8* %tmp, i64 2
%trunc = trunc i64 %spacing to i8
store i8 %trunc, i8* %ptr, align 4
@@ -214,7 +214,7 @@ define double* @preidxf64(double* %src,
; CHECK: str d0, [x1]
; CHECK: ret
%ptr = getelementptr inbounds double, double* %src, i64 1
- %tmp = load double* %ptr, align 4
+ %tmp = load double, double* %ptr, align 4
store double %tmp, double* %out, align 4
ret double* %ptr
}
@@ -225,7 +225,7 @@ define float* @preidxf32(float* %src, fl
; CHECK: str s0, [x1]
; CHECK: ret
%ptr = getelementptr inbounds float, float* %src, i64 1
- %tmp = load float* %ptr, align 4
+ %tmp = load float, float* %ptr, align 4
store float %tmp, float* %out, align 4
ret float* %ptr
}
@@ -236,7 +236,7 @@ define i64* @preidx64(i64* %src, i64* %o
; CHECK: str x[[REG]], [x1]
; CHECK: ret
%ptr = getelementptr inbounds i64, i64* %src, i64 1
- %tmp = load i64* %ptr, align 4
+ %tmp = load i64, i64* %ptr, align 4
store i64 %tmp, i64* %out, align 4
ret i64* %ptr
}
@@ -246,7 +246,7 @@ define i32* @preidx32(i32* %src, i32* %o
; CHECK: str w[[REG]], [x1]
; CHECK: ret
%ptr = getelementptr inbounds i32, i32* %src, i64 1
- %tmp = load i32* %ptr, align 4
+ %tmp = load i32, i32* %ptr, align 4
store i32 %tmp, i32* %out, align 4
ret i32* %ptr
}
@@ -256,7 +256,7 @@ define i16* @preidx16zext32(i16* %src, i
; CHECK: str w[[REG]], [x1]
; CHECK: ret
%ptr = getelementptr inbounds i16, i16* %src, i64 1
- %tmp = load i16* %ptr, align 4
+ %tmp = load i16, i16* %ptr, align 4
%ext = zext i16 %tmp to i32
store i32 %ext, i32* %out, align 4
ret i16* %ptr
@@ -267,7 +267,7 @@ define i16* @preidx16zext64(i16* %src, i
; CHECK: str x[[REG]], [x1]
; CHECK: ret
%ptr = getelementptr inbounds i16, i16* %src, i64 1
- %tmp = load i16* %ptr, align 4
+ %tmp = load i16, i16* %ptr, align 4
%ext = zext i16 %tmp to i64
store i64 %ext, i64* %out, align 4
ret i16* %ptr
@@ -278,7 +278,7 @@ define i8* @preidx8zext32(i8* %src, i32*
; CHECK: str w[[REG]], [x1]
; CHECK: ret
%ptr = getelementptr inbounds i8, i8* %src, i64 1
- %tmp = load i8* %ptr, align 4
+ %tmp = load i8, i8* %ptr, align 4
%ext = zext i8 %tmp to i32
store i32 %ext, i32* %out, align 4
ret i8* %ptr
@@ -289,7 +289,7 @@ define i8* @preidx8zext64(i8* %src, i64*
; CHECK: str x[[REG]], [x1]
; CHECK: ret
%ptr = getelementptr inbounds i8, i8* %src, i64 1
- %tmp = load i8* %ptr, align 4
+ %tmp = load i8, i8* %ptr, align 4
%ext = zext i8 %tmp to i64
store i64 %ext, i64* %out, align 4
ret i8* %ptr
@@ -300,7 +300,7 @@ define i32* @preidx32sext64(i32* %src, i
; CHECK: str x[[REG]], [x1]
; CHECK: ret
%ptr = getelementptr inbounds i32, i32* %src, i64 1
- %tmp = load i32* %ptr, align 4
+ %tmp = load i32, i32* %ptr, align 4
%ext = sext i32 %tmp to i64
store i64 %ext, i64* %out, align 8
ret i32* %ptr
@@ -311,7 +311,7 @@ define i16* @preidx16sext32(i16* %src, i
; CHECK: str w[[REG]], [x1]
; CHECK: ret
%ptr = getelementptr inbounds i16, i16* %src, i64 1
- %tmp = load i16* %ptr, align 4
+ %tmp = load i16, i16* %ptr, align 4
%ext = sext i16 %tmp to i32
store i32 %ext, i32* %out, align 4
ret i16* %ptr
@@ -322,7 +322,7 @@ define i16* @preidx16sext64(i16* %src, i
; CHECK: str x[[REG]], [x1]
; CHECK: ret
%ptr = getelementptr inbounds i16, i16* %src, i64 1
- %tmp = load i16* %ptr, align 4
+ %tmp = load i16, i16* %ptr, align 4
%ext = sext i16 %tmp to i64
store i64 %ext, i64* %out, align 4
ret i16* %ptr
@@ -333,7 +333,7 @@ define i8* @preidx8sext32(i8* %src, i32*
; CHECK: str w[[REG]], [x1]
; CHECK: ret
%ptr = getelementptr inbounds i8, i8* %src, i64 1
- %tmp = load i8* %ptr, align 4
+ %tmp = load i8, i8* %ptr, align 4
%ext = sext i8 %tmp to i32
store i32 %ext, i32* %out, align 4
ret i8* %ptr
@@ -344,7 +344,7 @@ define i8* @preidx8sext64(i8* %src, i64*
; CHECK: str x[[REG]], [x1]
; CHECK: ret
%ptr = getelementptr inbounds i8, i8* %src, i64 1
- %tmp = load i8* %ptr, align 4
+ %tmp = load i8, i8* %ptr, align 4
%ext = sext i8 %tmp to i64
store i64 %ext, i64* %out, align 4
ret i8* %ptr
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst-2.ll Fri Feb 27 15:17:42 2015
@@ -10,8 +10,8 @@ target triple = "arm64-apple-ios7.0.0"
define void @f(double* %P1) #0 {
entry:
%arrayidx4 = getelementptr inbounds double, double* %P1, i64 1
- %0 = load double* %arrayidx4, align 8, !tbaa !1
- %1 = load double* %P1, align 8, !tbaa !1
+ %0 = load double, double* %arrayidx4, align 8, !tbaa !1
+ %1 = load double, double* %P1, align 8, !tbaa !1
%2 = insertelement <2 x double> undef, double %0, i32 0
%3 = insertelement <2 x double> %2, double %1, i32 1
%4 = fsub <2 x double> zeroinitializer, %3
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-indexed-vector-ldst.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ define <8 x i8> @test_v8i8_pre_load(<8 x
; CHECK-LABEL: test_v8i8_pre_load:
; CHECK: ldr d0, [x0, #40]!
%newaddr = getelementptr <8 x i8>, <8 x i8>* %addr, i32 5
- %val = load <8 x i8>* %newaddr, align 8
+ %val = load <8 x i8>, <8 x i8>* %newaddr, align 8
store <8 x i8>* %newaddr, <8 x i8>** bitcast(i8** @ptr to <8 x i8>**)
ret <8 x i8> %val
}
@@ -15,7 +15,7 @@ define <8 x i8> @test_v8i8_post_load(<8
; CHECK-LABEL: test_v8i8_post_load:
; CHECK: ldr d0, [x0], #40
%newaddr = getelementptr <8 x i8>, <8 x i8>* %addr, i32 5
- %val = load <8 x i8>* %addr, align 8
+ %val = load <8 x i8>, <8 x i8>* %addr, align 8
store <8 x i8>* %newaddr, <8 x i8>** bitcast(i8** @ptr to <8 x i8>**)
ret <8 x i8> %val
}
@@ -42,7 +42,7 @@ define <4 x i16> @test_v4i16_pre_load(<4
; CHECK-LABEL: test_v4i16_pre_load:
; CHECK: ldr d0, [x0, #40]!
%newaddr = getelementptr <4 x i16>, <4 x i16>* %addr, i32 5
- %val = load <4 x i16>* %newaddr, align 8
+ %val = load <4 x i16>, <4 x i16>* %newaddr, align 8
store <4 x i16>* %newaddr, <4 x i16>** bitcast(i8** @ptr to <4 x i16>**)
ret <4 x i16> %val
}
@@ -51,7 +51,7 @@ define <4 x i16> @test_v4i16_post_load(<
; CHECK-LABEL: test_v4i16_post_load:
; CHECK: ldr d0, [x0], #40
%newaddr = getelementptr <4 x i16>, <4 x i16>* %addr, i32 5
- %val = load <4 x i16>* %addr, align 8
+ %val = load <4 x i16>, <4 x i16>* %addr, align 8
store <4 x i16>* %newaddr, <4 x i16>** bitcast(i8** @ptr to <4 x i16>**)
ret <4 x i16> %val
}
@@ -78,7 +78,7 @@ define <2 x i32> @test_v2i32_pre_load(<2
; CHECK-LABEL: test_v2i32_pre_load:
; CHECK: ldr d0, [x0, #40]!
%newaddr = getelementptr <2 x i32>, <2 x i32>* %addr, i32 5
- %val = load <2 x i32>* %newaddr, align 8
+ %val = load <2 x i32>, <2 x i32>* %newaddr, align 8
store <2 x i32>* %newaddr, <2 x i32>** bitcast(i8** @ptr to <2 x i32>**)
ret <2 x i32> %val
}
@@ -87,7 +87,7 @@ define <2 x i32> @test_v2i32_post_load(<
; CHECK-LABEL: test_v2i32_post_load:
; CHECK: ldr d0, [x0], #40
%newaddr = getelementptr <2 x i32>, <2 x i32>* %addr, i32 5
- %val = load <2 x i32>* %addr, align 8
+ %val = load <2 x i32>, <2 x i32>* %addr, align 8
store <2 x i32>* %newaddr, <2 x i32>** bitcast(i8** @ptr to <2 x i32>**)
ret <2 x i32> %val
}
@@ -114,7 +114,7 @@ define <2 x float> @test_v2f32_pre_load(
; CHECK-LABEL: test_v2f32_pre_load:
; CHECK: ldr d0, [x0, #40]!
%newaddr = getelementptr <2 x float>, <2 x float>* %addr, i32 5
- %val = load <2 x float>* %newaddr, align 8
+ %val = load <2 x float>, <2 x float>* %newaddr, align 8
store <2 x float>* %newaddr, <2 x float>** bitcast(i8** @ptr to <2 x float>**)
ret <2 x float> %val
}
@@ -123,7 +123,7 @@ define <2 x float> @test_v2f32_post_load
; CHECK-LABEL: test_v2f32_post_load:
; CHECK: ldr d0, [x0], #40
%newaddr = getelementptr <2 x float>, <2 x float>* %addr, i32 5
- %val = load <2 x float>* %addr, align 8
+ %val = load <2 x float>, <2 x float>* %addr, align 8
store <2 x float>* %newaddr, <2 x float>** bitcast(i8** @ptr to <2 x float>**)
ret <2 x float> %val
}
@@ -150,7 +150,7 @@ define <1 x i64> @test_v1i64_pre_load(<1
; CHECK-LABEL: test_v1i64_pre_load:
; CHECK: ldr d0, [x0, #40]!
%newaddr = getelementptr <1 x i64>, <1 x i64>* %addr, i32 5
- %val = load <1 x i64>* %newaddr, align 8
+ %val = load <1 x i64>, <1 x i64>* %newaddr, align 8
store <1 x i64>* %newaddr, <1 x i64>** bitcast(i8** @ptr to <1 x i64>**)
ret <1 x i64> %val
}
@@ -159,7 +159,7 @@ define <1 x i64> @test_v1i64_post_load(<
; CHECK-LABEL: test_v1i64_post_load:
; CHECK: ldr d0, [x0], #40
%newaddr = getelementptr <1 x i64>, <1 x i64>* %addr, i32 5
- %val = load <1 x i64>* %addr, align 8
+ %val = load <1 x i64>, <1 x i64>* %addr, align 8
store <1 x i64>* %newaddr, <1 x i64>** bitcast(i8** @ptr to <1 x i64>**)
ret <1 x i64> %val
}
@@ -186,7 +186,7 @@ define <16 x i8> @test_v16i8_pre_load(<1
; CHECK-LABEL: test_v16i8_pre_load:
; CHECK: ldr q0, [x0, #80]!
%newaddr = getelementptr <16 x i8>, <16 x i8>* %addr, i32 5
- %val = load <16 x i8>* %newaddr, align 8
+ %val = load <16 x i8>, <16 x i8>* %newaddr, align 8
store <16 x i8>* %newaddr, <16 x i8>** bitcast(i8** @ptr to <16 x i8>**)
ret <16 x i8> %val
}
@@ -195,7 +195,7 @@ define <16 x i8> @test_v16i8_post_load(<
; CHECK-LABEL: test_v16i8_post_load:
; CHECK: ldr q0, [x0], #80
%newaddr = getelementptr <16 x i8>, <16 x i8>* %addr, i32 5
- %val = load <16 x i8>* %addr, align 8
+ %val = load <16 x i8>, <16 x i8>* %addr, align 8
store <16 x i8>* %newaddr, <16 x i8>** bitcast(i8** @ptr to <16 x i8>**)
ret <16 x i8> %val
}
@@ -222,7 +222,7 @@ define <8 x i16> @test_v8i16_pre_load(<8
; CHECK-LABEL: test_v8i16_pre_load:
; CHECK: ldr q0, [x0, #80]!
%newaddr = getelementptr <8 x i16>, <8 x i16>* %addr, i32 5
- %val = load <8 x i16>* %newaddr, align 8
+ %val = load <8 x i16>, <8 x i16>* %newaddr, align 8
store <8 x i16>* %newaddr, <8 x i16>** bitcast(i8** @ptr to <8 x i16>**)
ret <8 x i16> %val
}
@@ -231,7 +231,7 @@ define <8 x i16> @test_v8i16_post_load(<
; CHECK-LABEL: test_v8i16_post_load:
; CHECK: ldr q0, [x0], #80
%newaddr = getelementptr <8 x i16>, <8 x i16>* %addr, i32 5
- %val = load <8 x i16>* %addr, align 8
+ %val = load <8 x i16>, <8 x i16>* %addr, align 8
store <8 x i16>* %newaddr, <8 x i16>** bitcast(i8** @ptr to <8 x i16>**)
ret <8 x i16> %val
}
@@ -258,7 +258,7 @@ define <4 x i32> @test_v4i32_pre_load(<4
; CHECK-LABEL: test_v4i32_pre_load:
; CHECK: ldr q0, [x0, #80]!
%newaddr = getelementptr <4 x i32>, <4 x i32>* %addr, i32 5
- %val = load <4 x i32>* %newaddr, align 8
+ %val = load <4 x i32>, <4 x i32>* %newaddr, align 8
store <4 x i32>* %newaddr, <4 x i32>** bitcast(i8** @ptr to <4 x i32>**)
ret <4 x i32> %val
}
@@ -267,7 +267,7 @@ define <4 x i32> @test_v4i32_post_load(<
; CHECK-LABEL: test_v4i32_post_load:
; CHECK: ldr q0, [x0], #80
%newaddr = getelementptr <4 x i32>, <4 x i32>* %addr, i32 5
- %val = load <4 x i32>* %addr, align 8
+ %val = load <4 x i32>, <4 x i32>* %addr, align 8
store <4 x i32>* %newaddr, <4 x i32>** bitcast(i8** @ptr to <4 x i32>**)
ret <4 x i32> %val
}
@@ -295,7 +295,7 @@ define <4 x float> @test_v4f32_pre_load(
; CHECK-LABEL: test_v4f32_pre_load:
; CHECK: ldr q0, [x0, #80]!
%newaddr = getelementptr <4 x float>, <4 x float>* %addr, i32 5
- %val = load <4 x float>* %newaddr, align 8
+ %val = load <4 x float>, <4 x float>* %newaddr, align 8
store <4 x float>* %newaddr, <4 x float>** bitcast(i8** @ptr to <4 x float>**)
ret <4 x float> %val
}
@@ -304,7 +304,7 @@ define <4 x float> @test_v4f32_post_load
; CHECK-LABEL: test_v4f32_post_load:
; CHECK: ldr q0, [x0], #80
%newaddr = getelementptr <4 x float>, <4 x float>* %addr, i32 5
- %val = load <4 x float>* %addr, align 8
+ %val = load <4 x float>, <4 x float>* %addr, align 8
store <4 x float>* %newaddr, <4 x float>** bitcast(i8** @ptr to <4 x float>**)
ret <4 x float> %val
}
@@ -332,7 +332,7 @@ define <2 x i64> @test_v2i64_pre_load(<2
; CHECK-LABEL: test_v2i64_pre_load:
; CHECK: ldr q0, [x0, #80]!
%newaddr = getelementptr <2 x i64>, <2 x i64>* %addr, i32 5
- %val = load <2 x i64>* %newaddr, align 8
+ %val = load <2 x i64>, <2 x i64>* %newaddr, align 8
store <2 x i64>* %newaddr, <2 x i64>** bitcast(i8** @ptr to <2 x i64>**)
ret <2 x i64> %val
}
@@ -341,7 +341,7 @@ define <2 x i64> @test_v2i64_post_load(<
; CHECK-LABEL: test_v2i64_post_load:
; CHECK: ldr q0, [x0], #80
%newaddr = getelementptr <2 x i64>, <2 x i64>* %addr, i32 5
- %val = load <2 x i64>* %addr, align 8
+ %val = load <2 x i64>, <2 x i64>* %addr, align 8
store <2 x i64>* %newaddr, <2 x i64>** bitcast(i8** @ptr to <2 x i64>**)
ret <2 x i64> %val
}
@@ -369,7 +369,7 @@ define <2 x double> @test_v2f64_pre_load
; CHECK-LABEL: test_v2f64_pre_load:
; CHECK: ldr q0, [x0, #80]!
%newaddr = getelementptr <2 x double>, <2 x double>* %addr, i32 5
- %val = load <2 x double>* %newaddr, align 8
+ %val = load <2 x double>, <2 x double>* %newaddr, align 8
store <2 x double>* %newaddr, <2 x double>** bitcast(i8** @ptr to <2 x double>**)
ret <2 x double> %val
}
@@ -378,7 +378,7 @@ define <2 x double> @test_v2f64_post_loa
; CHECK-LABEL: test_v2f64_post_load:
; CHECK: ldr q0, [x0], #80
%newaddr = getelementptr <2 x double>, <2 x double>* %addr, i32 5
- %val = load <2 x double>* %addr, align 8
+ %val = load <2 x double>, <2 x double>* %addr, align 8
store <2 x double>* %newaddr, <2 x double>** bitcast(i8** @ptr to <2 x double>**)
ret <2 x double> %val
}
@@ -5692,7 +5692,7 @@ declare void @llvm.aarch64.neon.st4lane.
define <16 x i8> @test_v16i8_post_imm_ld1r(i8* %bar, i8** %ptr) {
; CHECK-LABEL: test_v16i8_post_imm_ld1r:
; CHECK: ld1r.16b { v0 }, [x0], #1
- %tmp1 = load i8* %bar
+ %tmp1 = load i8, i8* %bar
%tmp2 = insertelement <16 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp1, i32 0
%tmp3 = insertelement <16 x i8> %tmp2, i8 %tmp1, i32 1
%tmp4 = insertelement <16 x i8> %tmp3, i8 %tmp1, i32 2
@@ -5717,7 +5717,7 @@ define <16 x i8> @test_v16i8_post_imm_ld
define <16 x i8> @test_v16i8_post_reg_ld1r(i8* %bar, i8** %ptr, i64 %inc) {
; CHECK-LABEL: test_v16i8_post_reg_ld1r:
; CHECK: ld1r.16b { v0 }, [x0], x{{[0-9]+}}
- %tmp1 = load i8* %bar
+ %tmp1 = load i8, i8* %bar
%tmp2 = insertelement <16 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp1, i32 0
%tmp3 = insertelement <16 x i8> %tmp2, i8 %tmp1, i32 1
%tmp4 = insertelement <16 x i8> %tmp3, i8 %tmp1, i32 2
@@ -5742,7 +5742,7 @@ define <16 x i8> @test_v16i8_post_reg_ld
define <8 x i8> @test_v8i8_post_imm_ld1r(i8* %bar, i8** %ptr) {
; CHECK-LABEL: test_v8i8_post_imm_ld1r:
; CHECK: ld1r.8b { v0 }, [x0], #1
- %tmp1 = load i8* %bar
+ %tmp1 = load i8, i8* %bar
%tmp2 = insertelement <8 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp1, i32 0
%tmp3 = insertelement <8 x i8> %tmp2, i8 %tmp1, i32 1
%tmp4 = insertelement <8 x i8> %tmp3, i8 %tmp1, i32 2
@@ -5759,7 +5759,7 @@ define <8 x i8> @test_v8i8_post_imm_ld1r
define <8 x i8> @test_v8i8_post_reg_ld1r(i8* %bar, i8** %ptr, i64 %inc) {
; CHECK-LABEL: test_v8i8_post_reg_ld1r:
; CHECK: ld1r.8b { v0 }, [x0], x{{[0-9]+}}
- %tmp1 = load i8* %bar
+ %tmp1 = load i8, i8* %bar
%tmp2 = insertelement <8 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp1, i32 0
%tmp3 = insertelement <8 x i8> %tmp2, i8 %tmp1, i32 1
%tmp4 = insertelement <8 x i8> %tmp3, i8 %tmp1, i32 2
@@ -5776,7 +5776,7 @@ define <8 x i8> @test_v8i8_post_reg_ld1r
define <8 x i16> @test_v8i16_post_imm_ld1r(i16* %bar, i16** %ptr) {
; CHECK-LABEL: test_v8i16_post_imm_ld1r:
; CHECK: ld1r.8h { v0 }, [x0], #2
- %tmp1 = load i16* %bar
+ %tmp1 = load i16, i16* %bar
%tmp2 = insertelement <8 x i16> <i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef>, i16 %tmp1, i32 0
%tmp3 = insertelement <8 x i16> %tmp2, i16 %tmp1, i32 1
%tmp4 = insertelement <8 x i16> %tmp3, i16 %tmp1, i32 2
@@ -5793,7 +5793,7 @@ define <8 x i16> @test_v8i16_post_imm_ld
define <8 x i16> @test_v8i16_post_reg_ld1r(i16* %bar, i16** %ptr, i64 %inc) {
; CHECK-LABEL: test_v8i16_post_reg_ld1r:
; CHECK: ld1r.8h { v0 }, [x0], x{{[0-9]+}}
- %tmp1 = load i16* %bar
+ %tmp1 = load i16, i16* %bar
%tmp2 = insertelement <8 x i16> <i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef>, i16 %tmp1, i32 0
%tmp3 = insertelement <8 x i16> %tmp2, i16 %tmp1, i32 1
%tmp4 = insertelement <8 x i16> %tmp3, i16 %tmp1, i32 2
@@ -5810,7 +5810,7 @@ define <8 x i16> @test_v8i16_post_reg_ld
define <4 x i16> @test_v4i16_post_imm_ld1r(i16* %bar, i16** %ptr) {
; CHECK-LABEL: test_v4i16_post_imm_ld1r:
; CHECK: ld1r.4h { v0 }, [x0], #2
- %tmp1 = load i16* %bar
+ %tmp1 = load i16, i16* %bar
%tmp2 = insertelement <4 x i16> <i16 undef, i16 undef, i16 undef, i16 undef>, i16 %tmp1, i32 0
%tmp3 = insertelement <4 x i16> %tmp2, i16 %tmp1, i32 1
%tmp4 = insertelement <4 x i16> %tmp3, i16 %tmp1, i32 2
@@ -5823,7 +5823,7 @@ define <4 x i16> @test_v4i16_post_imm_ld
define <4 x i16> @test_v4i16_post_reg_ld1r(i16* %bar, i16** %ptr, i64 %inc) {
; CHECK-LABEL: test_v4i16_post_reg_ld1r:
; CHECK: ld1r.4h { v0 }, [x0], x{{[0-9]+}}
- %tmp1 = load i16* %bar
+ %tmp1 = load i16, i16* %bar
%tmp2 = insertelement <4 x i16> <i16 undef, i16 undef, i16 undef, i16 undef>, i16 %tmp1, i32 0
%tmp3 = insertelement <4 x i16> %tmp2, i16 %tmp1, i32 1
%tmp4 = insertelement <4 x i16> %tmp3, i16 %tmp1, i32 2
@@ -5836,7 +5836,7 @@ define <4 x i16> @test_v4i16_post_reg_ld
define <4 x i32> @test_v4i32_post_imm_ld1r(i32* %bar, i32** %ptr) {
; CHECK-LABEL: test_v4i32_post_imm_ld1r:
; CHECK: ld1r.4s { v0 }, [x0], #4
- %tmp1 = load i32* %bar
+ %tmp1 = load i32, i32* %bar
%tmp2 = insertelement <4 x i32> <i32 undef, i32 undef, i32 undef, i32 undef>, i32 %tmp1, i32 0
%tmp3 = insertelement <4 x i32> %tmp2, i32 %tmp1, i32 1
%tmp4 = insertelement <4 x i32> %tmp3, i32 %tmp1, i32 2
@@ -5849,7 +5849,7 @@ define <4 x i32> @test_v4i32_post_imm_ld
define <4 x i32> @test_v4i32_post_reg_ld1r(i32* %bar, i32** %ptr, i64 %inc) {
; CHECK-LABEL: test_v4i32_post_reg_ld1r:
; CHECK: ld1r.4s { v0 }, [x0], x{{[0-9]+}}
- %tmp1 = load i32* %bar
+ %tmp1 = load i32, i32* %bar
%tmp2 = insertelement <4 x i32> <i32 undef, i32 undef, i32 undef, i32 undef>, i32 %tmp1, i32 0
%tmp3 = insertelement <4 x i32> %tmp2, i32 %tmp1, i32 1
%tmp4 = insertelement <4 x i32> %tmp3, i32 %tmp1, i32 2
@@ -5862,7 +5862,7 @@ define <4 x i32> @test_v4i32_post_reg_ld
define <2 x i32> @test_v2i32_post_imm_ld1r(i32* %bar, i32** %ptr) {
; CHECK-LABEL: test_v2i32_post_imm_ld1r:
; CHECK: ld1r.2s { v0 }, [x0], #4
- %tmp1 = load i32* %bar
+ %tmp1 = load i32, i32* %bar
%tmp2 = insertelement <2 x i32> <i32 undef, i32 undef>, i32 %tmp1, i32 0
%tmp3 = insertelement <2 x i32> %tmp2, i32 %tmp1, i32 1
%tmp4 = getelementptr i32, i32* %bar, i64 1
@@ -5873,7 +5873,7 @@ define <2 x i32> @test_v2i32_post_imm_ld
define <2 x i32> @test_v2i32_post_reg_ld1r(i32* %bar, i32** %ptr, i64 %inc) {
; CHECK-LABEL: test_v2i32_post_reg_ld1r:
; CHECK: ld1r.2s { v0 }, [x0], x{{[0-9]+}}
- %tmp1 = load i32* %bar
+ %tmp1 = load i32, i32* %bar
%tmp2 = insertelement <2 x i32> <i32 undef, i32 undef>, i32 %tmp1, i32 0
%tmp3 = insertelement <2 x i32> %tmp2, i32 %tmp1, i32 1
%tmp4 = getelementptr i32, i32* %bar, i64 %inc
@@ -5884,7 +5884,7 @@ define <2 x i32> @test_v2i32_post_reg_ld
define <2 x i64> @test_v2i64_post_imm_ld1r(i64* %bar, i64** %ptr) {
; CHECK-LABEL: test_v2i64_post_imm_ld1r:
; CHECK: ld1r.2d { v0 }, [x0], #8
- %tmp1 = load i64* %bar
+ %tmp1 = load i64, i64* %bar
%tmp2 = insertelement <2 x i64> <i64 undef, i64 undef>, i64 %tmp1, i32 0
%tmp3 = insertelement <2 x i64> %tmp2, i64 %tmp1, i32 1
%tmp4 = getelementptr i64, i64* %bar, i64 1
@@ -5895,7 +5895,7 @@ define <2 x i64> @test_v2i64_post_imm_ld
define <2 x i64> @test_v2i64_post_reg_ld1r(i64* %bar, i64** %ptr, i64 %inc) {
; CHECK-LABEL: test_v2i64_post_reg_ld1r:
; CHECK: ld1r.2d { v0 }, [x0], x{{[0-9]+}}
- %tmp1 = load i64* %bar
+ %tmp1 = load i64, i64* %bar
%tmp2 = insertelement <2 x i64> <i64 undef, i64 undef>, i64 %tmp1, i32 0
%tmp3 = insertelement <2 x i64> %tmp2, i64 %tmp1, i32 1
%tmp4 = getelementptr i64, i64* %bar, i64 %inc
@@ -5906,7 +5906,7 @@ define <2 x i64> @test_v2i64_post_reg_ld
define <4 x float> @test_v4f32_post_imm_ld1r(float* %bar, float** %ptr) {
; CHECK-LABEL: test_v4f32_post_imm_ld1r:
; CHECK: ld1r.4s { v0 }, [x0], #4
- %tmp1 = load float* %bar
+ %tmp1 = load float, float* %bar
%tmp2 = insertelement <4 x float> <float undef, float undef, float undef, float undef>, float %tmp1, i32 0
%tmp3 = insertelement <4 x float> %tmp2, float %tmp1, i32 1
%tmp4 = insertelement <4 x float> %tmp3, float %tmp1, i32 2
@@ -5919,7 +5919,7 @@ define <4 x float> @test_v4f32_post_imm_
define <4 x float> @test_v4f32_post_reg_ld1r(float* %bar, float** %ptr, i64 %inc) {
; CHECK-LABEL: test_v4f32_post_reg_ld1r:
; CHECK: ld1r.4s { v0 }, [x0], x{{[0-9]+}}
- %tmp1 = load float* %bar
+ %tmp1 = load float, float* %bar
%tmp2 = insertelement <4 x float> <float undef, float undef, float undef, float undef>, float %tmp1, i32 0
%tmp3 = insertelement <4 x float> %tmp2, float %tmp1, i32 1
%tmp4 = insertelement <4 x float> %tmp3, float %tmp1, i32 2
@@ -5932,7 +5932,7 @@ define <4 x float> @test_v4f32_post_reg_
define <2 x float> @test_v2f32_post_imm_ld1r(float* %bar, float** %ptr) {
; CHECK-LABEL: test_v2f32_post_imm_ld1r:
; CHECK: ld1r.2s { v0 }, [x0], #4
- %tmp1 = load float* %bar
+ %tmp1 = load float, float* %bar
%tmp2 = insertelement <2 x float> <float undef, float undef>, float %tmp1, i32 0
%tmp3 = insertelement <2 x float> %tmp2, float %tmp1, i32 1
%tmp4 = getelementptr float, float* %bar, i64 1
@@ -5943,7 +5943,7 @@ define <2 x float> @test_v2f32_post_imm_
define <2 x float> @test_v2f32_post_reg_ld1r(float* %bar, float** %ptr, i64 %inc) {
; CHECK-LABEL: test_v2f32_post_reg_ld1r:
; CHECK: ld1r.2s { v0 }, [x0], x{{[0-9]+}}
- %tmp1 = load float* %bar
+ %tmp1 = load float, float* %bar
%tmp2 = insertelement <2 x float> <float undef, float undef>, float %tmp1, i32 0
%tmp3 = insertelement <2 x float> %tmp2, float %tmp1, i32 1
%tmp4 = getelementptr float, float* %bar, i64 %inc
@@ -5954,7 +5954,7 @@ define <2 x float> @test_v2f32_post_reg_
define <2 x double> @test_v2f64_post_imm_ld1r(double* %bar, double** %ptr) {
; CHECK-LABEL: test_v2f64_post_imm_ld1r:
; CHECK: ld1r.2d { v0 }, [x0], #8
- %tmp1 = load double* %bar
+ %tmp1 = load double, double* %bar
%tmp2 = insertelement <2 x double> <double undef, double undef>, double %tmp1, i32 0
%tmp3 = insertelement <2 x double> %tmp2, double %tmp1, i32 1
%tmp4 = getelementptr double, double* %bar, i64 1
@@ -5965,7 +5965,7 @@ define <2 x double> @test_v2f64_post_imm
define <2 x double> @test_v2f64_post_reg_ld1r(double* %bar, double** %ptr, i64 %inc) {
; CHECK-LABEL: test_v2f64_post_reg_ld1r:
; CHECK: ld1r.2d { v0 }, [x0], x{{[0-9]+}}
- %tmp1 = load double* %bar
+ %tmp1 = load double, double* %bar
%tmp2 = insertelement <2 x double> <double undef, double undef>, double %tmp1, i32 0
%tmp3 = insertelement <2 x double> %tmp2, double %tmp1, i32 1
%tmp4 = getelementptr double, double* %bar, i64 %inc
@@ -5976,7 +5976,7 @@ define <2 x double> @test_v2f64_post_reg
define <16 x i8> @test_v16i8_post_imm_ld1lane(i8* %bar, i8** %ptr, <16 x i8> %A) {
; CHECK-LABEL: test_v16i8_post_imm_ld1lane:
; CHECK: ld1.b { v0 }[1], [x0], #1
- %tmp1 = load i8* %bar
+ %tmp1 = load i8, i8* %bar
%tmp2 = insertelement <16 x i8> %A, i8 %tmp1, i32 1
%tmp3 = getelementptr i8, i8* %bar, i64 1
store i8* %tmp3, i8** %ptr
@@ -5986,7 +5986,7 @@ define <16 x i8> @test_v16i8_post_imm_ld
define <16 x i8> @test_v16i8_post_reg_ld1lane(i8* %bar, i8** %ptr, i64 %inc, <16 x i8> %A) {
; CHECK-LABEL: test_v16i8_post_reg_ld1lane:
; CHECK: ld1.b { v0 }[1], [x0], x{{[0-9]+}}
- %tmp1 = load i8* %bar
+ %tmp1 = load i8, i8* %bar
%tmp2 = insertelement <16 x i8> %A, i8 %tmp1, i32 1
%tmp3 = getelementptr i8, i8* %bar, i64 %inc
store i8* %tmp3, i8** %ptr
@@ -5996,7 +5996,7 @@ define <16 x i8> @test_v16i8_post_reg_ld
define <8 x i8> @test_v8i8_post_imm_ld1lane(i8* %bar, i8** %ptr, <8 x i8> %A) {
; CHECK-LABEL: test_v8i8_post_imm_ld1lane:
; CHECK: ld1.b { v0 }[1], [x0], #1
- %tmp1 = load i8* %bar
+ %tmp1 = load i8, i8* %bar
%tmp2 = insertelement <8 x i8> %A, i8 %tmp1, i32 1
%tmp3 = getelementptr i8, i8* %bar, i64 1
store i8* %tmp3, i8** %ptr
@@ -6006,7 +6006,7 @@ define <8 x i8> @test_v8i8_post_imm_ld1l
define <8 x i8> @test_v8i8_post_reg_ld1lane(i8* %bar, i8** %ptr, i64 %inc, <8 x i8> %A) {
; CHECK-LABEL: test_v8i8_post_reg_ld1lane:
; CHECK: ld1.b { v0 }[1], [x0], x{{[0-9]+}}
- %tmp1 = load i8* %bar
+ %tmp1 = load i8, i8* %bar
%tmp2 = insertelement <8 x i8> %A, i8 %tmp1, i32 1
%tmp3 = getelementptr i8, i8* %bar, i64 %inc
store i8* %tmp3, i8** %ptr
@@ -6016,7 +6016,7 @@ define <8 x i8> @test_v8i8_post_reg_ld1l
define <8 x i16> @test_v8i16_post_imm_ld1lane(i16* %bar, i16** %ptr, <8 x i16> %A) {
; CHECK-LABEL: test_v8i16_post_imm_ld1lane:
; CHECK: ld1.h { v0 }[1], [x0], #2
- %tmp1 = load i16* %bar
+ %tmp1 = load i16, i16* %bar
%tmp2 = insertelement <8 x i16> %A, i16 %tmp1, i32 1
%tmp3 = getelementptr i16, i16* %bar, i64 1
store i16* %tmp3, i16** %ptr
@@ -6026,7 +6026,7 @@ define <8 x i16> @test_v8i16_post_imm_ld
define <8 x i16> @test_v8i16_post_reg_ld1lane(i16* %bar, i16** %ptr, i64 %inc, <8 x i16> %A) {
; CHECK-LABEL: test_v8i16_post_reg_ld1lane:
; CHECK: ld1.h { v0 }[1], [x0], x{{[0-9]+}}
- %tmp1 = load i16* %bar
+ %tmp1 = load i16, i16* %bar
%tmp2 = insertelement <8 x i16> %A, i16 %tmp1, i32 1
%tmp3 = getelementptr i16, i16* %bar, i64 %inc
store i16* %tmp3, i16** %ptr
@@ -6036,7 +6036,7 @@ define <8 x i16> @test_v8i16_post_reg_ld
define <4 x i16> @test_v4i16_post_imm_ld1lane(i16* %bar, i16** %ptr, <4 x i16> %A) {
; CHECK-LABEL: test_v4i16_post_imm_ld1lane:
; CHECK: ld1.h { v0 }[1], [x0], #2
- %tmp1 = load i16* %bar
+ %tmp1 = load i16, i16* %bar
%tmp2 = insertelement <4 x i16> %A, i16 %tmp1, i32 1
%tmp3 = getelementptr i16, i16* %bar, i64 1
store i16* %tmp3, i16** %ptr
@@ -6046,7 +6046,7 @@ define <4 x i16> @test_v4i16_post_imm_ld
define <4 x i16> @test_v4i16_post_reg_ld1lane(i16* %bar, i16** %ptr, i64 %inc, <4 x i16> %A) {
; CHECK-LABEL: test_v4i16_post_reg_ld1lane:
; CHECK: ld1.h { v0 }[1], [x0], x{{[0-9]+}}
- %tmp1 = load i16* %bar
+ %tmp1 = load i16, i16* %bar
%tmp2 = insertelement <4 x i16> %A, i16 %tmp1, i32 1
%tmp3 = getelementptr i16, i16* %bar, i64 %inc
store i16* %tmp3, i16** %ptr
@@ -6056,7 +6056,7 @@ define <4 x i16> @test_v4i16_post_reg_ld
define <4 x i32> @test_v4i32_post_imm_ld1lane(i32* %bar, i32** %ptr, <4 x i32> %A) {
; CHECK-LABEL: test_v4i32_post_imm_ld1lane:
; CHECK: ld1.s { v0 }[1], [x0], #4
- %tmp1 = load i32* %bar
+ %tmp1 = load i32, i32* %bar
%tmp2 = insertelement <4 x i32> %A, i32 %tmp1, i32 1
%tmp3 = getelementptr i32, i32* %bar, i64 1
store i32* %tmp3, i32** %ptr
@@ -6066,7 +6066,7 @@ define <4 x i32> @test_v4i32_post_imm_ld
define <4 x i32> @test_v4i32_post_reg_ld1lane(i32* %bar, i32** %ptr, i64 %inc, <4 x i32> %A) {
; CHECK-LABEL: test_v4i32_post_reg_ld1lane:
; CHECK: ld1.s { v0 }[1], [x0], x{{[0-9]+}}
- %tmp1 = load i32* %bar
+ %tmp1 = load i32, i32* %bar
%tmp2 = insertelement <4 x i32> %A, i32 %tmp1, i32 1
%tmp3 = getelementptr i32, i32* %bar, i64 %inc
store i32* %tmp3, i32** %ptr
@@ -6076,7 +6076,7 @@ define <4 x i32> @test_v4i32_post_reg_ld
define <2 x i32> @test_v2i32_post_imm_ld1lane(i32* %bar, i32** %ptr, <2 x i32> %A) {
; CHECK-LABEL: test_v2i32_post_imm_ld1lane:
; CHECK: ld1.s { v0 }[1], [x0], #4
- %tmp1 = load i32* %bar
+ %tmp1 = load i32, i32* %bar
%tmp2 = insertelement <2 x i32> %A, i32 %tmp1, i32 1
%tmp3 = getelementptr i32, i32* %bar, i64 1
store i32* %tmp3, i32** %ptr
@@ -6086,7 +6086,7 @@ define <2 x i32> @test_v2i32_post_imm_ld
define <2 x i32> @test_v2i32_post_reg_ld1lane(i32* %bar, i32** %ptr, i64 %inc, <2 x i32> %A) {
; CHECK-LABEL: test_v2i32_post_reg_ld1lane:
; CHECK: ld1.s { v0 }[1], [x0], x{{[0-9]+}}
- %tmp1 = load i32* %bar
+ %tmp1 = load i32, i32* %bar
%tmp2 = insertelement <2 x i32> %A, i32 %tmp1, i32 1
%tmp3 = getelementptr i32, i32* %bar, i64 %inc
store i32* %tmp3, i32** %ptr
@@ -6096,7 +6096,7 @@ define <2 x i32> @test_v2i32_post_reg_ld
define <2 x i64> @test_v2i64_post_imm_ld1lane(i64* %bar, i64** %ptr, <2 x i64> %A) {
; CHECK-LABEL: test_v2i64_post_imm_ld1lane:
; CHECK: ld1.d { v0 }[1], [x0], #8
- %tmp1 = load i64* %bar
+ %tmp1 = load i64, i64* %bar
%tmp2 = insertelement <2 x i64> %A, i64 %tmp1, i32 1
%tmp3 = getelementptr i64, i64* %bar, i64 1
store i64* %tmp3, i64** %ptr
@@ -6106,7 +6106,7 @@ define <2 x i64> @test_v2i64_post_imm_ld
define <2 x i64> @test_v2i64_post_reg_ld1lane(i64* %bar, i64** %ptr, i64 %inc, <2 x i64> %A) {
; CHECK-LABEL: test_v2i64_post_reg_ld1lane:
; CHECK: ld1.d { v0 }[1], [x0], x{{[0-9]+}}
- %tmp1 = load i64* %bar
+ %tmp1 = load i64, i64* %bar
%tmp2 = insertelement <2 x i64> %A, i64 %tmp1, i32 1
%tmp3 = getelementptr i64, i64* %bar, i64 %inc
store i64* %tmp3, i64** %ptr
@@ -6116,7 +6116,7 @@ define <2 x i64> @test_v2i64_post_reg_ld
define <4 x float> @test_v4f32_post_imm_ld1lane(float* %bar, float** %ptr, <4 x float> %A) {
; CHECK-LABEL: test_v4f32_post_imm_ld1lane:
; CHECK: ld1.s { v0 }[1], [x0], #4
- %tmp1 = load float* %bar
+ %tmp1 = load float, float* %bar
%tmp2 = insertelement <4 x float> %A, float %tmp1, i32 1
%tmp3 = getelementptr float, float* %bar, i64 1
store float* %tmp3, float** %ptr
@@ -6126,7 +6126,7 @@ define <4 x float> @test_v4f32_post_imm_
define <4 x float> @test_v4f32_post_reg_ld1lane(float* %bar, float** %ptr, i64 %inc, <4 x float> %A) {
; CHECK-LABEL: test_v4f32_post_reg_ld1lane:
; CHECK: ld1.s { v0 }[1], [x0], x{{[0-9]+}}
- %tmp1 = load float* %bar
+ %tmp1 = load float, float* %bar
%tmp2 = insertelement <4 x float> %A, float %tmp1, i32 1
%tmp3 = getelementptr float, float* %bar, i64 %inc
store float* %tmp3, float** %ptr
@@ -6136,7 +6136,7 @@ define <4 x float> @test_v4f32_post_reg_
define <2 x float> @test_v2f32_post_imm_ld1lane(float* %bar, float** %ptr, <2 x float> %A) {
; CHECK-LABEL: test_v2f32_post_imm_ld1lane:
; CHECK: ld1.s { v0 }[1], [x0], #4
- %tmp1 = load float* %bar
+ %tmp1 = load float, float* %bar
%tmp2 = insertelement <2 x float> %A, float %tmp1, i32 1
%tmp3 = getelementptr float, float* %bar, i64 1
store float* %tmp3, float** %ptr
@@ -6146,7 +6146,7 @@ define <2 x float> @test_v2f32_post_imm_
define <2 x float> @test_v2f32_post_reg_ld1lane(float* %bar, float** %ptr, i64 %inc, <2 x float> %A) {
; CHECK-LABEL: test_v2f32_post_reg_ld1lane:
; CHECK: ld1.s { v0 }[1], [x0], x{{[0-9]+}}
- %tmp1 = load float* %bar
+ %tmp1 = load float, float* %bar
%tmp2 = insertelement <2 x float> %A, float %tmp1, i32 1
%tmp3 = getelementptr float, float* %bar, i64 %inc
store float* %tmp3, float** %ptr
@@ -6156,7 +6156,7 @@ define <2 x float> @test_v2f32_post_reg_
define <2 x double> @test_v2f64_post_imm_ld1lane(double* %bar, double** %ptr, <2 x double> %A) {
; CHECK-LABEL: test_v2f64_post_imm_ld1lane:
; CHECK: ld1.d { v0 }[1], [x0], #8
- %tmp1 = load double* %bar
+ %tmp1 = load double, double* %bar
%tmp2 = insertelement <2 x double> %A, double %tmp1, i32 1
%tmp3 = getelementptr double, double* %bar, i64 1
store double* %tmp3, double** %ptr
@@ -6166,7 +6166,7 @@ define <2 x double> @test_v2f64_post_imm
define <2 x double> @test_v2f64_post_reg_ld1lane(double* %bar, double** %ptr, i64 %inc, <2 x double> %A) {
; CHECK-LABEL: test_v2f64_post_reg_ld1lane:
; CHECK: ld1.d { v0 }[1], [x0], x{{[0-9]+}}
- %tmp1 = load double* %bar
+ %tmp1 = load double, double* %bar
%tmp2 = insertelement <2 x double> %A, double %tmp1, i32 1
%tmp3 = getelementptr double, double* %bar, i64 %inc
store double* %tmp3, double** %ptr
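
The hunks above and below are all instances of one mechanical rewrite: the loaded value's type becomes an explicit first operand of load, and the pointer operand keeps its full pointer type. A minimal standalone sketch (hypothetical function, not one of the tests in this diff):

  ; old form:  %v = load <2 x double>* %p, align 16
  ; new form:  the pointee type is spelled out before the pointer operand
  define <2 x double> @example_load(<2 x double>* %p) {
    %v = load <2 x double>, <2 x double>* %p, align 16
    ret <2 x double> %v
  }

Today the explicit type is redundant with the pointer's pointee type, which is presumably why a regex rewrite suffices; it stops being derivable once pointer types become opaque.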
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-inline-asm.ll Fri Feb 27 15:17:42 2015
@@ -125,7 +125,7 @@ define void @t9() nounwind {
entry:
; CHECK-LABEL: t9:
%data = alloca <2 x double>, align 16
- %0 = load <2 x double>* %data, align 16
+ %0 = load <2 x double>, <2 x double>* %data, align 16
call void asm sideeffect "mov.2d v4, $0\0A", "w,~{v4}"(<2 x double> %0) nounwind
; CHECK: mov.2d v4, {{v[0-9]+}}
ret void
@@ -137,7 +137,7 @@ entry:
%data = alloca <2 x float>, align 8
%a = alloca [2 x float], align 4
%arraydecay = getelementptr inbounds [2 x float], [2 x float]* %a, i32 0, i32 0
- %0 = load <2 x float>* %data, align 8
+ %0 = load <2 x float>, <2 x float>* %data, align 8
call void asm sideeffect "ldr ${1:q}, [$0]\0A", "r,w"(float* %arraydecay, <2 x float> %0) nounwind
; CHECK: ldr {{q[0-9]+}}, [{{x[0-9]+}}]
call void asm sideeffect "ldr ${1:d}, [$0]\0A", "r,w"(float* %arraydecay, <2 x float> %0) nounwind
@@ -155,10 +155,10 @@ define void @t11() nounwind {
entry:
; CHECK-LABEL: t11:
%a = alloca i32, align 4
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
call void asm sideeffect "mov ${1:x}, ${0:x}\0A", "r,i"(i32 %0, i32 0) nounwind
; CHECK: mov xzr, {{x[0-9]+}}
- %1 = load i32* %a, align 4
+ %1 = load i32, i32* %a, align 4
call void asm sideeffect "mov ${1:w}, ${0:w}\0A", "r,i"(i32 %1, i32 0) nounwind
; CHECK: mov wzr, {{w[0-9]+}}
ret void
@@ -168,7 +168,7 @@ define void @t12() nounwind {
entry:
; CHECK-LABEL: t12:
%data = alloca <4 x float>, align 16
- %0 = load <4 x float>* %data, align 16
+ %0 = load <4 x float>, <4 x float>* %data, align 16
call void asm sideeffect "mov.2d v4, $0\0A", "x,~{v4}"(<4 x float> %0) nounwind
; CHECK mov.2d v4, {{v([0-9])|(1[0-5])}}
ret void
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-ld1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-ld1.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-ld1.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-ld1.ll Fri Feb 27 15:17:42 2015
@@ -449,7 +449,7 @@ define <8 x i8> @ld1r_8b(i8* %bar) {
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.8b { v0 }, [x0]
; CHECK-NEXT ret
- %tmp1 = load i8* %bar
+ %tmp1 = load i8, i8* %bar
%tmp2 = insertelement <8 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp1, i32 0
%tmp3 = insertelement <8 x i8> %tmp2, i8 %tmp1, i32 1
%tmp4 = insertelement <8 x i8> %tmp3, i8 %tmp1, i32 2
@@ -466,7 +466,7 @@ define <16 x i8> @ld1r_16b(i8* %bar) {
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.16b { v0 }, [x0]
; CHECK-NEXT ret
- %tmp1 = load i8* %bar
+ %tmp1 = load i8, i8* %bar
%tmp2 = insertelement <16 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, i8 %tmp1, i32 0
%tmp3 = insertelement <16 x i8> %tmp2, i8 %tmp1, i32 1
%tmp4 = insertelement <16 x i8> %tmp3, i8 %tmp1, i32 2
@@ -491,7 +491,7 @@ define <4 x i16> @ld1r_4h(i16* %bar) {
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.4h { v0 }, [x0]
; CHECK-NEXT ret
- %tmp1 = load i16* %bar
+ %tmp1 = load i16, i16* %bar
%tmp2 = insertelement <4 x i16> <i16 undef, i16 undef, i16 undef, i16 undef>, i16 %tmp1, i32 0
%tmp3 = insertelement <4 x i16> %tmp2, i16 %tmp1, i32 1
%tmp4 = insertelement <4 x i16> %tmp3, i16 %tmp1, i32 2
@@ -504,7 +504,7 @@ define <8 x i16> @ld1r_8h(i16* %bar) {
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.8h { v0 }, [x0]
; CHECK-NEXT ret
- %tmp1 = load i16* %bar
+ %tmp1 = load i16, i16* %bar
%tmp2 = insertelement <8 x i16> <i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef>, i16 %tmp1, i32 0
%tmp3 = insertelement <8 x i16> %tmp2, i16 %tmp1, i32 1
%tmp4 = insertelement <8 x i16> %tmp3, i16 %tmp1, i32 2
@@ -521,7 +521,7 @@ define <2 x i32> @ld1r_2s(i32* %bar) {
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.2s { v0 }, [x0]
; CHECK-NEXT ret
- %tmp1 = load i32* %bar
+ %tmp1 = load i32, i32* %bar
%tmp2 = insertelement <2 x i32> <i32 undef, i32 undef>, i32 %tmp1, i32 0
%tmp3 = insertelement <2 x i32> %tmp2, i32 %tmp1, i32 1
ret <2 x i32> %tmp3
@@ -532,7 +532,7 @@ define <4 x i32> @ld1r_4s(i32* %bar) {
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.4s { v0 }, [x0]
; CHECK-NEXT ret
- %tmp1 = load i32* %bar
+ %tmp1 = load i32, i32* %bar
%tmp2 = insertelement <4 x i32> <i32 undef, i32 undef, i32 undef, i32 undef>, i32 %tmp1, i32 0
%tmp3 = insertelement <4 x i32> %tmp2, i32 %tmp1, i32 1
%tmp4 = insertelement <4 x i32> %tmp3, i32 %tmp1, i32 2
@@ -545,7 +545,7 @@ define <2 x i64> @ld1r_2d(i64* %bar) {
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.2d { v0 }, [x0]
; CHECK-NEXT ret
- %tmp1 = load i64* %bar
+ %tmp1 = load i64, i64* %bar
%tmp2 = insertelement <2 x i64> <i64 undef, i64 undef>, i64 %tmp1, i32 0
%tmp3 = insertelement <2 x i64> %tmp2, i64 %tmp1, i32 1
ret <2 x i64> %tmp3
@@ -804,7 +804,7 @@ define <16 x i8> @ld1_16b(<16 x i8> %V,
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.b { v0 }[0], [x0]
; CHECK-NEXT ret
- %tmp1 = load i8* %bar
+ %tmp1 = load i8, i8* %bar
%tmp2 = insertelement <16 x i8> %V, i8 %tmp1, i32 0
ret <16 x i8> %tmp2
}
@@ -814,7 +814,7 @@ define <8 x i16> @ld1_8h(<8 x i16> %V, i
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.h { v0 }[0], [x0]
; CHECK-NEXT ret
- %tmp1 = load i16* %bar
+ %tmp1 = load i16, i16* %bar
%tmp2 = insertelement <8 x i16> %V, i16 %tmp1, i32 0
ret <8 x i16> %tmp2
}
@@ -824,7 +824,7 @@ define <4 x i32> @ld1_4s(<4 x i32> %V, i
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.s { v0 }[0], [x0]
; CHECK-NEXT ret
- %tmp1 = load i32* %bar
+ %tmp1 = load i32, i32* %bar
%tmp2 = insertelement <4 x i32> %V, i32 %tmp1, i32 0
ret <4 x i32> %tmp2
}
@@ -834,7 +834,7 @@ define <4 x float> @ld1_4s_float(<4 x fl
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.s { v0 }[0], [x0]
; CHECK-NEXT ret
- %tmp1 = load float* %bar
+ %tmp1 = load float, float* %bar
%tmp2 = insertelement <4 x float> %V, float %tmp1, i32 0
ret <4 x float> %tmp2
}
@@ -844,7 +844,7 @@ define <2 x i64> @ld1_2d(<2 x i64> %V, i
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.d { v0 }[0], [x0]
; CHECK-NEXT ret
- %tmp1 = load i64* %bar
+ %tmp1 = load i64, i64* %bar
%tmp2 = insertelement <2 x i64> %V, i64 %tmp1, i32 0
ret <2 x i64> %tmp2
}
@@ -854,7 +854,7 @@ define <2 x double> @ld1_2d_double(<2 x
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.d { v0 }[0], [x0]
; CHECK-NEXT ret
- %tmp1 = load double* %bar
+ %tmp1 = load double, double* %bar
%tmp2 = insertelement <2 x double> %V, double %tmp1, i32 0
ret <2 x double> %tmp2
}
@@ -864,7 +864,7 @@ define <1 x i64> @ld1_1d(<1 x i64>* %p)
; Make sure we are using the operands defined by the ABI
; CHECK: ldr [[REG:d[0-9]+]], [x0]
; CHECK-NEXT: ret
- %tmp = load <1 x i64>* %p, align 8
+ %tmp = load <1 x i64>, <1 x i64>* %p, align 8
ret <1 x i64> %tmp
}
@@ -873,7 +873,7 @@ define <8 x i8> @ld1_8b(<8 x i8> %V, i8*
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.b { v0 }[0], [x0]
; CHECK-NEXT ret
- %tmp1 = load i8* %bar
+ %tmp1 = load i8, i8* %bar
%tmp2 = insertelement <8 x i8> %V, i8 %tmp1, i32 0
ret <8 x i8> %tmp2
}
@@ -883,7 +883,7 @@ define <4 x i16> @ld1_4h(<4 x i16> %V, i
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.h { v0 }[0], [x0]
; CHECK-NEXT ret
- %tmp1 = load i16* %bar
+ %tmp1 = load i16, i16* %bar
%tmp2 = insertelement <4 x i16> %V, i16 %tmp1, i32 0
ret <4 x i16> %tmp2
}
@@ -893,7 +893,7 @@ define <2 x i32> @ld1_2s(<2 x i32> %V, i
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.s { v0 }[0], [x0]
; CHECK-NEXT ret
- %tmp1 = load i32* %bar
+ %tmp1 = load i32, i32* %bar
%tmp2 = insertelement <2 x i32> %V, i32 %tmp1, i32 0
ret <2 x i32> %tmp2
}
@@ -903,7 +903,7 @@ define <2 x float> @ld1_2s_float(<2 x fl
; Make sure we are using the operands defined by the ABI
; CHECK: ld1.s { v0 }[0], [x0]
; CHECK-NEXT ret
- %tmp1 = load float* %bar
+ %tmp1 = load float, float* %bar
%tmp2 = insertelement <2 x float> %V, float %tmp1, i32 0
ret <2 x float> %tmp2
}
@@ -919,12 +919,12 @@ entry:
; CHECK-NEXT: str d[[RESREGNUM]], [x2]
; CHECK-NEXT: ret
%tmp = bitcast i8* %a to i32*
- %tmp1 = load i32* %tmp, align 4
+ %tmp1 = load i32, i32* %tmp, align 4
%tmp2 = insertelement <2 x i32> undef, i32 %tmp1, i32 0
%lane = shufflevector <2 x i32> %tmp2, <2 x i32> undef, <2 x i32> zeroinitializer
%tmp3 = bitcast <2 x i32> %lane to <8 x i8>
%tmp4 = bitcast i8* %b to i32*
- %tmp5 = load i32* %tmp4, align 4
+ %tmp5 = load i32, i32* %tmp4, align 4
%tmp6 = insertelement <2 x i32> undef, i32 %tmp5, i32 0
%lane1 = shufflevector <2 x i32> %tmp6, <2 x i32> undef, <2 x i32> zeroinitializer
%tmp7 = bitcast <2 x i32> %lane1 to <8 x i8>
@@ -946,7 +946,7 @@ entry:
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.4s { v0 }, [x0]
; CHECK-NEXT ret
- %tmp = load float* %x, align 4
+ %tmp = load float, float* %x, align 4
%tmp1 = insertelement <4 x float> undef, float %tmp, i32 0
%tmp2 = insertelement <4 x float> %tmp1, float %tmp, i32 1
%tmp3 = insertelement <4 x float> %tmp2, float %tmp, i32 2
@@ -960,7 +960,7 @@ entry:
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.2s { v0 }, [x0]
; CHECK-NEXT ret
- %tmp = load float* %x, align 4
+ %tmp = load float, float* %x, align 4
%tmp1 = insertelement <2 x float> undef, float %tmp, i32 0
%tmp2 = insertelement <2 x float> %tmp1, float %tmp, i32 1
ret <2 x float> %tmp2
@@ -972,7 +972,7 @@ entry:
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.2d { v0 }, [x0]
; CHECK-NEXT ret
- %tmp = load double* %x, align 4
+ %tmp = load double, double* %x, align 4
%tmp1 = insertelement <2 x double> undef, double %tmp, i32 0
%tmp2 = insertelement <2 x double> %tmp1, double %tmp, i32 1
ret <2 x double> %tmp2
@@ -984,7 +984,7 @@ entry:
; Make sure we are using the operands defined by the ABI
; CHECK: ldr d0, [x0]
; CHECK-NEXT ret
- %tmp = load double* %x, align 4
+ %tmp = load double, double* %x, align 4
%tmp1 = insertelement <1 x double> undef, double %tmp, i32 0
ret <1 x double> %tmp1
}
@@ -995,7 +995,7 @@ entry:
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.4s { v0 }, [x0]
; CHECK-NEXT ret
- %tmp = load float* %x, align 4
+ %tmp = load float, float* %x, align 4
%tmp1 = insertelement <4 x float> undef, float %tmp, i32 0
%lane = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> zeroinitializer
ret <4 x float> %lane
@@ -1007,7 +1007,7 @@ entry:
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.2s { v0 }, [x0]
; CHECK-NEXT ret
- %tmp = load float* %x, align 4
+ %tmp = load float, float* %x, align 4
%tmp1 = insertelement <2 x float> undef, float %tmp, i32 0
%lane = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> zeroinitializer
ret <2 x float> %lane
@@ -1019,7 +1019,7 @@ entry:
; Make sure we are using the operands defined by the ABI
; CHECK: ld1r.2d { v0 }, [x0]
; CHECK-NEXT ret
- %tmp = load double* %x, align 4
+ %tmp = load double, double* %x, align 4
%tmp1 = insertelement <2 x double> undef, double %tmp, i32 0
%lane = shufflevector <2 x double> %tmp1, <2 x double> undef, <2 x i32> zeroinitializer
ret <2 x double> %lane
@@ -1031,7 +1031,7 @@ entry:
; Make sure we are using the operands defined by the ABI
; CHECK: ldr d0, [x0]
; CHECK-NEXT ret
- %tmp = load double* %x, align 4
+ %tmp = load double, double* %x, align 4
%tmp1 = insertelement <1 x double> undef, double %tmp, i32 0
%lane = shufflevector <1 x double> %tmp1, <1 x double> undef, <1 x i32> zeroinitializer
ret <1 x double> %lane
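
The ld1r tests in this file all reduce to the same shape: a scalar load that feeds a vector splat. A compact sketch of that shape in the updated syntax (hypothetical function, written for illustration only):

  define <4 x i32> @example_splat(i32* %p) {
    ; scalar load with explicit result type, then splat via insert + shuffle
    %s = load i32, i32* %p, align 4
    %v0 = insertelement <4 x i32> undef, i32 %s, i32 0
    %v = shufflevector <4 x i32> %v0, <4 x i32> undef, <4 x i32> zeroinitializer
    ret <4 x i32> %v
  }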
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-ldp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-ldp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-ldp.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-ldp.ll Fri Feb 27 15:17:42 2015
@@ -5,9 +5,9 @@
; CHECK: ldp_int
; CHECK: ldp
define i32 @ldp_int(i32* %p) nounwind {
- %tmp = load i32* %p, align 4
+ %tmp = load i32, i32* %p, align 4
%add.ptr = getelementptr inbounds i32, i32* %p, i64 1
- %tmp1 = load i32* %add.ptr, align 4
+ %tmp1 = load i32, i32* %add.ptr, align 4
%add = add nsw i32 %tmp1, %tmp
ret i32 %add
}
@@ -15,9 +15,9 @@ define i32 @ldp_int(i32* %p) nounwind {
; CHECK: ldp_sext_int
; CHECK: ldpsw
define i64 @ldp_sext_int(i32* %p) nounwind {
- %tmp = load i32* %p, align 4
+ %tmp = load i32, i32* %p, align 4
%add.ptr = getelementptr inbounds i32, i32* %p, i64 1
- %tmp1 = load i32* %add.ptr, align 4
+ %tmp1 = load i32, i32* %add.ptr, align 4
%sexttmp = sext i32 %tmp to i64
%sexttmp1 = sext i32 %tmp1 to i64
%add = add nsw i64 %sexttmp1, %sexttmp
@@ -27,9 +27,9 @@ define i64 @ldp_sext_int(i32* %p) nounwi
; CHECK: ldp_long
; CHECK: ldp
define i64 @ldp_long(i64* %p) nounwind {
- %tmp = load i64* %p, align 8
+ %tmp = load i64, i64* %p, align 8
%add.ptr = getelementptr inbounds i64, i64* %p, i64 1
- %tmp1 = load i64* %add.ptr, align 8
+ %tmp1 = load i64, i64* %add.ptr, align 8
%add = add nsw i64 %tmp1, %tmp
ret i64 %add
}
@@ -37,9 +37,9 @@ define i64 @ldp_long(i64* %p) nounwind {
; CHECK: ldp_float
; CHECK: ldp
define float @ldp_float(float* %p) nounwind {
- %tmp = load float* %p, align 4
+ %tmp = load float, float* %p, align 4
%add.ptr = getelementptr inbounds float, float* %p, i64 1
- %tmp1 = load float* %add.ptr, align 4
+ %tmp1 = load float, float* %add.ptr, align 4
%add = fadd float %tmp, %tmp1
ret float %add
}
@@ -47,9 +47,9 @@ define float @ldp_float(float* %p) nounw
; CHECK: ldp_double
; CHECK: ldp
define double @ldp_double(double* %p) nounwind {
- %tmp = load double* %p, align 8
+ %tmp = load double, double* %p, align 8
%add.ptr = getelementptr inbounds double, double* %p, i64 1
- %tmp1 = load double* %add.ptr, align 8
+ %tmp1 = load double, double* %add.ptr, align 8
%add = fadd double %tmp, %tmp1
ret double %add
}
@@ -61,9 +61,9 @@ define i32 @ldur_int(i32* %a) nounwind {
; LDUR_CHK-NEXT: add w{{[0-9]+}}, [[DST2]], [[DST1]]
; LDUR_CHK-NEXT: ret
%p1 = getelementptr inbounds i32, i32* %a, i32 -1
- %tmp1 = load i32* %p1, align 2
+ %tmp1 = load i32, i32* %p1, align 2
%p2 = getelementptr inbounds i32, i32* %a, i32 -2
- %tmp2 = load i32* %p2, align 2
+ %tmp2 = load i32, i32* %p2, align 2
%tmp3 = add i32 %tmp1, %tmp2
ret i32 %tmp3
}
@@ -74,9 +74,9 @@ define i64 @ldur_sext_int(i32* %a) nounw
; LDUR_CHK-NEXT: add x{{[0-9]+}}, [[DST2]], [[DST1]]
; LDUR_CHK-NEXT: ret
%p1 = getelementptr inbounds i32, i32* %a, i32 -1
- %tmp1 = load i32* %p1, align 2
+ %tmp1 = load i32, i32* %p1, align 2
%p2 = getelementptr inbounds i32, i32* %a, i32 -2
- %tmp2 = load i32* %p2, align 2
+ %tmp2 = load i32, i32* %p2, align 2
%sexttmp1 = sext i32 %tmp1 to i64
%sexttmp2 = sext i32 %tmp2 to i64
%tmp3 = add i64 %sexttmp1, %sexttmp2
@@ -89,9 +89,9 @@ define i64 @ldur_long(i64* %a) nounwind
; LDUR_CHK-NEXT: add x{{[0-9]+}}, [[DST2]], [[DST1]]
; LDUR_CHK-NEXT: ret
%p1 = getelementptr inbounds i64, i64* %a, i64 -1
- %tmp1 = load i64* %p1, align 2
+ %tmp1 = load i64, i64* %p1, align 2
%p2 = getelementptr inbounds i64, i64* %a, i64 -2
- %tmp2 = load i64* %p2, align 2
+ %tmp2 = load i64, i64* %p2, align 2
%tmp3 = add i64 %tmp1, %tmp2
ret i64 %tmp3
}
@@ -102,9 +102,9 @@ define float @ldur_float(float* %a) {
; LDUR_CHK-NEXT: add s{{[0-9]+}}, [[DST2]], [[DST1]]
; LDUR_CHK-NEXT: ret
%p1 = getelementptr inbounds float, float* %a, i64 -1
- %tmp1 = load float* %p1, align 2
+ %tmp1 = load float, float* %p1, align 2
%p2 = getelementptr inbounds float, float* %a, i64 -2
- %tmp2 = load float* %p2, align 2
+ %tmp2 = load float, float* %p2, align 2
%tmp3 = fadd float %tmp1, %tmp2
ret float %tmp3
}
@@ -115,9 +115,9 @@ define double @ldur_double(double* %a) {
; LDUR_CHK-NEXT: add d{{[0-9]+}}, [[DST2]], [[DST1]]
; LDUR_CHK-NEXT: ret
%p1 = getelementptr inbounds double, double* %a, i64 -1
- %tmp1 = load double* %p1, align 2
+ %tmp1 = load double, double* %p1, align 2
%p2 = getelementptr inbounds double, double* %a, i64 -2
- %tmp2 = load double* %p2, align 2
+ %tmp2 = load double, double* %p2, align 2
%tmp3 = fadd double %tmp1, %tmp2
ret double %tmp3
}
@@ -130,9 +130,9 @@ define i64 @pairUpBarelyIn(i64* %a) noun
; LDUR_CHK-NEXT: add x{{[0-9]+}}, [[DST2]], [[DST1]]
; LDUR_CHK-NEXT: ret
%p1 = getelementptr inbounds i64, i64* %a, i64 -31
- %tmp1 = load i64* %p1, align 2
+ %tmp1 = load i64, i64* %p1, align 2
%p2 = getelementptr inbounds i64, i64* %a, i64 -32
- %tmp2 = load i64* %p2, align 2
+ %tmp2 = load i64, i64* %p2, align 2
%tmp3 = add i64 %tmp1, %tmp2
ret i64 %tmp3
}
@@ -144,9 +144,9 @@ define i64 @pairUpBarelyInSext(i32* %a)
; LDUR_CHK-NEXT: add x{{[0-9]+}}, [[DST2]], [[DST1]]
; LDUR_CHK-NEXT: ret
%p1 = getelementptr inbounds i32, i32* %a, i64 -63
- %tmp1 = load i32* %p1, align 2
+ %tmp1 = load i32, i32* %p1, align 2
%p2 = getelementptr inbounds i32, i32* %a, i64 -64
- %tmp2 = load i32* %p2, align 2
+ %tmp2 = load i32, i32* %p2, align 2
%sexttmp1 = sext i32 %tmp1 to i64
%sexttmp2 = sext i32 %tmp2 to i64
%tmp3 = add i64 %sexttmp1, %sexttmp2
@@ -161,9 +161,9 @@ define i64 @pairUpBarelyOut(i64* %a) nou
; LDUR_CHK: add
; LDUR_CHK-NEXT: ret
%p1 = getelementptr inbounds i64, i64* %a, i64 -32
- %tmp1 = load i64* %p1, align 2
+ %tmp1 = load i64, i64* %p1, align 2
%p2 = getelementptr inbounds i64, i64* %a, i64 -33
- %tmp2 = load i64* %p2, align 2
+ %tmp2 = load i64, i64* %p2, align 2
%tmp3 = add i64 %tmp1, %tmp2
ret i64 %tmp3
}
@@ -176,9 +176,9 @@ define i64 @pairUpBarelyOutSext(i32* %a)
; LDUR_CHK: add
; LDUR_CHK-NEXT: ret
%p1 = getelementptr inbounds i32, i32* %a, i64 -64
- %tmp1 = load i32* %p1, align 2
+ %tmp1 = load i32, i32* %p1, align 2
%p2 = getelementptr inbounds i32, i32* %a, i64 -65
- %tmp2 = load i32* %p2, align 2
+ %tmp2 = load i32, i32* %p2, align 2
%sexttmp1 = sext i32 %tmp1 to i64
%sexttmp2 = sext i32 %tmp2 to i64
%tmp3 = add i64 %sexttmp1, %sexttmp2
@@ -196,13 +196,13 @@ define i64 @pairUpNotAligned(i64* %a) no
%bp1 = bitcast i64* %p1 to i8*
%bp1p1 = getelementptr inbounds i8, i8* %bp1, i64 1
%dp1 = bitcast i8* %bp1p1 to i64*
- %tmp1 = load i64* %dp1, align 1
+ %tmp1 = load i64, i64* %dp1, align 1
%p2 = getelementptr inbounds i64, i64* %a, i64 -17
%bp2 = bitcast i64* %p2 to i8*
%bp2p1 = getelementptr inbounds i8, i8* %bp2, i64 1
%dp2 = bitcast i8* %bp2p1 to i64*
- %tmp2 = load i64* %dp2, align 1
+ %tmp2 = load i64, i64* %dp2, align 1
%tmp3 = add i64 %tmp1, %tmp2
ret i64 %tmp3
@@ -219,13 +219,13 @@ define i64 @pairUpNotAlignedSext(i32* %a
%bp1 = bitcast i32* %p1 to i8*
%bp1p1 = getelementptr inbounds i8, i8* %bp1, i64 1
%dp1 = bitcast i8* %bp1p1 to i32*
- %tmp1 = load i32* %dp1, align 1
+ %tmp1 = load i32, i32* %dp1, align 1
%p2 = getelementptr inbounds i32, i32* %a, i64 -17
%bp2 = bitcast i32* %p2 to i8*
%bp2p1 = getelementptr inbounds i8, i8* %bp2, i64 1
%dp2 = bitcast i8* %bp2p1 to i32*
- %tmp2 = load i32* %dp2, align 1
+ %tmp2 = load i32, i32* %dp2, align 1
%sexttmp1 = sext i32 %tmp1 to i64
%sexttmp2 = sext i32 %tmp2 to i64
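
The ldp tests pair two loads at adjacent offsets; note that getelementptr carries an explicit element type in the same way load now does. A sketch of the paired-load shape (hypothetical function):

  define i64 @example_pair(i64* %p) {
    ; two i64 loads at consecutive addresses, a candidate for ldp formation
    %a = load i64, i64* %p, align 8
    %q = getelementptr inbounds i64, i64* %p, i64 1
    %b = load i64, i64* %q, align 8
    %sum = add i64 %a, %b
    ret i64 %sum
  }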
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-ldur.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-ldur.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-ldur.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-ldur.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@ define i64 @_f0(i64* %p) {
; CHECK: ldur x0, [x0, #-8]
; CHECK-NEXT: ret
%tmp = getelementptr inbounds i64, i64* %p, i64 -1
- %ret = load i64* %tmp, align 2
+ %ret = load i64, i64* %tmp, align 2
ret i64 %ret
}
define i32 @_f1(i32* %p) {
@@ -13,7 +13,7 @@ define i32 @_f1(i32* %p) {
; CHECK: ldur w0, [x0, #-4]
; CHECK-NEXT: ret
%tmp = getelementptr inbounds i32, i32* %p, i64 -1
- %ret = load i32* %tmp, align 2
+ %ret = load i32, i32* %tmp, align 2
ret i32 %ret
}
define i16 @_f2(i16* %p) {
@@ -21,7 +21,7 @@ define i16 @_f2(i16* %p) {
; CHECK: ldurh w0, [x0, #-2]
; CHECK-NEXT: ret
%tmp = getelementptr inbounds i16, i16* %p, i64 -1
- %ret = load i16* %tmp, align 2
+ %ret = load i16, i16* %tmp, align 2
ret i16 %ret
}
define i8 @_f3(i8* %p) {
@@ -29,7 +29,7 @@ define i8 @_f3(i8* %p) {
; CHECK: ldurb w0, [x0, #-1]
; CHECK-NEXT: ret
%tmp = getelementptr inbounds i8, i8* %p, i64 -1
- %ret = load i8* %tmp, align 2
+ %ret = load i8, i8* %tmp, align 2
ret i8 %ret
}
@@ -39,7 +39,7 @@ define i64 @zext32(i8* %a) nounwind ssp
; CHECK-NEXT: ret
%p = getelementptr inbounds i8, i8* %a, i64 -12
%tmp1 = bitcast i8* %p to i32*
- %tmp2 = load i32* %tmp1, align 4
+ %tmp2 = load i32, i32* %tmp1, align 4
%ret = zext i32 %tmp2 to i64
ret i64 %ret
@@ -50,7 +50,7 @@ define i64 @zext16(i8* %a) nounwind ssp
; CHECK-NEXT: ret
%p = getelementptr inbounds i8, i8* %a, i64 -12
%tmp1 = bitcast i8* %p to i16*
- %tmp2 = load i16* %tmp1, align 2
+ %tmp2 = load i16, i16* %tmp1, align 2
%ret = zext i16 %tmp2 to i64
ret i64 %ret
@@ -60,7 +60,7 @@ define i64 @zext8(i8* %a) nounwind ssp {
; CHECK: ldurb w0, [x0, #-12]
; CHECK-NEXT: ret
%p = getelementptr inbounds i8, i8* %a, i64 -12
- %tmp2 = load i8* %p, align 1
+ %tmp2 = load i8, i8* %p, align 1
%ret = zext i8 %tmp2 to i64
ret i64 %ret
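
The zext tests above follow the usual IR convention: the load produces exactly the in-memory type, and widening is a separate instruction. A minimal sketch (hypothetical function):

  define i64 @example_widen(i16* %p) {
    ; the loaded type i16 is explicit; the i64 widening is its own instruction
    %w = load i16, i16* %p, align 2
    %x = zext i16 %w to i64
    ret i64 %x
  }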
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-misched-basic-A53.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-misched-basic-A53.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-misched-basic-A53.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-misched-basic-A53.ll Fri Feb 27 15:17:42 2015
@@ -34,44 +34,44 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %2 = load i32* %i, align 4
+ %2 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %2, 8
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %3 = load i32* %i, align 4
+ %3 = load i32, i32* %i, align 4
%idxprom = sext i32 %3 to i64
%arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* %x, i32 0, i64 %idxprom
- %4 = load i32* %arrayidx, align 4
+ %4 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %4, 1
store i32 %add, i32* %xx, align 4
- %5 = load i32* %xx, align 4
+ %5 = load i32, i32* %xx, align 4
%add1 = add nsw i32 %5, 12
store i32 %add1, i32* %xx, align 4
- %6 = load i32* %xx, align 4
+ %6 = load i32, i32* %xx, align 4
%add2 = add nsw i32 %6, 23
store i32 %add2, i32* %xx, align 4
- %7 = load i32* %xx, align 4
+ %7 = load i32, i32* %xx, align 4
%add3 = add nsw i32 %7, 34
store i32 %add3, i32* %xx, align 4
- %8 = load i32* %i, align 4
+ %8 = load i32, i32* %i, align 4
%idxprom4 = sext i32 %8 to i64
%arrayidx5 = getelementptr inbounds [8 x i32], [8 x i32]* %y, i32 0, i64 %idxprom4
- %9 = load i32* %arrayidx5, align 4
- %10 = load i32* %yy, align 4
+ %9 = load i32, i32* %arrayidx5, align 4
+ %10 = load i32, i32* %yy, align 4
%mul = mul nsw i32 %10, %9
store i32 %mul, i32* %yy, align 4
br label %for.inc
for.inc: ; preds = %for.body
- %11 = load i32* %i, align 4
+ %11 = load i32, i32* %i, align 4
%inc = add nsw i32 %11, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %12 = load i32* %xx, align 4
- %13 = load i32* %yy, align 4
+ %12 = load i32, i32* %xx, align 4
+ %13 = load i32, i32* %yy, align 4
%add6 = add nsw i32 %12, %13
ret i32 %add6
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-misched-basic-A57.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-misched-basic-A57.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-misched-basic-A57.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-misched-basic-A57.ll Fri Feb 27 15:17:42 2015
@@ -41,31 +41,31 @@ entry:
br label %for.cond
for.cond: ; preds = %for.inc, %entry
- %2 = load i32* %i, align 4
+ %2 = load i32, i32* %i, align 4
%cmp = icmp slt i32 %2, 8
br i1 %cmp, label %for.body, label %for.end
for.body: ; preds = %for.cond
- %3 = load i32* %yy, align 4
- %4 = load i32* %i, align 4
+ %3 = load i32, i32* %yy, align 4
+ %4 = load i32, i32* %i, align 4
%idxprom = sext i32 %4 to i64
%arrayidx = getelementptr inbounds [8 x i32], [8 x i32]* %x, i32 0, i64 %idxprom
- %5 = load i32* %arrayidx, align 4
+ %5 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %5, 1
store i32 %add, i32* %xx, align 4
- %6 = load i32* %xx, align 4
+ %6 = load i32, i32* %xx, align 4
%add1 = add nsw i32 %6, 12
store i32 %add1, i32* %xx, align 4
- %7 = load i32* %xx, align 4
+ %7 = load i32, i32* %xx, align 4
%add2 = add nsw i32 %7, 23
store i32 %add2, i32* %xx, align 4
- %8 = load i32* %xx, align 4
+ %8 = load i32, i32* %xx, align 4
%add3 = add nsw i32 %8, 34
store i32 %add3, i32* %xx, align 4
- %9 = load i32* %i, align 4
+ %9 = load i32, i32* %i, align 4
%idxprom4 = sext i32 %9 to i64
%arrayidx5 = getelementptr inbounds [8 x i32], [8 x i32]* %y, i32 0, i64 %idxprom4
- %10 = load i32* %arrayidx5, align 4
+ %10 = load i32, i32* %arrayidx5, align 4
%add4 = add nsw i32 %9, %add
%add5 = add nsw i32 %10, %add1
@@ -92,14 +92,14 @@ for.body:
br label %for.inc
for.inc: ; preds = %for.body
- %11 = load i32* %i, align 4
+ %11 = load i32, i32* %i, align 4
%inc = add nsw i32 %11, 1
store i32 %inc, i32* %i, align 4
br label %for.cond
for.end: ; preds = %for.cond
- %12 = load i32* %xx, align 4
- %13 = load i32* %yy, align 4
+ %12 = load i32, i32* %xx, align 4
+ %13 = load i32, i32* %yy, align 4
%add67 = add nsw i32 %12, %13
ret i32 %add67
}
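
The scheduler tests in these two files are long load/compute/store chains through allocas. The recurring unit, in the updated syntax (hypothetical function, for illustration):

  define i32 @example_bump() {
    %slot = alloca i32, align 4
    store i32 41, i32* %slot, align 4
    ; load with explicit type, arithmetic, store back
    %v = load i32, i32* %slot, align 4
    %v1 = add nsw i32 %v, 1
    store i32 %v1, i32* %slot, align 4
    ret i32 %v1
  }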
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-neon-simd-ldst-one.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-neon-simd-ldst-one.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-neon-simd-ldst-one.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-neon-simd-ldst-one.ll Fri Feb 27 15:17:42 2015
@@ -126,7 +126,7 @@ define <16 x i8> @test_vld1q_dup_s8(i8*
; CHECK-LABEL: test_vld1q_dup_s8:
; CHECK: ld1r {{{ ?v[0-9]+.16b ?}}}, [x0]
entry:
- %0 = load i8* %a, align 1
+ %0 = load i8, i8* %a, align 1
%1 = insertelement <16 x i8> undef, i8 %0, i32 0
%lane = shufflevector <16 x i8> %1, <16 x i8> undef, <16 x i32> zeroinitializer
ret <16 x i8> %lane
@@ -136,7 +136,7 @@ define <8 x i16> @test_vld1q_dup_s16(i16
; CHECK-LABEL: test_vld1q_dup_s16:
; CHECK: ld1r {{{ ?v[0-9]+.8h ?}}}, [x0]
entry:
- %0 = load i16* %a, align 2
+ %0 = load i16, i16* %a, align 2
%1 = insertelement <8 x i16> undef, i16 %0, i32 0
%lane = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> zeroinitializer
ret <8 x i16> %lane
@@ -146,7 +146,7 @@ define <4 x i32> @test_vld1q_dup_s32(i32
; CHECK-LABEL: test_vld1q_dup_s32:
; CHECK: ld1r {{{ ?v[0-9]+.4s ?}}}, [x0]
entry:
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%1 = insertelement <4 x i32> undef, i32 %0, i32 0
%lane = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> zeroinitializer
ret <4 x i32> %lane
@@ -156,7 +156,7 @@ define <2 x i64> @test_vld1q_dup_s64(i64
; CHECK-LABEL: test_vld1q_dup_s64:
; CHECK: ld1r {{{ ?v[0-9]+.2d ?}}}, [x0]
entry:
- %0 = load i64* %a, align 8
+ %0 = load i64, i64* %a, align 8
%1 = insertelement <2 x i64> undef, i64 %0, i32 0
%lane = shufflevector <2 x i64> %1, <2 x i64> undef, <2 x i32> zeroinitializer
ret <2 x i64> %lane
@@ -166,7 +166,7 @@ define <4 x float> @test_vld1q_dup_f32(f
; CHECK-LABEL: test_vld1q_dup_f32:
; CHECK: ld1r {{{ ?v[0-9]+.4s ?}}}, [x0]
entry:
- %0 = load float* %a, align 4
+ %0 = load float, float* %a, align 4
%1 = insertelement <4 x float> undef, float %0, i32 0
%lane = shufflevector <4 x float> %1, <4 x float> undef, <4 x i32> zeroinitializer
ret <4 x float> %lane
@@ -176,7 +176,7 @@ define <2 x double> @test_vld1q_dup_f64(
; CHECK-LABEL: test_vld1q_dup_f64:
; CHECK: ld1r {{{ ?v[0-9]+.2d ?}}}, [x0]
entry:
- %0 = load double* %a, align 8
+ %0 = load double, double* %a, align 8
%1 = insertelement <2 x double> undef, double %0, i32 0
%lane = shufflevector <2 x double> %1, <2 x double> undef, <2 x i32> zeroinitializer
ret <2 x double> %lane
@@ -186,7 +186,7 @@ define <8 x i8> @test_vld1_dup_s8(i8* %a
; CHECK-LABEL: test_vld1_dup_s8:
; CHECK: ld1r {{{ ?v[0-9]+.8b ?}}}, [x0]
entry:
- %0 = load i8* %a, align 1
+ %0 = load i8, i8* %a, align 1
%1 = insertelement <8 x i8> undef, i8 %0, i32 0
%lane = shufflevector <8 x i8> %1, <8 x i8> undef, <8 x i32> zeroinitializer
ret <8 x i8> %lane
@@ -196,7 +196,7 @@ define <4 x i16> @test_vld1_dup_s16(i16*
; CHECK-LABEL: test_vld1_dup_s16:
; CHECK: ld1r {{{ ?v[0-9]+.4h ?}}}, [x0]
entry:
- %0 = load i16* %a, align 2
+ %0 = load i16, i16* %a, align 2
%1 = insertelement <4 x i16> undef, i16 %0, i32 0
%lane = shufflevector <4 x i16> %1, <4 x i16> undef, <4 x i32> zeroinitializer
ret <4 x i16> %lane
@@ -206,7 +206,7 @@ define <2 x i32> @test_vld1_dup_s32(i32*
; CHECK-LABEL: test_vld1_dup_s32:
; CHECK: ld1r {{{ ?v[0-9]+.2s ?}}}, [x0]
entry:
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%1 = insertelement <2 x i32> undef, i32 %0, i32 0
%lane = shufflevector <2 x i32> %1, <2 x i32> undef, <2 x i32> zeroinitializer
ret <2 x i32> %lane
@@ -216,7 +216,7 @@ define <1 x i64> @test_vld1_dup_s64(i64*
; CHECK-LABEL: test_vld1_dup_s64:
; CHECK: ldr {{d[0-9]+}}, [x0]
entry:
- %0 = load i64* %a, align 8
+ %0 = load i64, i64* %a, align 8
%1 = insertelement <1 x i64> undef, i64 %0, i32 0
ret <1 x i64> %1
}
@@ -225,7 +225,7 @@ define <2 x float> @test_vld1_dup_f32(fl
; CHECK-LABEL: test_vld1_dup_f32:
; CHECK: ld1r {{{ ?v[0-9]+.2s ?}}}, [x0]
entry:
- %0 = load float* %a, align 4
+ %0 = load float, float* %a, align 4
%1 = insertelement <2 x float> undef, float %0, i32 0
%lane = shufflevector <2 x float> %1, <2 x float> undef, <2 x i32> zeroinitializer
ret <2 x float> %lane
@@ -235,7 +235,7 @@ define <1 x double> @test_vld1_dup_f64(d
; CHECK-LABEL: test_vld1_dup_f64:
; CHECK: ldr {{d[0-9]+}}, [x0]
entry:
- %0 = load double* %a, align 8
+ %0 = load double, double* %a, align 8
%1 = insertelement <1 x double> undef, double %0, i32 0
ret <1 x double> %1
}
@@ -247,7 +247,7 @@ define <1 x i64> @testDUP.v1i64(i64* %a,
; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}]
; CHECK-DAG: fmov {{d[0-9]+}}, {{x[0-9]+}}
; CHECK-DAG: str {{x[0-9]+}}, [{{x[0-9]+}}]
- %1 = load i64* %a, align 8
+ %1 = load i64, i64* %a, align 8
store i64 %1, i64* %b, align 8
%vecinit.i = insertelement <1 x i64> undef, i64 %1, i32 0
ret <1 x i64> %vecinit.i
@@ -259,7 +259,7 @@ define <1 x double> @testDUP.v1f64(doubl
; CHECK-LABEL: testDUP.v1f64:
; CHECK: ldr {{d[0-9]+}}, [{{x[0-9]+}}]
; CHECK: str {{d[0-9]+}}, [{{x[0-9]+}}]
- %1 = load double* %a, align 8
+ %1 = load double, double* %a, align 8
store double %1, double* %b, align 8
%vecinit.i = insertelement <1 x double> undef, double %1, i32 0
ret <1 x double> %vecinit.i
@@ -269,7 +269,7 @@ define <16 x i8> @test_vld1q_lane_s8(i8*
; CHECK-LABEL: test_vld1q_lane_s8:
; CHECK: ld1 { {{v[0-9]+}}.b }[{{[0-9]+}}], [x0]
entry:
- %0 = load i8* %a, align 1
+ %0 = load i8, i8* %a, align 1
%vld1_lane = insertelement <16 x i8> %b, i8 %0, i32 15
ret <16 x i8> %vld1_lane
}
@@ -278,7 +278,7 @@ define <8 x i16> @test_vld1q_lane_s16(i1
; CHECK-LABEL: test_vld1q_lane_s16:
; CHECK: ld1 { {{v[0-9]+}}.h }[{{[0-9]+}}], [x0]
entry:
- %0 = load i16* %a, align 2
+ %0 = load i16, i16* %a, align 2
%vld1_lane = insertelement <8 x i16> %b, i16 %0, i32 7
ret <8 x i16> %vld1_lane
}
@@ -287,7 +287,7 @@ define <4 x i32> @test_vld1q_lane_s32(i3
; CHECK-LABEL: test_vld1q_lane_s32:
; CHECK: ld1 { {{v[0-9]+}}.s }[{{[0-9]+}}], [x0]
entry:
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%vld1_lane = insertelement <4 x i32> %b, i32 %0, i32 3
ret <4 x i32> %vld1_lane
}
@@ -296,7 +296,7 @@ define <2 x i64> @test_vld1q_lane_s64(i6
; CHECK-LABEL: test_vld1q_lane_s64:
; CHECK: ld1 { {{v[0-9]+}}.d }[{{[0-9]+}}], [x0]
entry:
- %0 = load i64* %a, align 8
+ %0 = load i64, i64* %a, align 8
%vld1_lane = insertelement <2 x i64> %b, i64 %0, i32 1
ret <2 x i64> %vld1_lane
}
@@ -305,7 +305,7 @@ define <4 x float> @test_vld1q_lane_f32(
; CHECK-LABEL: test_vld1q_lane_f32:
; CHECK: ld1 { {{v[0-9]+}}.s }[{{[0-9]+}}], [x0]
entry:
- %0 = load float* %a, align 4
+ %0 = load float, float* %a, align 4
%vld1_lane = insertelement <4 x float> %b, float %0, i32 3
ret <4 x float> %vld1_lane
}
@@ -314,7 +314,7 @@ define <2 x double> @test_vld1q_lane_f64
; CHECK-LABEL: test_vld1q_lane_f64:
; CHECK: ld1 { {{v[0-9]+}}.d }[{{[0-9]+}}], [x0]
entry:
- %0 = load double* %a, align 8
+ %0 = load double, double* %a, align 8
%vld1_lane = insertelement <2 x double> %b, double %0, i32 1
ret <2 x double> %vld1_lane
}
@@ -323,7 +323,7 @@ define <8 x i8> @test_vld1_lane_s8(i8* %
; CHECK-LABEL: test_vld1_lane_s8:
; CHECK: ld1 { {{v[0-9]+}}.b }[{{[0-9]+}}], [x0]
entry:
- %0 = load i8* %a, align 1
+ %0 = load i8, i8* %a, align 1
%vld1_lane = insertelement <8 x i8> %b, i8 %0, i32 7
ret <8 x i8> %vld1_lane
}
@@ -332,7 +332,7 @@ define <4 x i16> @test_vld1_lane_s16(i16
; CHECK-LABEL: test_vld1_lane_s16:
; CHECK: ld1 { {{v[0-9]+}}.h }[{{[0-9]+}}], [x0]
entry:
- %0 = load i16* %a, align 2
+ %0 = load i16, i16* %a, align 2
%vld1_lane = insertelement <4 x i16> %b, i16 %0, i32 3
ret <4 x i16> %vld1_lane
}
@@ -341,7 +341,7 @@ define <2 x i32> @test_vld1_lane_s32(i32
; CHECK-LABEL: test_vld1_lane_s32:
; CHECK: ld1 { {{v[0-9]+}}.s }[{{[0-9]+}}], [x0]
entry:
- %0 = load i32* %a, align 4
+ %0 = load i32, i32* %a, align 4
%vld1_lane = insertelement <2 x i32> %b, i32 %0, i32 1
ret <2 x i32> %vld1_lane
}
@@ -350,7 +350,7 @@ define <1 x i64> @test_vld1_lane_s64(i64
; CHECK-LABEL: test_vld1_lane_s64:
; CHECK: ldr {{d[0-9]+}}, [x0]
entry:
- %0 = load i64* %a, align 8
+ %0 = load i64, i64* %a, align 8
%vld1_lane = insertelement <1 x i64> undef, i64 %0, i32 0
ret <1 x i64> %vld1_lane
}
@@ -359,7 +359,7 @@ define <2 x float> @test_vld1_lane_f32(f
; CHECK-LABEL: test_vld1_lane_f32:
; CHECK: ld1 { {{v[0-9]+}}.s }[{{[0-9]+}}], [x0]
entry:
- %0 = load float* %a, align 4
+ %0 = load float, float* %a, align 4
%vld1_lane = insertelement <2 x float> %b, float %0, i32 1
ret <2 x float> %vld1_lane
}
@@ -368,7 +368,7 @@ define <1 x double> @test_vld1_lane_f64(
; CHECK-LABEL: test_vld1_lane_f64:
; CHECK: ldr {{d[0-9]+}}, [x0]
entry:
- %0 = load double* %a, align 8
+ %0 = load double, double* %a, align 8
%vld1_lane = insertelement <1 x double> undef, double %0, i32 0
ret <1 x double> %vld1_lane
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-patchpoint-scratch-regs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-patchpoint-scratch-regs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-patchpoint-scratch-regs.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-patchpoint-scratch-regs.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@
; CHECK-NEXT: Ltmp
; CHECK-NEXT: nop
define void @clobberScratch(i32* %p) {
- %v = load i32* %p
+ %v = load i32, i32* %p
tail call void asm sideeffect "nop", "~{x0},~{x1},~{x2},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{x29},~{x30},~{x31}"() nounwind
tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 5, i32 20, i8* null, i32 0, i32* %p, i32 %v)
store i32 %v, i32* %p
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-patchpoint.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-patchpoint.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-patchpoint.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-patchpoint.ll Fri Feb 27 15:17:42 2015
@@ -50,13 +50,13 @@ define i64 @testLowerConstant(i64 %arg,
entry:
%tmp80 = add i64 %tmp79, -16
%tmp81 = inttoptr i64 %tmp80 to i64*
- %tmp82 = load i64* %tmp81, align 8
+ %tmp82 = load i64, i64* %tmp81, align 8
tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 14, i32 8, i64 %arg, i64 %tmp2, i64 %tmp10, i64 %tmp82)
tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 15, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp82)
- %tmp83 = load i64* %tmp33, align 8
+ %tmp83 = load i64, i64* %tmp33, align 8
%tmp84 = add i64 %tmp83, -24
%tmp85 = inttoptr i64 %tmp84 to i64*
- %tmp86 = load i64* %tmp85, align 8
+ %tmp86 = load i64, i64* %tmp85, align 8
tail call void (i64, i32, ...)* @llvm.experimental.stackmap(i64 17, i32 8, i64 %arg, i64 %tmp10, i64 %tmp86)
tail call void (i64, i32, i8*, i32, ...)* @llvm.experimental.patchpoint.void(i64 18, i32 32, i8* null, i32 3, i64 %arg, i64 %tmp10, i64 %tmp86)
ret i64 10
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-pic-local-symbol.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-pic-local-symbol.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-pic-local-symbol.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-pic-local-symbol.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@ define i32 @get() {
; CHECK: get:
; CHECK: adrp x{{[0-9]+}}, a
; CHECK-NEXT: ldr w{{[0-9]+}}, [x{{[0-9]}}, :lo12:a]
- %res = load i32* @a, align 4
+ %res = load i32, i32* @a, align 4
ret i32 %res
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-platform-reg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-platform-reg.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-platform-reg.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-platform-reg.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@
@var = global [30 x i64] zeroinitializer
define void @keep_live() {
- %val = load volatile [30 x i64]* @var
+ %val = load volatile [30 x i64], [30 x i64]* @var
store volatile [30 x i64] %val, [30 x i64]* @var
; CHECK: ldr x18
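
As the hunk above shows, qualifiers such as volatile keep their old position between load and the type; only the value type gains an explicit spelling, and this holds for aggregate types too. A minimal volatile sketch (hypothetical global @g):

  @g = global [2 x i64] zeroinitializer

  define void @example_volatile() {
    ; 'volatile' precedes the explicit value type
    %v = load volatile [2 x i64], [2 x i64]* @g
    store volatile [2 x i64] %v, [2 x i64]* @g
    ret void
  }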
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-prefetch.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-prefetch.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-prefetch.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-prefetch.ll Fri Feb 27 15:17:42 2015
@@ -35,78 +35,78 @@ entry:
; CHECK: prfum pstl1keep
call void @llvm.prefetch(i8* %tmp, i32 1, i32 3, i32 1)
- %tmp1 = load i32* %j.addr, align 4, !tbaa !0
+ %tmp1 = load i32, i32* %j.addr, align 4, !tbaa !0
%add = add nsw i32 %tmp1, %i
%idxprom = sext i32 %add to i64
- %tmp2 = load i32** @a, align 8, !tbaa !3
+ %tmp2 = load i32*, i32** @a, align 8, !tbaa !3
%arrayidx = getelementptr inbounds i32, i32* %tmp2, i64 %idxprom
%tmp3 = bitcast i32* %arrayidx to i8*
; CHECK: prfm pldl1strm
call void @llvm.prefetch(i8* %tmp3, i32 0, i32 0, i32 1)
- %tmp4 = load i32** @a, align 8, !tbaa !3
+ %tmp4 = load i32*, i32** @a, align 8, !tbaa !3
%arrayidx3 = getelementptr inbounds i32, i32* %tmp4, i64 %idxprom
%tmp5 = bitcast i32* %arrayidx3 to i8*
; CHECK: prfm pldl3keep
call void @llvm.prefetch(i8* %tmp5, i32 0, i32 1, i32 1)
- %tmp6 = load i32** @a, align 8, !tbaa !3
+ %tmp6 = load i32*, i32** @a, align 8, !tbaa !3
%arrayidx6 = getelementptr inbounds i32, i32* %tmp6, i64 %idxprom
%tmp7 = bitcast i32* %arrayidx6 to i8*
; CHECK: prfm pldl2keep
call void @llvm.prefetch(i8* %tmp7, i32 0, i32 2, i32 1)
- %tmp8 = load i32** @a, align 8, !tbaa !3
+ %tmp8 = load i32*, i32** @a, align 8, !tbaa !3
%arrayidx9 = getelementptr inbounds i32, i32* %tmp8, i64 %idxprom
%tmp9 = bitcast i32* %arrayidx9 to i8*
; CHECK: prfm pldl1keep
call void @llvm.prefetch(i8* %tmp9, i32 0, i32 3, i32 1)
- %tmp10 = load i32** @a, align 8, !tbaa !3
+ %tmp10 = load i32*, i32** @a, align 8, !tbaa !3
%arrayidx12 = getelementptr inbounds i32, i32* %tmp10, i64 %idxprom
%tmp11 = bitcast i32* %arrayidx12 to i8*
; CHECK: prfm plil1strm
call void @llvm.prefetch(i8* %tmp11, i32 0, i32 0, i32 0)
- %tmp12 = load i32** @a, align 8, !tbaa !3
+ %tmp12 = load i32*, i32** @a, align 8, !tbaa !3
%arrayidx15 = getelementptr inbounds i32, i32* %tmp12, i64 %idxprom
%tmp13 = bitcast i32* %arrayidx3 to i8*
; CHECK: prfm plil3keep
call void @llvm.prefetch(i8* %tmp13, i32 0, i32 1, i32 0)
- %tmp14 = load i32** @a, align 8, !tbaa !3
+ %tmp14 = load i32*, i32** @a, align 8, !tbaa !3
%arrayidx18 = getelementptr inbounds i32, i32* %tmp14, i64 %idxprom
%tmp15 = bitcast i32* %arrayidx6 to i8*
; CHECK: prfm plil2keep
call void @llvm.prefetch(i8* %tmp15, i32 0, i32 2, i32 0)
- %tmp16 = load i32** @a, align 8, !tbaa !3
+ %tmp16 = load i32*, i32** @a, align 8, !tbaa !3
%arrayidx21 = getelementptr inbounds i32, i32* %tmp16, i64 %idxprom
%tmp17 = bitcast i32* %arrayidx9 to i8*
; CHECK: prfm plil1keep
call void @llvm.prefetch(i8* %tmp17, i32 0, i32 3, i32 0)
- %tmp18 = load i32** @a, align 8, !tbaa !3
+ %tmp18 = load i32*, i32** @a, align 8, !tbaa !3
%arrayidx24 = getelementptr inbounds i32, i32* %tmp18, i64 %idxprom
%tmp19 = bitcast i32* %arrayidx12 to i8*
; CHECK: prfm pstl1strm
call void @llvm.prefetch(i8* %tmp19, i32 1, i32 0, i32 1)
- %tmp20 = load i32** @a, align 8, !tbaa !3
+ %tmp20 = load i32*, i32** @a, align 8, !tbaa !3
%arrayidx27 = getelementptr inbounds i32, i32* %tmp20, i64 %idxprom
%tmp21 = bitcast i32* %arrayidx15 to i8*
; CHECK: prfm pstl3keep
call void @llvm.prefetch(i8* %tmp21, i32 1, i32 1, i32 1)
- %tmp22 = load i32** @a, align 8, !tbaa !3
+ %tmp22 = load i32*, i32** @a, align 8, !tbaa !3
%arrayidx30 = getelementptr inbounds i32, i32* %tmp22, i64 %idxprom
%tmp23 = bitcast i32* %arrayidx18 to i8*
; CHECK: prfm pstl2keep
call void @llvm.prefetch(i8* %tmp23, i32 1, i32 2, i32 1)
- %tmp24 = load i32** @a, align 8, !tbaa !3
+ %tmp24 = load i32*, i32** @a, align 8, !tbaa !3
%arrayidx33 = getelementptr inbounds i32, i32* %tmp24, i64 %idxprom
%tmp25 = bitcast i32* %arrayidx21 to i8*
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-redzone.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-redzone.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-redzone.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-redzone.ll Fri Feb 27 15:17:42 2015
@@ -9,10 +9,10 @@ define i32 @foo(i32 %a, i32 %b) nounwind
%x = alloca i32, align 4
store i32 %a, i32* %a.addr, align 4
store i32 %b, i32* %b.addr, align 4
- %tmp = load i32* %a.addr, align 4
- %tmp1 = load i32* %b.addr, align 4
+ %tmp = load i32, i32* %a.addr, align 4
+ %tmp1 = load i32, i32* %b.addr, align 4
%add = add nsw i32 %tmp, %tmp1
store i32 %add, i32* %x, align 4
- %tmp2 = load i32* %x, align 4
+ %tmp2 = load i32, i32* %x, align 4
ret i32 %tmp2
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-register-offset-addressing.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-register-offset-addressing.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-register-offset-addressing.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-register-offset-addressing.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@ define i8 @test_64bit_add(i16* %a, i64 %
; CHECK: ldrb w0, [x0, [[REG]]]
; CHECK: ret
%tmp1 = getelementptr inbounds i16, i16* %a, i64 %b
- %tmp2 = load i16* %tmp1
+ %tmp2 = load i16, i16* %tmp1
%tmp3 = trunc i16 %tmp2 to i8
ret i8 %tmp3
}
@@ -19,7 +19,7 @@ define void @ldst_8bit(i8* %base, i64 %o
%off32.sext.tmp = shl i64 %offset, 32
%off32.sext = ashr i64 %off32.sext.tmp, 32
%addr8_sxtw = getelementptr i8, i8* %base, i64 %off32.sext
- %val8_sxtw = load volatile i8* %addr8_sxtw
+ %val8_sxtw = load volatile i8, i8* %addr8_sxtw
%val32_signed = sext i8 %val8_sxtw to i32
store volatile i32 %val32_signed, i32* @var_32bit
; CHECK: ldrsb {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
@@ -28,7 +28,7 @@ define void @ldst_8bit(i8* %base, i64 %o
%offset_uxtw = and i64 %offset, 4294967295
%addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
%addr_uxtw = inttoptr i64 %addrint1_uxtw to i8*
- %val8_uxtw = load volatile i8* %addr_uxtw
+ %val8_uxtw = load volatile i8, i8* %addr_uxtw
%newval8 = add i8 %val8_uxtw, 1
store volatile i8 %newval8, i8* @var_8bit
; CHECK: ldrb {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
@@ -44,7 +44,7 @@ define void @ldst_16bit(i16* %base, i64
%offset_uxtw = and i64 %offset, 4294967295
%addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
%addr_uxtw = inttoptr i64 %addrint1_uxtw to i16*
- %val8_uxtw = load volatile i16* %addr_uxtw
+ %val8_uxtw = load volatile i16, i16* %addr_uxtw
%newval8 = add i16 %val8_uxtw, 1
store volatile i16 %newval8, i16* @var_16bit
; CHECK: ldrh {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
@@ -54,7 +54,7 @@ define void @ldst_16bit(i16* %base, i64
%offset_sxtw = ashr i64 %offset_sxtw.tmp, 32
%addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
%addr_sxtw = inttoptr i64 %addrint_sxtw to i16*
- %val16_sxtw = load volatile i16* %addr_sxtw
+ %val16_sxtw = load volatile i16, i16* %addr_sxtw
%val64_signed = sext i16 %val16_sxtw to i64
store volatile i64 %val64_signed, i64* @var_64bit
; CHECK: ldrsh {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
@@ -65,7 +65,7 @@ define void @ldst_16bit(i16* %base, i64
%offset2_uxtwN = shl i64 %offset_uxtwN, 1
%addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
%addr_uxtwN = inttoptr i64 %addrint_uxtwN to i16*
- %val32 = load volatile i32* @var_32bit
+ %val32 = load volatile i32, i32* @var_32bit
%val16_trunc32 = trunc i32 %val32 to i16
store volatile i16 %val16_trunc32, i16* %addr_uxtwN
; CHECK: strh {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #1]
@@ -79,7 +79,7 @@ define void @ldst_32bit(i32* %base, i64
%offset_uxtw = and i64 %offset, 4294967295
%addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
%addr_uxtw = inttoptr i64 %addrint1_uxtw to i32*
- %val32_uxtw = load volatile i32* %addr_uxtw
+ %val32_uxtw = load volatile i32, i32* %addr_uxtw
%newval32 = add i32 %val32_uxtw, 1
store volatile i32 %newval32, i32* @var_32bit
; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
@@ -89,7 +89,7 @@ define void @ldst_32bit(i32* %base, i64
%offset_sxtw = ashr i64 %offset_sxtw.tmp, 32
%addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
%addr_sxtw = inttoptr i64 %addrint_sxtw to i32*
- %val32_sxtw = load volatile i32* %addr_sxtw
+ %val32_sxtw = load volatile i32, i32* %addr_sxtw
%val64_signed = sext i32 %val32_sxtw to i64
store volatile i64 %val64_signed, i64* @var_64bit
; CHECK: ldrsw {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
@@ -100,7 +100,7 @@ define void @ldst_32bit(i32* %base, i64
%offset2_uxtwN = shl i64 %offset_uxtwN, 2
%addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
%addr_uxtwN = inttoptr i64 %addrint_uxtwN to i32*
- %val32 = load volatile i32* @var_32bit
+ %val32 = load volatile i32, i32* @var_32bit
store volatile i32 %val32, i32* %addr_uxtwN
; CHECK: str {{w[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #2]
ret void
@@ -113,7 +113,7 @@ define void @ldst_64bit(i64* %base, i64
%offset_uxtw = and i64 %offset, 4294967295
%addrint1_uxtw = add i64 %addrint_uxtw, %offset_uxtw
%addr_uxtw = inttoptr i64 %addrint1_uxtw to i64*
- %val64_uxtw = load volatile i64* %addr_uxtw
+ %val64_uxtw = load volatile i64, i64* %addr_uxtw
%newval8 = add i64 %val64_uxtw, 1
store volatile i64 %newval8, i64* @var_64bit
; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw]
@@ -123,7 +123,7 @@ define void @ldst_64bit(i64* %base, i64
%offset_sxtw = ashr i64 %offset_sxtw.tmp, 32
%addrint_sxtw = add i64 %base_sxtw, %offset_sxtw
%addr_sxtw = inttoptr i64 %addrint_sxtw to i64*
- %val64_sxtw = load volatile i64* %addr_sxtw
+ %val64_sxtw = load volatile i64, i64* %addr_sxtw
store volatile i64 %val64_sxtw, i64* @var_64bit
; CHECK: ldr {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, sxtw]
@@ -133,7 +133,7 @@ define void @ldst_64bit(i64* %base, i64
%offset2_uxtwN = shl i64 %offset_uxtwN, 3
%addrint_uxtwN = add i64 %base_uxtwN, %offset2_uxtwN
%addr_uxtwN = inttoptr i64 %addrint_uxtwN to i64*
- %val64 = load volatile i64* @var_64bit
+ %val64 = load volatile i64, i64* @var_64bit
store volatile i64 %val64, i64* %addr_uxtwN
; CHECK: str {{x[0-9]+}}, [{{x[0-9]+}}, {{w[0-9]+}}, uxtw #3]
ret void
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-regress-interphase-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-regress-interphase-shift.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-regress-interphase-shift.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-regress-interphase-shift.ll Fri Feb 27 15:17:42 2015
@@ -11,7 +11,7 @@
define void @foo(i64* nocapture %d) {
; CHECK-LABEL: foo:
; CHECK: rorv
- %tmp = load i64* undef, align 8
+ %tmp = load i64, i64* undef, align 8
%sub397 = sub i64 0, %tmp
%and398 = and i64 %sub397, 4294967295
%shr404 = lshr i64 %and398, 0
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-return-vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-return-vector.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-return-vector.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-return-vector.ll Fri Feb 27 15:17:42 2015
@@ -6,6 +6,6 @@ define <2 x double> @test(<2 x double>*
; CHECK: test
; CHECK: ldr q0, [x0]
; CHECK: ret
- %tmp1 = load <2 x double>* %p, align 16
+ %tmp1 = load <2 x double>, <2 x double>* %p, align 16
ret <2 x double> %tmp1
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-rev.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-rev.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-rev.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-rev.ll Fri Feb 27 15:17:42 2015
@@ -64,7 +64,7 @@ entry:
define <8 x i8> @test_vrev64D8(<8 x i8>* %A) nounwind {
;CHECK-LABEL: test_vrev64D8:
;CHECK: rev64.8b
- %tmp1 = load <8 x i8>* %A
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
ret <8 x i8> %tmp2
}
@@ -72,7 +72,7 @@ define <8 x i8> @test_vrev64D8(<8 x i8>*
define <4 x i16> @test_vrev64D16(<4 x i16>* %A) nounwind {
;CHECK-LABEL: test_vrev64D16:
;CHECK: rev64.4h
- %tmp1 = load <4 x i16>* %A
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
ret <4 x i16> %tmp2
}
@@ -80,7 +80,7 @@ define <4 x i16> @test_vrev64D16(<4 x i1
define <2 x i32> @test_vrev64D32(<2 x i32>* %A) nounwind {
;CHECK-LABEL: test_vrev64D32:
;CHECK: rev64.2s
- %tmp1 = load <2 x i32>* %A
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
ret <2 x i32> %tmp2
}
@@ -88,7 +88,7 @@ define <2 x i32> @test_vrev64D32(<2 x i3
define <2 x float> @test_vrev64Df(<2 x float>* %A) nounwind {
;CHECK-LABEL: test_vrev64Df:
;CHECK: rev64.2s
- %tmp1 = load <2 x float>* %A
+ %tmp1 = load <2 x float>, <2 x float>* %A
%tmp2 = shufflevector <2 x float> %tmp1, <2 x float> undef, <2 x i32> <i32 1, i32 0>
ret <2 x float> %tmp2
}
@@ -96,7 +96,7 @@ define <2 x float> @test_vrev64Df(<2 x f
define <16 x i8> @test_vrev64Q8(<16 x i8>* %A) nounwind {
;CHECK-LABEL: test_vrev64Q8:
;CHECK: rev64.16b
- %tmp1 = load <16 x i8>* %A
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8>
ret <16 x i8> %tmp2
}
@@ -104,7 +104,7 @@ define <16 x i8> @test_vrev64Q8(<16 x i8
define <8 x i16> @test_vrev64Q16(<8 x i16>* %A) nounwind {
;CHECK-LABEL: test_vrev64Q16:
;CHECK: rev64.8h
- %tmp1 = load <8 x i16>* %A
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
ret <8 x i16> %tmp2
}
@@ -112,7 +112,7 @@ define <8 x i16> @test_vrev64Q16(<8 x i1
define <4 x i32> @test_vrev64Q32(<4 x i32>* %A) nounwind {
;CHECK-LABEL: test_vrev64Q32:
;CHECK: rev64.4s
- %tmp1 = load <4 x i32>* %A
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp2 = shufflevector <4 x i32> %tmp1, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
ret <4 x i32> %tmp2
}
@@ -120,7 +120,7 @@ define <4 x i32> @test_vrev64Q32(<4 x i3
define <4 x float> @test_vrev64Qf(<4 x float>* %A) nounwind {
;CHECK-LABEL: test_vrev64Qf:
;CHECK: rev64.4s
- %tmp1 = load <4 x float>* %A
+ %tmp1 = load <4 x float>, <4 x float>* %A
%tmp2 = shufflevector <4 x float> %tmp1, <4 x float> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
ret <4 x float> %tmp2
}
@@ -128,7 +128,7 @@ define <4 x float> @test_vrev64Qf(<4 x f
define <8 x i8> @test_vrev32D8(<8 x i8>* %A) nounwind {
;CHECK-LABEL: test_vrev32D8:
;CHECK: rev32.8b
- %tmp1 = load <8 x i8>* %A
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
ret <8 x i8> %tmp2
}
@@ -136,7 +136,7 @@ define <8 x i8> @test_vrev32D8(<8 x i8>*
define <4 x i16> @test_vrev32D16(<4 x i16>* %A) nounwind {
;CHECK-LABEL: test_vrev32D16:
;CHECK: rev32.4h
- %tmp1 = load <4 x i16>* %A
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = shufflevector <4 x i16> %tmp1, <4 x i16> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
ret <4 x i16> %tmp2
}
@@ -144,7 +144,7 @@ define <4 x i16> @test_vrev32D16(<4 x i1
define <16 x i8> @test_vrev32Q8(<16 x i8>* %A) nounwind {
;CHECK-LABEL: test_vrev32Q8:
;CHECK: rev32.16b
- %tmp1 = load <16 x i8>* %A
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4, i32 11, i32 10, i32 9, i32 8, i32 15, i32 14, i32 13, i32 12>
ret <16 x i8> %tmp2
}
@@ -152,7 +152,7 @@ define <16 x i8> @test_vrev32Q8(<16 x i8
define <8 x i16> @test_vrev32Q16(<8 x i16>* %A) nounwind {
;CHECK-LABEL: test_vrev32Q16:
;CHECK: rev32.8h
- %tmp1 = load <8 x i16>* %A
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
ret <8 x i16> %tmp2
}
@@ -160,7 +160,7 @@ define <8 x i16> @test_vrev32Q16(<8 x i1
define <8 x i8> @test_vrev16D8(<8 x i8>* %A) nounwind {
;CHECK-LABEL: test_vrev16D8:
;CHECK: rev16.8b
- %tmp1 = load <8 x i8>* %A
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
ret <8 x i8> %tmp2
}
@@ -168,7 +168,7 @@ define <8 x i8> @test_vrev16D8(<8 x i8>*
define <16 x i8> @test_vrev16Q8(<16 x i8>* %A) nounwind {
;CHECK-LABEL: test_vrev16Q8:
;CHECK: rev16.16b
- %tmp1 = load <16 x i8>* %A
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp2 = shufflevector <16 x i8> %tmp1, <16 x i8> undef, <16 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6, i32 9, i32 8, i32 11, i32 10, i32 13, i32 12, i32 15, i32 14>
ret <16 x i8> %tmp2
}
@@ -178,7 +178,7 @@ define <16 x i8> @test_vrev16Q8(<16 x i8
define <8 x i8> @test_vrev64D8_undef(<8 x i8>* %A) nounwind {
;CHECK-LABEL: test_vrev64D8_undef:
;CHECK: rev64.8b
- %tmp1 = load <8 x i8>* %A
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 7, i32 undef, i32 undef, i32 4, i32 3, i32 2, i32 1, i32 0>
ret <8 x i8> %tmp2
}
@@ -186,7 +186,7 @@ define <8 x i8> @test_vrev64D8_undef(<8
define <8 x i16> @test_vrev32Q16_undef(<8 x i16>* %A) nounwind {
;CHECK-LABEL: test_vrev32Q16_undef:
;CHECK: rev32.8h
- %tmp1 = load <8 x i16>* %A
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <8 x i32> <i32 undef, i32 0, i32 undef, i32 2, i32 5, i32 4, i32 7, i32 undef>
ret <8 x i16> %tmp2
}
@@ -199,7 +199,7 @@ define void @test_vrev64(<4 x i16>* noca
; CHECK: st1.h
entry:
%0 = bitcast <4 x i16>* %source to <8 x i16>*
- %tmp2 = load <8 x i16>* %0, align 4
+ %tmp2 = load <8 x i16>, <8 x i16>* %0, align 4
%tmp3 = extractelement <8 x i16> %tmp2, i32 6
%tmp5 = insertelement <2 x i16> undef, i16 %tmp3, i32 0
%tmp9 = extractelement <8 x i16> %tmp2, i32 5
@@ -215,7 +215,7 @@ define void @float_vrev64(float* nocaptu
; CHECK: rev64.4s
entry:
%0 = bitcast float* %source to <4 x float>*
- %tmp2 = load <4 x float>* %0, align 4
+ %tmp2 = load <4 x float>, <4 x float>* %0, align 4
%tmp5 = shufflevector <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, <4 x float> %tmp2, <4 x i32> <i32 0, i32 7, i32 0, i32 0>
%arrayidx8 = getelementptr inbounds <4 x float>, <4 x float>* %dest, i32 11
store <4 x float> %tmp5, <4 x float>* %arrayidx8, align 4
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-scaled_iv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-scaled_iv.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-scaled_iv.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-scaled_iv.ll Fri Feb 27 15:17:42 2015
@@ -18,12 +18,12 @@ for.body:
%indvars.iv = phi i64 [ 1, %entry ], [ %indvars.iv.next, %for.body ]
%tmp = add nsw i64 %indvars.iv, -1
%arrayidx = getelementptr inbounds double, double* %b, i64 %tmp
- %tmp1 = load double* %arrayidx, align 8
+ %tmp1 = load double, double* %arrayidx, align 8
; The induction variable should carry the scaling factor: 1 * 8 = 8.
; CHECK: [[IVNEXT]] = add nuw nsw i64 [[IV]], 8
%indvars.iv.next = add i64 %indvars.iv, 1
%arrayidx2 = getelementptr inbounds double, double* %c, i64 %indvars.iv.next
- %tmp2 = load double* %arrayidx2, align 8
+ %tmp2 = load double, double* %arrayidx2, align 8
%mul = fmul double %tmp1, %tmp2
%arrayidx4 = getelementptr inbounds double, double* %a, i64 %indvars.iv
store double %mul, double* %arrayidx4, align 8
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-scvt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-scvt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-scvt.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-scvt.ll Fri Feb 27 15:17:42 2015
@@ -7,7 +7,7 @@ entry:
; CHECK-LABEL: t1:
; CHECK: ldr s0, [x0]
; CHECK: scvtf s0, s0
- %tmp1 = load i32* %src, align 4
+ %tmp1 = load i32, i32* %src, align 4
%tmp2 = sitofp i32 %tmp1 to float
ret float %tmp2
}
@@ -17,7 +17,7 @@ entry:
; CHECK-LABEL: t2:
; CHECK: ldr s0, [x0]
; CHECK: ucvtf s0, s0
- %tmp1 = load i32* %src, align 4
+ %tmp1 = load i32, i32* %src, align 4
%tmp2 = uitofp i32 %tmp1 to float
ret float %tmp2
}
@@ -27,7 +27,7 @@ entry:
; CHECK-LABEL: t3:
; CHECK: ldr d0, [x0]
; CHECK: scvtf d0, d0
- %tmp1 = load i64* %src, align 4
+ %tmp1 = load i64, i64* %src, align 4
%tmp2 = sitofp i64 %tmp1 to double
ret double %tmp2
}
@@ -37,7 +37,7 @@ entry:
; CHECK-LABEL: t4:
; CHECK: ldr d0, [x0]
; CHECK: ucvtf d0, d0
- %tmp1 = load i64* %src, align 4
+ %tmp1 = load i64, i64* %src, align 4
%tmp2 = uitofp i64 %tmp1 to double
ret double %tmp2
}
@@ -48,7 +48,7 @@ entry:
; CHECK-LABEL: t5:
; CHECK: ldr [[REG:w[0-9]+]], [x0]
; CHECK: scvtf d0, [[REG]]
- %tmp1 = load i32* %src, align 4
+ %tmp1 = load i32, i32* %src, align 4
%tmp2 = sitofp i32 %tmp1 to double
ret double %tmp2
}
@@ -76,7 +76,7 @@ define float @fct1(i8* nocapture %sp0) {
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
%addr = getelementptr i8, i8* %sp0, i64 1
- %pix_sp0.0.copyload = load i8* %addr, align 1
+ %pix_sp0.0.copyload = load i8, i8* %addr, align 1
%val = uitofp i8 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
ret float %vmull.i
@@ -89,7 +89,7 @@ define float @fct2(i16* nocapture %sp0)
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
%addr = getelementptr i16, i16* %sp0, i64 1
- %pix_sp0.0.copyload = load i16* %addr, align 1
+ %pix_sp0.0.copyload = load i16, i16* %addr, align 1
%val = uitofp i16 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
ret float %vmull.i
@@ -102,7 +102,7 @@ define float @fct3(i32* nocapture %sp0)
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
%addr = getelementptr i32, i32* %sp0, i64 1
- %pix_sp0.0.copyload = load i32* %addr, align 1
+ %pix_sp0.0.copyload = load i32, i32* %addr, align 1
%val = uitofp i32 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
ret float %vmull.i
@@ -116,7 +116,7 @@ define float @fct4(i64* nocapture %sp0)
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
%addr = getelementptr i64, i64* %sp0, i64 1
- %pix_sp0.0.copyload = load i64* %addr, align 1
+ %pix_sp0.0.copyload = load i64, i64* %addr, align 1
%val = uitofp i64 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
ret float %vmull.i
@@ -130,7 +130,7 @@ define float @fct5(i8* nocapture %sp0, i
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
%addr = getelementptr i8, i8* %sp0, i64 %offset
- %pix_sp0.0.copyload = load i8* %addr, align 1
+ %pix_sp0.0.copyload = load i8, i8* %addr, align 1
%val = uitofp i8 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
ret float %vmull.i
@@ -143,7 +143,7 @@ define float @fct6(i16* nocapture %sp0,
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
%addr = getelementptr i16, i16* %sp0, i64 %offset
- %pix_sp0.0.copyload = load i16* %addr, align 1
+ %pix_sp0.0.copyload = load i16, i16* %addr, align 1
%val = uitofp i16 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
ret float %vmull.i
@@ -156,7 +156,7 @@ define float @fct7(i32* nocapture %sp0,
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
%addr = getelementptr i32, i32* %sp0, i64 %offset
- %pix_sp0.0.copyload = load i32* %addr, align 1
+ %pix_sp0.0.copyload = load i32, i32* %addr, align 1
%val = uitofp i32 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
ret float %vmull.i
@@ -170,7 +170,7 @@ define float @fct8(i64* nocapture %sp0,
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
%addr = getelementptr i64, i64* %sp0, i64 %offset
- %pix_sp0.0.copyload = load i64* %addr, align 1
+ %pix_sp0.0.copyload = load i64, i64* %addr, align 1
%val = uitofp i64 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
ret float %vmull.i
@@ -185,7 +185,7 @@ define double @fct9(i8* nocapture %sp0)
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
%addr = getelementptr i8, i8* %sp0, i64 1
- %pix_sp0.0.copyload = load i8* %addr, align 1
+ %pix_sp0.0.copyload = load i8, i8* %addr, align 1
%val = uitofp i8 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
ret double %vmull.i
@@ -198,7 +198,7 @@ define double @fct10(i16* nocapture %sp0
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
%addr = getelementptr i16, i16* %sp0, i64 1
- %pix_sp0.0.copyload = load i16* %addr, align 1
+ %pix_sp0.0.copyload = load i16, i16* %addr, align 1
%val = uitofp i16 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
ret double %vmull.i
@@ -211,7 +211,7 @@ define double @fct11(i32* nocapture %sp0
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
%addr = getelementptr i32, i32* %sp0, i64 1
- %pix_sp0.0.copyload = load i32* %addr, align 1
+ %pix_sp0.0.copyload = load i32, i32* %addr, align 1
%val = uitofp i32 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
ret double %vmull.i
@@ -224,7 +224,7 @@ define double @fct12(i64* nocapture %sp0
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
%addr = getelementptr i64, i64* %sp0, i64 1
- %pix_sp0.0.copyload = load i64* %addr, align 1
+ %pix_sp0.0.copyload = load i64, i64* %addr, align 1
%val = uitofp i64 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
ret double %vmull.i
@@ -238,7 +238,7 @@ define double @fct13(i8* nocapture %sp0,
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
%addr = getelementptr i8, i8* %sp0, i64 %offset
- %pix_sp0.0.copyload = load i8* %addr, align 1
+ %pix_sp0.0.copyload = load i8, i8* %addr, align 1
%val = uitofp i8 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
ret double %vmull.i
@@ -251,7 +251,7 @@ define double @fct14(i16* nocapture %sp0
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
%addr = getelementptr i16, i16* %sp0, i64 %offset
- %pix_sp0.0.copyload = load i16* %addr, align 1
+ %pix_sp0.0.copyload = load i16, i16* %addr, align 1
%val = uitofp i16 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
ret double %vmull.i
@@ -264,7 +264,7 @@ define double @fct15(i32* nocapture %sp0
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
%addr = getelementptr i32, i32* %sp0, i64 %offset
- %pix_sp0.0.copyload = load i32* %addr, align 1
+ %pix_sp0.0.copyload = load i32, i32* %addr, align 1
%val = uitofp i32 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
ret double %vmull.i
@@ -277,7 +277,7 @@ define double @fct16(i64* nocapture %sp0
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
%addr = getelementptr i64, i64* %sp0, i64 %offset
- %pix_sp0.0.copyload = load i64* %addr, align 1
+ %pix_sp0.0.copyload = load i64, i64* %addr, align 1
%val = uitofp i64 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
ret double %vmull.i
@@ -293,7 +293,7 @@ entry:
%bitcast = ptrtoint i8* %sp0 to i64
%add = add i64 %bitcast, -1
%addr = inttoptr i64 %add to i8*
- %pix_sp0.0.copyload = load i8* %addr, align 1
+ %pix_sp0.0.copyload = load i8, i8* %addr, align 1
%val = uitofp i8 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
ret float %vmull.i
@@ -307,7 +307,7 @@ define float @fct18(i16* nocapture %sp0)
%bitcast = ptrtoint i16* %sp0 to i64
%add = add i64 %bitcast, 1
%addr = inttoptr i64 %add to i16*
- %pix_sp0.0.copyload = load i16* %addr, align 1
+ %pix_sp0.0.copyload = load i16, i16* %addr, align 1
%val = uitofp i16 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
ret float %vmull.i
@@ -321,7 +321,7 @@ define float @fct19(i32* nocapture %sp0)
%bitcast = ptrtoint i32* %sp0 to i64
%add = add i64 %bitcast, 1
%addr = inttoptr i64 %add to i32*
- %pix_sp0.0.copyload = load i32* %addr, align 1
+ %pix_sp0.0.copyload = load i32, i32* %addr, align 1
%val = uitofp i32 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
ret float %vmull.i
@@ -336,7 +336,7 @@ define float @fct20(i64* nocapture %sp0)
%bitcast = ptrtoint i64* %sp0 to i64
%add = add i64 %bitcast, 1
%addr = inttoptr i64 %add to i64*
- %pix_sp0.0.copyload = load i64* %addr, align 1
+ %pix_sp0.0.copyload = load i64, i64* %addr, align 1
%val = uitofp i64 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
ret float %vmull.i
@@ -353,7 +353,7 @@ entry:
%bitcast = ptrtoint i8* %sp0 to i64
%add = add i64 %bitcast, -1
%addr = inttoptr i64 %add to i8*
- %pix_sp0.0.copyload = load i8* %addr, align 1
+ %pix_sp0.0.copyload = load i8, i8* %addr, align 1
%val = uitofp i8 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
ret double %vmull.i
@@ -367,7 +367,7 @@ define double @fct22(i16* nocapture %sp0
%bitcast = ptrtoint i16* %sp0 to i64
%add = add i64 %bitcast, 1
%addr = inttoptr i64 %add to i16*
- %pix_sp0.0.copyload = load i16* %addr, align 1
+ %pix_sp0.0.copyload = load i16, i16* %addr, align 1
%val = uitofp i16 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
ret double %vmull.i
@@ -381,7 +381,7 @@ define double @fct23(i32* nocapture %sp0
%bitcast = ptrtoint i32* %sp0 to i64
%add = add i64 %bitcast, 1
%addr = inttoptr i64 %add to i32*
- %pix_sp0.0.copyload = load i32* %addr, align 1
+ %pix_sp0.0.copyload = load i32, i32* %addr, align 1
%val = uitofp i32 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
ret double %vmull.i
@@ -395,7 +395,7 @@ define double @fct24(i64* nocapture %sp0
%bitcast = ptrtoint i64* %sp0 to i64
%add = add i64 %bitcast, 1
%addr = inttoptr i64 %add to i64*
- %pix_sp0.0.copyload = load i64* %addr, align 1
+ %pix_sp0.0.copyload = load i64, i64* %addr, align 1
%val = uitofp i64 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
ret double %vmull.i
@@ -416,7 +416,7 @@ define float @sfct1(i8* nocapture %sp0)
; CHECK-A57-NEXT: fmul s0, [[REG]], [[REG]]
entry:
%addr = getelementptr i8, i8* %sp0, i64 1
- %pix_sp0.0.copyload = load i8* %addr, align 1
+ %pix_sp0.0.copyload = load i8, i8* %addr, align 1
%val = sitofp i8 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
ret float %vmull.i
@@ -430,7 +430,7 @@ define float @sfct2(i16* nocapture %sp0)
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
%addr = getelementptr i16, i16* %sp0, i64 1
- %pix_sp0.0.copyload = load i16* %addr, align 1
+ %pix_sp0.0.copyload = load i16, i16* %addr, align 1
%val = sitofp i16 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
ret float %vmull.i
@@ -443,7 +443,7 @@ define float @sfct3(i32* nocapture %sp0)
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
%addr = getelementptr i32, i32* %sp0, i64 1
- %pix_sp0.0.copyload = load i32* %addr, align 1
+ %pix_sp0.0.copyload = load i32, i32* %addr, align 1
%val = sitofp i32 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
ret float %vmull.i
@@ -457,7 +457,7 @@ define float @sfct4(i64* nocapture %sp0)
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
%addr = getelementptr i64, i64* %sp0, i64 1
- %pix_sp0.0.copyload = load i64* %addr, align 1
+ %pix_sp0.0.copyload = load i64, i64* %addr, align 1
%val = sitofp i64 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
ret float %vmull.i
@@ -477,7 +477,7 @@ define float @sfct5(i8* nocapture %sp0,
; CHECK-A57-NEXT: fmul s0, [[REG]], [[REG]]
entry:
%addr = getelementptr i8, i8* %sp0, i64 %offset
- %pix_sp0.0.copyload = load i8* %addr, align 1
+ %pix_sp0.0.copyload = load i8, i8* %addr, align 1
%val = sitofp i8 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
ret float %vmull.i
@@ -491,7 +491,7 @@ define float @sfct6(i16* nocapture %sp0,
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
%addr = getelementptr i16, i16* %sp0, i64 %offset
- %pix_sp0.0.copyload = load i16* %addr, align 1
+ %pix_sp0.0.copyload = load i16, i16* %addr, align 1
%val = sitofp i16 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
ret float %vmull.i
@@ -504,7 +504,7 @@ define float @sfct7(i32* nocapture %sp0,
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
%addr = getelementptr i32, i32* %sp0, i64 %offset
- %pix_sp0.0.copyload = load i32* %addr, align 1
+ %pix_sp0.0.copyload = load i32, i32* %addr, align 1
%val = sitofp i32 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
ret float %vmull.i
@@ -518,7 +518,7 @@ define float @sfct8(i64* nocapture %sp0,
; CHECK-NEXT: fmul s0, [[REG]], [[REG]]
entry:
%addr = getelementptr i64, i64* %sp0, i64 %offset
- %pix_sp0.0.copyload = load i64* %addr, align 1
+ %pix_sp0.0.copyload = load i64, i64* %addr, align 1
%val = sitofp i64 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
ret float %vmull.i
@@ -532,7 +532,7 @@ define double @sfct9(i8* nocapture %sp0)
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
%addr = getelementptr i8, i8* %sp0, i64 1
- %pix_sp0.0.copyload = load i8* %addr, align 1
+ %pix_sp0.0.copyload = load i8, i8* %addr, align 1
%val = sitofp i8 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
ret double %vmull.i
@@ -551,7 +551,7 @@ define double @sfct10(i16* nocapture %sp
; CHECK-A57-NEXT: fmul d0, [[REG]], [[REG]]
entry:
%addr = getelementptr i16, i16* %sp0, i64 1
- %pix_sp0.0.copyload = load i16* %addr, align 1
+ %pix_sp0.0.copyload = load i16, i16* %addr, align 1
%val = sitofp i16 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
ret double %vmull.i
@@ -565,7 +565,7 @@ define double @sfct11(i32* nocapture %sp
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
%addr = getelementptr i32, i32* %sp0, i64 1
- %pix_sp0.0.copyload = load i32* %addr, align 1
+ %pix_sp0.0.copyload = load i32, i32* %addr, align 1
%val = sitofp i32 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
ret double %vmull.i
@@ -578,7 +578,7 @@ define double @sfct12(i64* nocapture %sp
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
%addr = getelementptr i64, i64* %sp0, i64 1
- %pix_sp0.0.copyload = load i64* %addr, align 1
+ %pix_sp0.0.copyload = load i64, i64* %addr, align 1
%val = sitofp i64 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
ret double %vmull.i
@@ -592,7 +592,7 @@ define double @sfct13(i8* nocapture %sp0
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
%addr = getelementptr i8, i8* %sp0, i64 %offset
- %pix_sp0.0.copyload = load i8* %addr, align 1
+ %pix_sp0.0.copyload = load i8, i8* %addr, align 1
%val = sitofp i8 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
ret double %vmull.i
@@ -611,7 +611,7 @@ define double @sfct14(i16* nocapture %sp
; CHECK-A57-NEXT: fmul d0, [[REG]], [[REG]]
entry:
%addr = getelementptr i16, i16* %sp0, i64 %offset
- %pix_sp0.0.copyload = load i16* %addr, align 1
+ %pix_sp0.0.copyload = load i16, i16* %addr, align 1
%val = sitofp i16 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
ret double %vmull.i
@@ -625,7 +625,7 @@ define double @sfct15(i32* nocapture %sp
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
%addr = getelementptr i32, i32* %sp0, i64 %offset
- %pix_sp0.0.copyload = load i32* %addr, align 1
+ %pix_sp0.0.copyload = load i32, i32* %addr, align 1
%val = sitofp i32 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
ret double %vmull.i
@@ -638,7 +638,7 @@ define double @sfct16(i64* nocapture %sp
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
%addr = getelementptr i64, i64* %sp0, i64 %offset
- %pix_sp0.0.copyload = load i64* %addr, align 1
+ %pix_sp0.0.copyload = load i64, i64* %addr, align 1
%val = sitofp i64 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
ret double %vmull.i
@@ -660,7 +660,7 @@ entry:
%bitcast = ptrtoint i8* %sp0 to i64
%add = add i64 %bitcast, -1
%addr = inttoptr i64 %add to i8*
- %pix_sp0.0.copyload = load i8* %addr, align 1
+ %pix_sp0.0.copyload = load i8, i8* %addr, align 1
%val = sitofp i8 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
ret float %vmull.i
@@ -675,7 +675,7 @@ define float @sfct18(i16* nocapture %sp0
%bitcast = ptrtoint i16* %sp0 to i64
%add = add i64 %bitcast, 1
%addr = inttoptr i64 %add to i16*
- %pix_sp0.0.copyload = load i16* %addr, align 1
+ %pix_sp0.0.copyload = load i16, i16* %addr, align 1
%val = sitofp i16 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
ret float %vmull.i
@@ -689,7 +689,7 @@ define float @sfct19(i32* nocapture %sp0
%bitcast = ptrtoint i32* %sp0 to i64
%add = add i64 %bitcast, 1
%addr = inttoptr i64 %add to i32*
- %pix_sp0.0.copyload = load i32* %addr, align 1
+ %pix_sp0.0.copyload = load i32, i32* %addr, align 1
%val = sitofp i32 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
ret float %vmull.i
@@ -704,7 +704,7 @@ define float @sfct20(i64* nocapture %sp0
%bitcast = ptrtoint i64* %sp0 to i64
%add = add i64 %bitcast, 1
%addr = inttoptr i64 %add to i64*
- %pix_sp0.0.copyload = load i64* %addr, align 1
+ %pix_sp0.0.copyload = load i64, i64* %addr, align 1
%val = sitofp i64 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
ret float %vmull.i
@@ -721,7 +721,7 @@ entry:
%bitcast = ptrtoint i8* %sp0 to i64
%add = add i64 %bitcast, -1
%addr = inttoptr i64 %add to i8*
- %pix_sp0.0.copyload = load i8* %addr, align 1
+ %pix_sp0.0.copyload = load i8, i8* %addr, align 1
%val = sitofp i8 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
ret double %vmull.i
@@ -741,7 +741,7 @@ define double @sfct22(i16* nocapture %sp
%bitcast = ptrtoint i16* %sp0 to i64
%add = add i64 %bitcast, 1
%addr = inttoptr i64 %add to i16*
- %pix_sp0.0.copyload = load i16* %addr, align 1
+ %pix_sp0.0.copyload = load i16, i16* %addr, align 1
%val = sitofp i16 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
ret double %vmull.i
@@ -756,7 +756,7 @@ define double @sfct23(i32* nocapture %sp
%bitcast = ptrtoint i32* %sp0 to i64
%add = add i64 %bitcast, 1
%addr = inttoptr i64 %add to i32*
- %pix_sp0.0.copyload = load i32* %addr, align 1
+ %pix_sp0.0.copyload = load i32, i32* %addr, align 1
%val = sitofp i32 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
ret double %vmull.i
@@ -770,7 +770,7 @@ define double @sfct24(i64* nocapture %sp
%bitcast = ptrtoint i64* %sp0 to i64
%add = add i64 %bitcast, 1
%addr = inttoptr i64 %add to i64*
- %pix_sp0.0.copyload = load i64* %addr, align 1
+ %pix_sp0.0.copyload = load i64, i64* %addr, align 1
%val = sitofp i64 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
ret double %vmull.i
@@ -787,7 +787,7 @@ entry:
%bitcast = ptrtoint i8* %sp0 to i64
%add = add i64 %bitcast, -1
%addr = inttoptr i64 %add to i8*
- %pix_sp0.0.copyload = load i8* %addr, align 1
+ %pix_sp0.0.copyload = load i8, i8* %addr, align 1
%val = sitofp i8 %pix_sp0.0.copyload to float
%vmull.i = fmul float %val, %val
ret float %vmull.i
@@ -800,7 +800,7 @@ define double @codesize_sfct11(i32* noca
; CHECK-NEXT: fmul d0, [[REG]], [[REG]]
entry:
%addr = getelementptr i32, i32* %sp0, i64 1
- %pix_sp0.0.copyload = load i32* %addr, align 1
+ %pix_sp0.0.copyload = load i32, i32* %addr, align 1
%val = sitofp i32 %pix_sp0.0.copyload to double
%vmull.i = fmul double %val, %val
ret double %vmull.i
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-sitofp-combine-chains.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-sitofp-combine-chains.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-sitofp-combine-chains.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-sitofp-combine-chains.ll Fri Feb 27 15:17:42 2015
@@ -14,7 +14,7 @@ define float @foo() {
; CHECK: ldr [[SREG:s[0-9]+]], [x[[VARBASE]],
; CHECK: str wzr, [x[[VARBASE]],
- %val = load i32* @var, align 4
+ %val = load i32, i32* @var, align 4
store i32 0, i32* @var, align 4
%fltval = sitofp i32 %val to float
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-spill-lr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-spill-lr.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-spill-lr.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-spill-lr.ll Fri Feb 27 15:17:42 2015
@@ -13,29 +13,29 @@ entry:
%idxprom = sext i32 %a to i64
%arrayidx = getelementptr inbounds [128 x i32], [128 x i32]* %stack, i64 0, i64 %idxprom
store i32 %b, i32* %arrayidx, align 4
- %1 = load volatile i32* @bar, align 4
- %2 = load volatile i32* @bar, align 4
- %3 = load volatile i32* @bar, align 4
- %4 = load volatile i32* @bar, align 4
- %5 = load volatile i32* @bar, align 4
- %6 = load volatile i32* @bar, align 4
- %7 = load volatile i32* @bar, align 4
- %8 = load volatile i32* @bar, align 4
- %9 = load volatile i32* @bar, align 4
- %10 = load volatile i32* @bar, align 4
- %11 = load volatile i32* @bar, align 4
- %12 = load volatile i32* @bar, align 4
- %13 = load volatile i32* @bar, align 4
- %14 = load volatile i32* @bar, align 4
- %15 = load volatile i32* @bar, align 4
- %16 = load volatile i32* @bar, align 4
- %17 = load volatile i32* @bar, align 4
- %18 = load volatile i32* @bar, align 4
- %19 = load volatile i32* @bar, align 4
- %20 = load volatile i32* @bar, align 4
+ %1 = load volatile i32, i32* @bar, align 4
+ %2 = load volatile i32, i32* @bar, align 4
+ %3 = load volatile i32, i32* @bar, align 4
+ %4 = load volatile i32, i32* @bar, align 4
+ %5 = load volatile i32, i32* @bar, align 4
+ %6 = load volatile i32, i32* @bar, align 4
+ %7 = load volatile i32, i32* @bar, align 4
+ %8 = load volatile i32, i32* @bar, align 4
+ %9 = load volatile i32, i32* @bar, align 4
+ %10 = load volatile i32, i32* @bar, align 4
+ %11 = load volatile i32, i32* @bar, align 4
+ %12 = load volatile i32, i32* @bar, align 4
+ %13 = load volatile i32, i32* @bar, align 4
+ %14 = load volatile i32, i32* @bar, align 4
+ %15 = load volatile i32, i32* @bar, align 4
+ %16 = load volatile i32, i32* @bar, align 4
+ %17 = load volatile i32, i32* @bar, align 4
+ %18 = load volatile i32, i32* @bar, align 4
+ %19 = load volatile i32, i32* @bar, align 4
+ %20 = load volatile i32, i32* @bar, align 4
%idxprom1 = sext i32 %c to i64
%arrayidx2 = getelementptr inbounds [128 x i32], [128 x i32]* %stack, i64 0, i64 %idxprom1
- %21 = load i32* %arrayidx2, align 4
+ %21 = load i32, i32* %arrayidx2, align 4
%factor = mul i32 %h, -2
%factor67 = mul i32 %g, -2
%factor68 = mul i32 %f, -2
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-spill.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-spill.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-spill.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-spill.ll Fri Feb 27 15:17:42 2015
@@ -8,7 +8,7 @@
; CHECK: st1.2d
define void @fpr128(<4 x float>* %p) nounwind ssp {
entry:
- %x = load <4 x float>* %p, align 16
+ %x = load <4 x float>, <4 x float>* %p, align 16
call void asm sideeffect "; inlineasm", "~{q0},~{q1},~{q2},~{q3},~{q4},~{q5},~{q6},~{q7},~{q8},~{q9},~{q10},~{q11},~{q12},~{q13},~{q14},~{q15},~{q16},~{q17},~{q18},~{q19},~{q20},~{q21},~{q22},~{q23},~{q24},~{q25},~{q26},~{q27},~{q28},~{q29},~{q30},~{q31},~{x0},~{x1},~{x2},~{x3},~{x4},~{x5},~{x6},~{x7},~{x8},~{x9},~{x10},~{x11},~{x12},~{x13},~{x14},~{x15},~{x16},~{x17},~{x18},~{x19},~{x20},~{x21},~{x22},~{x23},~{x24},~{x25},~{x26},~{x27},~{x28},~{fp},~{lr},~{sp},~{memory}"() nounwind
store <4 x float> %x, <4 x float>* %p, align 16
ret void
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-stack-no-frame.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-stack-no-frame.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-stack-no-frame.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-stack-no-frame.ll Fri Feb 27 15:17:42 2015
@@ -9,10 +9,10 @@ define void @test_stack_no_frame() {
; CHECK: test_stack_no_frame
; CHECK: sub sp, sp, #[[STACKSIZE:[0-9]+]]
%local = alloca [20 x i64]
- %val = load volatile [20 x i64]* @global, align 8
+ %val = load volatile [20 x i64], [20 x i64]* @global, align 8
store volatile [20 x i64] %val, [20 x i64]* %local, align 8
- %val2 = load volatile [20 x i64]* %local, align 8
+ %val2 = load volatile [20 x i64], [20 x i64]* %local, align 8
store volatile [20 x i64] %val2, [20 x i64]* @global, align 8
; CHECK: add sp, sp, #[[STACKSIZE]]
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-strict-align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-strict-align.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-strict-align.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-strict-align.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@ define i32 @f0(i32* nocapture %p) nounwi
; CHECK: ldr w0, [x0]
; CHECK: ret
- %tmp = load i32* %p, align 2
+ %tmp = load i32, i32* %p, align 2
ret i32 %tmp
}
@@ -21,6 +21,6 @@ define i64 @f1(i64* nocapture %p) nounwi
; CHECK: ldr x0, [x0]
; CHECK: ret
- %tmp = load i64* %p, align 4
+ %tmp = load i64, i64* %p, align 4
ret i64 %tmp
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-tls-darwin.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-tls-darwin.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-tls-darwin.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-tls-darwin.ll Fri Feb 27 15:17:42 2015
@@ -13,6 +13,6 @@ define i8 @get_var() {
; CHECK: blr [[TLV_GET_ADDR]]
; CHECK: ldrb w0, [x0]
- %val = load i8* @var, align 1
+ %val = load i8, i8* @var, align 1
ret i8 %val
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-tls-dynamic-together.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-tls-dynamic-together.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-tls-dynamic-together.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-tls-dynamic-together.ll Fri Feb 27 15:17:42 2015
@@ -10,7 +10,7 @@
define i32 @test_generaldynamic() {
; CHECK-LABEL: test_generaldynamic:
- %val = load i32* @general_dynamic_var
+ %val = load i32, i32* @general_dynamic_var
ret i32 %val
; CHECK: .tlsdesccall general_dynamic_var
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-tls-dynamics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-tls-dynamics.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-tls-dynamics.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-tls-dynamics.ll Fri Feb 27 15:17:42 2015
@@ -6,7 +6,7 @@
define i32 @test_generaldynamic() {
; CHECK-LABEL: test_generaldynamic:
- %val = load i32* @general_dynamic_var
+ %val = load i32, i32* @general_dynamic_var
ret i32 %val
; FIXME: the adrp instructions are redundant (if harmless).
@@ -54,7 +54,7 @@ define i32* @test_generaldynamic_addr()
define i32 @test_localdynamic() {
; CHECK-LABEL: test_localdynamic:
- %val = load i32* @local_dynamic_var
+ %val = load i32, i32* @local_dynamic_var
ret i32 %val
; CHECK: adrp x[[TLSDESC_HI:[0-9]+]], :tlsdesc:_TLS_MODULE_BASE_
@@ -116,8 +116,8 @@ define i32* @test_localdynamic_addr() {
define i32 @test_localdynamic_deduplicate() {
; CHECK-LABEL: test_localdynamic_deduplicate:
- %val = load i32* @local_dynamic_var
- %val2 = load i32* @local_dynamic_var2
+ %val = load i32, i32* @local_dynamic_var
+ %val2 = load i32, i32* @local_dynamic_var2
%sum = add i32 %val, %val2
ret i32 %sum
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-tls-execs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-tls-execs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-tls-execs.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-tls-execs.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@
define i32 @test_initial_exec() {
; CHECK-LABEL: test_initial_exec:
- %val = load i32* @initial_exec_var
+ %val = load i32, i32* @initial_exec_var
; CHECK: adrp x[[GOTADDR:[0-9]+]], :gottprel:initial_exec_var
; CHECK: ldr x[[TP_OFFSET:[0-9]+]], [x[[GOTADDR]], :gottprel_lo12:initial_exec_var]
@@ -36,7 +36,7 @@ define i32* @test_initial_exec_addr() {
define i32 @test_local_exec() {
; CHECK-LABEL: test_local_exec:
- %val = load i32* @local_exec_var
+ %val = load i32, i32* @local_exec_var
; CHECK: movz [[TP_OFFSET:x[0-9]+]], #:tprel_g1:local_exec_var // encoding: [0bAAA{{[01]+}},A,0b101AAAAA,0x92]
; CHECK: movk [[TP_OFFSET]], #:tprel_g0_nc:local_exec_var
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-triv-disjoint-mem-access.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-triv-disjoint-mem-access.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-triv-disjoint-mem-access.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-triv-disjoint-mem-access.ll Fri Feb 27 15:17:42 2015
@@ -8,11 +8,11 @@ define i32 @func(i32 %i, i32 %j, i32 %k)
entry:
; CHECK: ldr {{w[0-9]+}}, [x[[REG:[0-9]+]], #4]
; CHECK: str {{w[0-9]+}}, [x[[REG]], #8]
- %0 = load i32** @a, align 8, !tbaa !1
+ %0 = load i32*, i32** @a, align 8, !tbaa !1
%arrayidx = getelementptr inbounds i32, i32* %0, i64 2
store i32 %i, i32* %arrayidx, align 4, !tbaa !5
%arrayidx1 = getelementptr inbounds i32, i32* %0, i64 1
- %1 = load i32* %arrayidx1, align 4, !tbaa !5
+ %1 = load i32, i32* %arrayidx1, align 4, !tbaa !5
%add = add nsw i32 %k, %i
store i32 %add, i32* @m, align 4, !tbaa !5
ret i32 %1
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-trn.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-trn.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-trn.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-trn.ll Fri Feb 27 15:17:42 2015
@@ -5,8 +5,8 @@ define <8 x i8> @vtrni8(<8 x i8>* %A, <8
;CHECK: trn1.8b
;CHECK: trn2.8b
;CHECK-NEXT: add.8b
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
%tmp5 = add <8 x i8> %tmp3, %tmp4
@@ -18,8 +18,8 @@ define <4 x i16> @vtrni16(<4 x i16>* %A,
;CHECK: trn1.4h
;CHECK: trn2.4h
;CHECK-NEXT: add.4h
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
%tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
%tmp5 = add <4 x i16> %tmp3, %tmp4
@@ -32,8 +32,8 @@ define <2 x i32> @vtrni32(<2 x i32>* %A,
;CHECK: zip1.2s
;CHECK: zip2.2s
;CHECK-NEXT: add.2s
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
%tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 0, i32 2>
%tmp4 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 3>
%tmp5 = add <2 x i32> %tmp3, %tmp4
@@ -45,8 +45,8 @@ define <2 x float> @vtrnf(<2 x float>* %
;CHECK: zip1.2s
;CHECK: zip2.2s
;CHECK-NEXT: fadd.2s
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
+ %tmp1 = load <2 x float>, <2 x float>* %A
+ %tmp2 = load <2 x float>, <2 x float>* %B
%tmp3 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <2 x i32> <i32 0, i32 2>
%tmp4 = shufflevector <2 x float> %tmp1, <2 x float> %tmp2, <2 x i32> <i32 1, i32 3>
%tmp5 = fadd <2 x float> %tmp3, %tmp4
@@ -58,8 +58,8 @@ define <16 x i8> @vtrnQi8(<16 x i8>* %A,
;CHECK: trn1.16b
;CHECK: trn2.16b
;CHECK-NEXT: add.16b
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
%tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
%tmp5 = add <16 x i8> %tmp3, %tmp4
@@ -71,8 +71,8 @@ define <8 x i16> @vtrnQi16(<8 x i16>* %A
;CHECK: trn1.8h
;CHECK: trn2.8h
;CHECK-NEXT: add.8h
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
%tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
%tmp5 = add <8 x i16> %tmp3, %tmp4
@@ -84,8 +84,8 @@ define <4 x i32> @vtrnQi32(<4 x i32>* %A
;CHECK: trn1.4s
;CHECK: trn2.4s
;CHECK-NEXT: add.4s
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i32>, <4 x i32>* %B
%tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
%tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
%tmp5 = add <4 x i32> %tmp3, %tmp4
@@ -97,8 +97,8 @@ define <4 x float> @vtrnQf(<4 x float>*
;CHECK: trn1.4s
;CHECK: trn2.4s
;CHECK-NEXT: fadd.4s
- %tmp1 = load <4 x float>* %A
- %tmp2 = load <4 x float>* %B
+ %tmp1 = load <4 x float>, <4 x float>* %A
+ %tmp2 = load <4 x float>, <4 x float>* %B
%tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
%tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
%tmp5 = fadd <4 x float> %tmp3, %tmp4
@@ -112,8 +112,8 @@ define <8 x i8> @vtrni8_undef(<8 x i8>*
;CHECK: trn1.8b
;CHECK: trn2.8b
;CHECK-NEXT: add.8b
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 undef, i32 2, i32 10, i32 undef, i32 12, i32 6, i32 14>
%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 undef, i32 undef, i32 15>
%tmp5 = add <8 x i8> %tmp3, %tmp4
@@ -125,8 +125,8 @@ define <8 x i16> @vtrnQi16_undef(<8 x i1
;CHECK: trn1.8h
;CHECK: trn2.8h
;CHECK-NEXT: add.8h
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 undef, i32 undef, i32 4, i32 12, i32 6, i32 14>
%tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 undef, i32 3, i32 11, i32 5, i32 13, i32 undef, i32 undef>
%tmp5 = add <8 x i16> %tmp3, %tmp4
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-trunc-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-trunc-store.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-trunc-store.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-trunc-store.ll Fri Feb 27 15:17:42 2015
@@ -25,7 +25,7 @@ define void @fct32(i32 %arg, i64 %var) {
; CHECK-NEXT: str w1, {{\[}}[[GLOBALADDR]], w[[OFFSETREGNUM]], sxtw #2]
; CHECK-NEXT: ret
bb:
- %.pre37 = load i32** @zptr32, align 8
+ %.pre37 = load i32*, i32** @zptr32, align 8
%dec = add nsw i32 %arg, -1
%idxprom8 = sext i32 %dec to i64
%arrayidx9 = getelementptr inbounds i32, i32* %.pre37, i64 %idxprom8
@@ -45,7 +45,7 @@ define void @fct16(i32 %arg, i64 %var) {
; CHECK-NEXT: strh w1, {{\[}}[[GLOBALADDR]], w[[OFFSETREGNUM]], sxtw #1]
; CHECK-NEXT: ret
bb:
- %.pre37 = load i16** @zptr16, align 8
+ %.pre37 = load i16*, i16** @zptr16, align 8
%dec = add nsw i32 %arg, -1
%idxprom8 = sext i32 %dec to i64
%arrayidx9 = getelementptr inbounds i16, i16* %.pre37, i64 %idxprom8
@@ -65,7 +65,7 @@ define void @fct8(i32 %arg, i64 %var) {
; CHECK-NEXT: sturb w1, {{\[}}[[ADDR]], #-1]
; CHECK-NEXT: ret
bb:
- %.pre37 = load i8** @zptr8, align 8
+ %.pre37 = load i8*, i8** @zptr8, align 8
%dec = add nsw i32 %arg, -1
%idxprom8 = sext i32 %dec to i64
%arrayidx9 = getelementptr inbounds i8, i8* %.pre37, i64 %idxprom8
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-unaligned_ldst.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-unaligned_ldst.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-unaligned_ldst.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-unaligned_ldst.ll Fri Feb 27 15:17:42 2015
@@ -9,7 +9,7 @@ entry:
; CHECK: str [[X0]], [x0]
%tmp1 = bitcast i8* %b to i64*
%tmp2 = bitcast i8* %a to i64*
- %tmp3 = load i64* %tmp1, align 1
+ %tmp3 = load i64, i64* %tmp1, align 1
store i64 %tmp3, i64* %tmp2, align 1
ret void
}
@@ -22,7 +22,7 @@ entry:
; CHECK: str [[W0]], [x0]
%tmp1 = bitcast i8* %b to i32*
%tmp2 = bitcast i8* %a to i32*
- %tmp3 = load i32* %tmp1, align 1
+ %tmp3 = load i32, i32* %tmp1, align 1
store i32 %tmp3, i32* %tmp2, align 1
ret void
}
@@ -35,7 +35,7 @@ entry:
; CHECK: strh [[W0]], [x0]
%tmp1 = bitcast i8* %b to i16*
%tmp2 = bitcast i8* %a to i16*
- %tmp3 = load i16* %tmp1, align 1
+ %tmp3 = load i16, i16* %tmp1, align 1
store i16 %tmp3, i16* %tmp2, align 1
ret void
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-uzp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-uzp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-uzp.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-uzp.ll Fri Feb 27 15:17:42 2015
@@ -5,8 +5,8 @@ define <8 x i8> @vuzpi8(<8 x i8>* %A, <8
;CHECK: uzp1.8b
;CHECK: uzp2.8b
;CHECK-NEXT: add.8b
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
%tmp5 = add <8 x i8> %tmp3, %tmp4
@@ -18,8 +18,8 @@ define <4 x i16> @vuzpi16(<4 x i16>* %A,
;CHECK: uzp1.4h
;CHECK: uzp2.4h
;CHECK-NEXT: add.4h
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
%tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
%tmp5 = add <4 x i16> %tmp3, %tmp4
@@ -31,8 +31,8 @@ define <16 x i8> @vuzpQi8(<16 x i8>* %A,
;CHECK: uzp1.16b
;CHECK: uzp2.16b
;CHECK-NEXT: add.16b
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
%tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
%tmp5 = add <16 x i8> %tmp3, %tmp4
@@ -44,8 +44,8 @@ define <8 x i16> @vuzpQi16(<8 x i16>* %A
;CHECK: uzp1.8h
;CHECK: uzp2.8h
;CHECK-NEXT: add.8h
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
%tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
%tmp5 = add <8 x i16> %tmp3, %tmp4
@@ -57,8 +57,8 @@ define <4 x i32> @vuzpQi32(<4 x i32>* %A
;CHECK: uzp1.4s
;CHECK: uzp2.4s
;CHECK-NEXT: add.4s
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i32>, <4 x i32>* %B
%tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
%tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
%tmp5 = add <4 x i32> %tmp3, %tmp4
@@ -70,8 +70,8 @@ define <4 x float> @vuzpQf(<4 x float>*
;CHECK: uzp1.4s
;CHECK: uzp2.4s
;CHECK-NEXT: fadd.4s
- %tmp1 = load <4 x float>* %A
- %tmp2 = load <4 x float>* %B
+ %tmp1 = load <4 x float>, <4 x float>* %A
+ %tmp2 = load <4 x float>, <4 x float>* %B
%tmp3 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
%tmp4 = shufflevector <4 x float> %tmp1, <4 x float> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
%tmp5 = fadd <4 x float> %tmp3, %tmp4
@@ -85,8 +85,8 @@ define <8 x i8> @vuzpi8_undef(<8 x i8>*
;CHECK: uzp1.8b
;CHECK: uzp2.8b
;CHECK-NEXT: add.8b
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 2, i32 undef, i32 undef, i32 8, i32 10, i32 12, i32 14>
%tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 undef, i32 undef, i32 13, i32 15>
%tmp5 = add <8 x i8> %tmp3, %tmp4
@@ -98,8 +98,8 @@ define <8 x i16> @vuzpQi16_undef(<8 x i1
;CHECK: uzp1.8h
;CHECK: uzp2.8h
;CHECK-NEXT: add.8h
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 undef, i32 4, i32 undef, i32 8, i32 10, i32 12, i32 14>
%tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 undef, i32 undef, i32 11, i32 13, i32 15>
%tmp5 = add <8 x i16> %tmp3, %tmp4
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vabs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vabs.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vabs.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vabs.ll Fri Feb 27 15:17:42 2015
@@ -4,8 +4,8 @@
define <8 x i16> @sabdl8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: sabdl8h:
;CHECK: sabdl.8h
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
%tmp4 = zext <8 x i8> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
@@ -14,8 +14,8 @@ define <8 x i16> @sabdl8h(<8 x i8>* %A,
define <4 x i32> @sabdl4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: sabdl4s:
;CHECK: sabdl.4s
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
%tmp4 = zext <4 x i16> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
@@ -24,8 +24,8 @@ define <4 x i32> @sabdl4s(<4 x i16>* %A,
define <2 x i64> @sabdl2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: sabdl2d:
;CHECK: sabdl.2d
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
%tmp3 = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
%tmp4 = zext <2 x i32> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@@ -34,8 +34,8 @@ define <2 x i64> @sabdl2d(<2 x i32>* %A,
define <8 x i16> @sabdl2_8h(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: sabdl2_8h:
;CHECK: sabdl2.8h
- %load1 = load <16 x i8>* %A
- %load2 = load <16 x i8>* %B
+ %load1 = load <16 x i8>, <16 x i8>* %A
+ %load2 = load <16 x i8>, <16 x i8>* %B
%tmp1 = shufflevector <16 x i8> %load1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%tmp2 = shufflevector <16 x i8> %load2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%tmp3 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
@@ -46,8 +46,8 @@ define <8 x i16> @sabdl2_8h(<16 x i8>* %
define <4 x i32> @sabdl2_4s(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: sabdl2_4s:
;CHECK: sabdl2.4s
- %load1 = load <8 x i16>* %A
- %load2 = load <8 x i16>* %B
+ %load1 = load <8 x i16>, <8 x i16>* %A
+ %load2 = load <8 x i16>, <8 x i16>* %B
%tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%tmp3 = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -58,8 +58,8 @@ define <4 x i32> @sabdl2_4s(<8 x i16>* %
define <2 x i64> @sabdl2_2d(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: sabdl2_2d:
;CHECK: sabdl2.2d
- %load1 = load <4 x i32>* %A
- %load2 = load <4 x i32>* %B
+ %load1 = load <4 x i32>, <4 x i32>* %A
+ %load2 = load <4 x i32>, <4 x i32>* %B
%tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%tmp3 = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -70,8 +70,8 @@ define <2 x i64> @sabdl2_2d(<4 x i32>* %
define <8 x i16> @uabdl8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: uabdl8h:
;CHECK: uabdl.8h
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
%tmp4 = zext <8 x i8> %tmp3 to <8 x i16>
ret <8 x i16> %tmp4
@@ -80,8 +80,8 @@ define <8 x i16> @uabdl8h(<8 x i8>* %A,
define <4 x i32> @uabdl4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: uabdl4s:
;CHECK: uabdl.4s
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
%tmp4 = zext <4 x i16> %tmp3 to <4 x i32>
ret <4 x i32> %tmp4
@@ -90,8 +90,8 @@ define <4 x i32> @uabdl4s(<4 x i16>* %A,
define <2 x i64> @uabdl2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: uabdl2d:
;CHECK: uabdl.2d
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
%tmp3 = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
%tmp4 = zext <2 x i32> %tmp3 to <2 x i64>
ret <2 x i64> %tmp4
@@ -100,8 +100,8 @@ define <2 x i64> @uabdl2d(<2 x i32>* %A,
define <8 x i16> @uabdl2_8h(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: uabdl2_8h:
;CHECK: uabdl2.8h
- %load1 = load <16 x i8>* %A
- %load2 = load <16 x i8>* %B
+ %load1 = load <16 x i8>, <16 x i8>* %A
+ %load2 = load <16 x i8>, <16 x i8>* %B
%tmp1 = shufflevector <16 x i8> %load1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%tmp2 = shufflevector <16 x i8> %load2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
 %tmp3 = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
@@ -113,8 +113,8 @@ define <8 x i16> @uabdl2_8h(<16 x i8>* %
define <4 x i32> @uabdl2_4s(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: uabdl2_4s:
;CHECK: uabdl2.4s
- %load1 = load <8 x i16>* %A
- %load2 = load <8 x i16>* %B
+ %load1 = load <8 x i16>, <8 x i16>* %A
+ %load2 = load <8 x i16>, <8 x i16>* %B
%tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%tmp3 = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -125,8 +125,8 @@ define <4 x i32> @uabdl2_4s(<8 x i16>* %
define <2 x i64> @uabdl2_2d(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: uabdl2_2d:
;CHECK: uabdl2.2d
- %load1 = load <4 x i32>* %A
- %load2 = load <4 x i32>* %B
+ %load1 = load <4 x i32>, <4 x i32>* %A
+ %load2 = load <4 x i32>, <4 x i32>* %B
%tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%tmp3 = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -137,8 +137,8 @@ define <2 x i64> @uabdl2_2d(<4 x i32>* %
define <2 x float> @fabd_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
;CHECK-LABEL: fabd_2s:
;CHECK: fabd.2s
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
+ %tmp1 = load <2 x float>, <2 x float>* %A
+ %tmp2 = load <2 x float>, <2 x float>* %B
%tmp3 = call <2 x float> @llvm.aarch64.neon.fabd.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
ret <2 x float> %tmp3
}
@@ -146,8 +146,8 @@ define <2 x float> @fabd_2s(<2 x float>*
define <4 x float> @fabd_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
;CHECK-LABEL: fabd_4s:
;CHECK: fabd.4s
- %tmp1 = load <4 x float>* %A
- %tmp2 = load <4 x float>* %B
+ %tmp1 = load <4 x float>, <4 x float>* %A
+ %tmp2 = load <4 x float>, <4 x float>* %B
%tmp3 = call <4 x float> @llvm.aarch64.neon.fabd.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
ret <4 x float> %tmp3
}
@@ -155,8 +155,8 @@ define <4 x float> @fabd_4s(<4 x float>*
define <2 x double> @fabd_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
;CHECK-LABEL: fabd_2d:
;CHECK: fabd.2d
- %tmp1 = load <2 x double>* %A
- %tmp2 = load <2 x double>* %B
+ %tmp1 = load <2 x double>, <2 x double>* %A
+ %tmp2 = load <2 x double>, <2 x double>* %B
%tmp3 = call <2 x double> @llvm.aarch64.neon.fabd.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
ret <2 x double> %tmp3
}
@@ -168,8 +168,8 @@ declare <2 x double> @llvm.aarch64.neon.
define <8 x i8> @sabd_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: sabd_8b:
;CHECK: sabd.8b
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
ret <8 x i8> %tmp3
}
@@ -177,8 +177,8 @@ define <8 x i8> @sabd_8b(<8 x i8>* %A, <
define <16 x i8> @sabd_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: sabd_16b:
;CHECK: sabd.16b
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = call <16 x i8> @llvm.aarch64.neon.sabd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
ret <16 x i8> %tmp3
}
@@ -186,8 +186,8 @@ define <16 x i8> @sabd_16b(<16 x i8>* %A
define <4 x i16> @sabd_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: sabd_4h:
;CHECK: sabd.4h
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
ret <4 x i16> %tmp3
}
@@ -195,8 +195,8 @@ define <4 x i16> @sabd_4h(<4 x i16>* %A,
define <8 x i16> @sabd_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: sabd_8h:
;CHECK: sabd.8h
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
%tmp3 = call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
ret <8 x i16> %tmp3
}
@@ -204,8 +204,8 @@ define <8 x i16> @sabd_8h(<8 x i16>* %A,
define <2 x i32> @sabd_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: sabd_2s:
;CHECK: sabd.2s
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
%tmp3 = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
ret <2 x i32> %tmp3
}
@@ -213,8 +213,8 @@ define <2 x i32> @sabd_2s(<2 x i32>* %A,
define <4 x i32> @sabd_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: sabd_4s:
;CHECK: sabd.4s
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i32>, <4 x i32>* %B
%tmp3 = call <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
ret <4 x i32> %tmp3
}
@@ -229,8 +229,8 @@ declare <4 x i32> @llvm.aarch64.neon.sab
define <8 x i8> @uabd_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: uabd_8b:
;CHECK: uabd.8b
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
ret <8 x i8> %tmp3
}
@@ -238,8 +238,8 @@ define <8 x i8> @uabd_8b(<8 x i8>* %A, <
define <16 x i8> @uabd_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: uabd_16b:
;CHECK: uabd.16b
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = call <16 x i8> @llvm.aarch64.neon.uabd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
ret <16 x i8> %tmp3
}
@@ -247,8 +247,8 @@ define <16 x i8> @uabd_16b(<16 x i8>* %A
define <4 x i16> @uabd_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: uabd_4h:
;CHECK: uabd.4h
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
ret <4 x i16> %tmp3
}
@@ -256,8 +256,8 @@ define <4 x i16> @uabd_4h(<4 x i16>* %A,
define <8 x i16> @uabd_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: uabd_8h:
;CHECK: uabd.8h
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
%tmp3 = call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
ret <8 x i16> %tmp3
}
@@ -265,8 +265,8 @@ define <8 x i16> @uabd_8h(<8 x i16>* %A,
define <2 x i32> @uabd_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: uabd_2s:
;CHECK: uabd.2s
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
%tmp3 = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
ret <2 x i32> %tmp3
}
@@ -274,8 +274,8 @@ define <2 x i32> @uabd_2s(<2 x i32>* %A,
define <4 x i32> @uabd_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: uabd_4s:
;CHECK: uabd.4s
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i32>, <4 x i32>* %B
%tmp3 = call <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
ret <4 x i32> %tmp3
}
@@ -290,7 +290,7 @@ declare <4 x i32> @llvm.aarch64.neon.uab
define <8 x i8> @sqabs_8b(<8 x i8>* %A) nounwind {
;CHECK-LABEL: sqabs_8b:
;CHECK: sqabs.8b
- %tmp1 = load <8 x i8>* %A
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp3 = call <8 x i8> @llvm.aarch64.neon.sqabs.v8i8(<8 x i8> %tmp1)
ret <8 x i8> %tmp3
}
@@ -298,7 +298,7 @@ define <8 x i8> @sqabs_8b(<8 x i8>* %A)
define <16 x i8> @sqabs_16b(<16 x i8>* %A) nounwind {
;CHECK-LABEL: sqabs_16b:
;CHECK: sqabs.16b
- %tmp1 = load <16 x i8>* %A
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp3 = call <16 x i8> @llvm.aarch64.neon.sqabs.v16i8(<16 x i8> %tmp1)
ret <16 x i8> %tmp3
}
@@ -306,7 +306,7 @@ define <16 x i8> @sqabs_16b(<16 x i8>* %
define <4 x i16> @sqabs_4h(<4 x i16>* %A) nounwind {
;CHECK-LABEL: sqabs_4h:
;CHECK: sqabs.4h
- %tmp1 = load <4 x i16>* %A
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp3 = call <4 x i16> @llvm.aarch64.neon.sqabs.v4i16(<4 x i16> %tmp1)
ret <4 x i16> %tmp3
}
@@ -314,7 +314,7 @@ define <4 x i16> @sqabs_4h(<4 x i16>* %A
define <8 x i16> @sqabs_8h(<8 x i16>* %A) nounwind {
;CHECK-LABEL: sqabs_8h:
;CHECK: sqabs.8h
- %tmp1 = load <8 x i16>* %A
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp3 = call <8 x i16> @llvm.aarch64.neon.sqabs.v8i16(<8 x i16> %tmp1)
ret <8 x i16> %tmp3
}
@@ -322,7 +322,7 @@ define <8 x i16> @sqabs_8h(<8 x i16>* %A
define <2 x i32> @sqabs_2s(<2 x i32>* %A) nounwind {
;CHECK-LABEL: sqabs_2s:
;CHECK: sqabs.2s
- %tmp1 = load <2 x i32>* %A
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp3 = call <2 x i32> @llvm.aarch64.neon.sqabs.v2i32(<2 x i32> %tmp1)
ret <2 x i32> %tmp3
}
@@ -330,7 +330,7 @@ define <2 x i32> @sqabs_2s(<2 x i32>* %A
define <4 x i32> @sqabs_4s(<4 x i32>* %A) nounwind {
;CHECK-LABEL: sqabs_4s:
;CHECK: sqabs.4s
- %tmp1 = load <4 x i32>* %A
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp3 = call <4 x i32> @llvm.aarch64.neon.sqabs.v4i32(<4 x i32> %tmp1)
ret <4 x i32> %tmp3
}
@@ -345,7 +345,7 @@ declare <4 x i32> @llvm.aarch64.neon.sqa
define <8 x i8> @sqneg_8b(<8 x i8>* %A) nounwind {
;CHECK-LABEL: sqneg_8b:
;CHECK: sqneg.8b
- %tmp1 = load <8 x i8>* %A
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp3 = call <8 x i8> @llvm.aarch64.neon.sqneg.v8i8(<8 x i8> %tmp1)
ret <8 x i8> %tmp3
}
@@ -353,7 +353,7 @@ define <8 x i8> @sqneg_8b(<8 x i8>* %A)
define <16 x i8> @sqneg_16b(<16 x i8>* %A) nounwind {
;CHECK-LABEL: sqneg_16b:
;CHECK: sqneg.16b
- %tmp1 = load <16 x i8>* %A
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp3 = call <16 x i8> @llvm.aarch64.neon.sqneg.v16i8(<16 x i8> %tmp1)
ret <16 x i8> %tmp3
}
@@ -361,7 +361,7 @@ define <16 x i8> @sqneg_16b(<16 x i8>* %
define <4 x i16> @sqneg_4h(<4 x i16>* %A) nounwind {
;CHECK-LABEL: sqneg_4h:
;CHECK: sqneg.4h
- %tmp1 = load <4 x i16>* %A
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp3 = call <4 x i16> @llvm.aarch64.neon.sqneg.v4i16(<4 x i16> %tmp1)
ret <4 x i16> %tmp3
}
@@ -369,7 +369,7 @@ define <4 x i16> @sqneg_4h(<4 x i16>* %A
define <8 x i16> @sqneg_8h(<8 x i16>* %A) nounwind {
;CHECK-LABEL: sqneg_8h:
;CHECK: sqneg.8h
- %tmp1 = load <8 x i16>* %A
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp3 = call <8 x i16> @llvm.aarch64.neon.sqneg.v8i16(<8 x i16> %tmp1)
ret <8 x i16> %tmp3
}
@@ -377,7 +377,7 @@ define <8 x i16> @sqneg_8h(<8 x i16>* %A
define <2 x i32> @sqneg_2s(<2 x i32>* %A) nounwind {
;CHECK-LABEL: sqneg_2s:
;CHECK: sqneg.2s
- %tmp1 = load <2 x i32>* %A
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp3 = call <2 x i32> @llvm.aarch64.neon.sqneg.v2i32(<2 x i32> %tmp1)
ret <2 x i32> %tmp3
}
@@ -385,7 +385,7 @@ define <2 x i32> @sqneg_2s(<2 x i32>* %A
define <4 x i32> @sqneg_4s(<4 x i32>* %A) nounwind {
;CHECK-LABEL: sqneg_4s:
;CHECK: sqneg.4s
- %tmp1 = load <4 x i32>* %A
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp3 = call <4 x i32> @llvm.aarch64.neon.sqneg.v4i32(<4 x i32> %tmp1)
ret <4 x i32> %tmp3
}
@@ -400,7 +400,7 @@ declare <4 x i32> @llvm.aarch64.neon.sqn
define <8 x i8> @abs_8b(<8 x i8>* %A) nounwind {
;CHECK-LABEL: abs_8b:
;CHECK: abs.8b
- %tmp1 = load <8 x i8>* %A
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp3 = call <8 x i8> @llvm.aarch64.neon.abs.v8i8(<8 x i8> %tmp1)
ret <8 x i8> %tmp3
}
@@ -408,7 +408,7 @@ define <8 x i8> @abs_8b(<8 x i8>* %A) no
define <16 x i8> @abs_16b(<16 x i8>* %A) nounwind {
;CHECK-LABEL: abs_16b:
;CHECK: abs.16b
- %tmp1 = load <16 x i8>* %A
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp3 = call <16 x i8> @llvm.aarch64.neon.abs.v16i8(<16 x i8> %tmp1)
ret <16 x i8> %tmp3
}
@@ -416,7 +416,7 @@ define <16 x i8> @abs_16b(<16 x i8>* %A)
define <4 x i16> @abs_4h(<4 x i16>* %A) nounwind {
;CHECK-LABEL: abs_4h:
;CHECK: abs.4h
- %tmp1 = load <4 x i16>* %A
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp3 = call <4 x i16> @llvm.aarch64.neon.abs.v4i16(<4 x i16> %tmp1)
ret <4 x i16> %tmp3
}
@@ -424,7 +424,7 @@ define <4 x i16> @abs_4h(<4 x i16>* %A)
define <8 x i16> @abs_8h(<8 x i16>* %A) nounwind {
;CHECK-LABEL: abs_8h:
;CHECK: abs.8h
- %tmp1 = load <8 x i16>* %A
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp3 = call <8 x i16> @llvm.aarch64.neon.abs.v8i16(<8 x i16> %tmp1)
ret <8 x i16> %tmp3
}
@@ -432,7 +432,7 @@ define <8 x i16> @abs_8h(<8 x i16>* %A)
define <2 x i32> @abs_2s(<2 x i32>* %A) nounwind {
;CHECK-LABEL: abs_2s:
;CHECK: abs.2s
- %tmp1 = load <2 x i32>* %A
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp3 = call <2 x i32> @llvm.aarch64.neon.abs.v2i32(<2 x i32> %tmp1)
ret <2 x i32> %tmp3
}
@@ -440,7 +440,7 @@ define <2 x i32> @abs_2s(<2 x i32>* %A)
define <4 x i32> @abs_4s(<4 x i32>* %A) nounwind {
;CHECK-LABEL: abs_4s:
;CHECK: abs.4s
- %tmp1 = load <4 x i32>* %A
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp3 = call <4 x i32> @llvm.aarch64.neon.abs.v4i32(<4 x i32> %tmp1)
ret <4 x i32> %tmp3
}
@@ -471,9 +471,9 @@ declare i64 @llvm.aarch64.neon.abs.i64(i
define <8 x i16> @sabal8h(<8 x i8>* %A, <8 x i8>* %B, <8 x i16>* %C) nounwind {
;CHECK-LABEL: sabal8h:
;CHECK: sabal.8h
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = load <8 x i16>* %C
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
+ %tmp3 = load <8 x i16>, <8 x i16>* %C
%tmp4 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
%tmp4.1 = zext <8 x i8> %tmp4 to <8 x i16>
%tmp5 = add <8 x i16> %tmp3, %tmp4.1
@@ -483,9 +483,9 @@ define <8 x i16> @sabal8h(<8 x i8>* %A,
define <4 x i32> @sabal4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
;CHECK-LABEL: sabal4s:
;CHECK: sabal.4s
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = load <4 x i32>* %C
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
+ %tmp3 = load <4 x i32>, <4 x i32>* %C
%tmp4 = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
%tmp4.1 = zext <4 x i16> %tmp4 to <4 x i32>
%tmp5 = add <4 x i32> %tmp3, %tmp4.1
@@ -495,9 +495,9 @@ define <4 x i32> @sabal4s(<4 x i16>* %A,
define <2 x i64> @sabal2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
;CHECK-LABEL: sabal2d:
;CHECK: sabal.2d
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = load <2 x i64>* %C
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
+ %tmp3 = load <2 x i64>, <2 x i64>* %C
%tmp4 = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
%tmp4.1 = zext <2 x i32> %tmp4 to <2 x i64>
%tmp4.1.1 = zext <2 x i32> %tmp4 to <2 x i64>
@@ -508,9 +508,9 @@ define <2 x i64> @sabal2d(<2 x i32>* %A,
define <8 x i16> @sabal2_8h(<16 x i8>* %A, <16 x i8>* %B, <8 x i16>* %C) nounwind {
;CHECK-LABEL: sabal2_8h:
;CHECK: sabal2.8h
- %load1 = load <16 x i8>* %A
- %load2 = load <16 x i8>* %B
- %tmp3 = load <8 x i16>* %C
+ %load1 = load <16 x i8>, <16 x i8>* %A
+ %load2 = load <16 x i8>, <16 x i8>* %B
+ %tmp3 = load <8 x i16>, <8 x i16>* %C
%tmp1 = shufflevector <16 x i8> %load1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%tmp2 = shufflevector <16 x i8> %load2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%tmp4 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
@@ -522,9 +522,9 @@ define <8 x i16> @sabal2_8h(<16 x i8>* %
define <4 x i32> @sabal2_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C) nounwind {
;CHECK-LABEL: sabal2_4s:
;CHECK: sabal2.4s
- %load1 = load <8 x i16>* %A
- %load2 = load <8 x i16>* %B
- %tmp3 = load <4 x i32>* %C
+ %load1 = load <8 x i16>, <8 x i16>* %A
+ %load2 = load <8 x i16>, <8 x i16>* %B
+ %tmp3 = load <4 x i32>, <4 x i32>* %C
%tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%tmp4 = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -536,9 +536,9 @@ define <4 x i32> @sabal2_4s(<8 x i16>* %
define <2 x i64> @sabal2_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C) nounwind {
;CHECK-LABEL: sabal2_2d:
;CHECK: sabal2.2d
- %load1 = load <4 x i32>* %A
- %load2 = load <4 x i32>* %B
- %tmp3 = load <2 x i64>* %C
+ %load1 = load <4 x i32>, <4 x i32>* %A
+ %load2 = load <4 x i32>, <4 x i32>* %B
+ %tmp3 = load <2 x i64>, <2 x i64>* %C
%tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%tmp4 = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -550,9 +550,9 @@ define <2 x i64> @sabal2_2d(<4 x i32>* %
define <8 x i16> @uabal8h(<8 x i8>* %A, <8 x i8>* %B, <8 x i16>* %C) nounwind {
;CHECK-LABEL: uabal8h:
;CHECK: uabal.8h
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
- %tmp3 = load <8 x i16>* %C
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
+ %tmp3 = load <8 x i16>, <8 x i16>* %C
%tmp4 = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
%tmp4.1 = zext <8 x i8> %tmp4 to <8 x i16>
%tmp5 = add <8 x i16> %tmp3, %tmp4.1
@@ -562,9 +562,9 @@ define <8 x i16> @uabal8h(<8 x i8>* %A,
define <4 x i32> @uabal4s(<4 x i16>* %A, <4 x i16>* %B, <4 x i32>* %C) nounwind {
;CHECK-LABEL: uabal4s:
;CHECK: uabal.4s
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
- %tmp3 = load <4 x i32>* %C
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
+ %tmp3 = load <4 x i32>, <4 x i32>* %C
%tmp4 = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
%tmp4.1 = zext <4 x i16> %tmp4 to <4 x i32>
%tmp5 = add <4 x i32> %tmp3, %tmp4.1
@@ -574,9 +574,9 @@ define <4 x i32> @uabal4s(<4 x i16>* %A,
define <2 x i64> @uabal2d(<2 x i32>* %A, <2 x i32>* %B, <2 x i64>* %C) nounwind {
;CHECK-LABEL: uabal2d:
;CHECK: uabal.2d
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
- %tmp3 = load <2 x i64>* %C
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
+ %tmp3 = load <2 x i64>, <2 x i64>* %C
%tmp4 = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
%tmp4.1 = zext <2 x i32> %tmp4 to <2 x i64>
%tmp5 = add <2 x i64> %tmp3, %tmp4.1
@@ -586,9 +586,9 @@ define <2 x i64> @uabal2d(<2 x i32>* %A,
define <8 x i16> @uabal2_8h(<16 x i8>* %A, <16 x i8>* %B, <8 x i16>* %C) nounwind {
;CHECK-LABEL: uabal2_8h:
;CHECK: uabal2.8h
- %load1 = load <16 x i8>* %A
- %load2 = load <16 x i8>* %B
- %tmp3 = load <8 x i16>* %C
+ %load1 = load <16 x i8>, <16 x i8>* %A
+ %load2 = load <16 x i8>, <16 x i8>* %B
+ %tmp3 = load <8 x i16>, <8 x i16>* %C
%tmp1 = shufflevector <16 x i8> %load1, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%tmp2 = shufflevector <16 x i8> %load2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%tmp4 = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
@@ -600,9 +600,9 @@ define <8 x i16> @uabal2_8h(<16 x i8>* %
define <4 x i32> @uabal2_4s(<8 x i16>* %A, <8 x i16>* %B, <4 x i32>* %C) nounwind {
;CHECK-LABEL: uabal2_4s:
;CHECK: uabal2.4s
- %load1 = load <8 x i16>* %A
- %load2 = load <8 x i16>* %B
- %tmp3 = load <4 x i32>* %C
+ %load1 = load <8 x i16>, <8 x i16>* %A
+ %load2 = load <8 x i16>, <8 x i16>* %B
+ %tmp3 = load <4 x i32>, <4 x i32>* %C
%tmp1 = shufflevector <8 x i16> %load1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%tmp2 = shufflevector <8 x i16> %load2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%tmp4 = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
@@ -614,9 +614,9 @@ define <4 x i32> @uabal2_4s(<8 x i16>* %
define <2 x i64> @uabal2_2d(<4 x i32>* %A, <4 x i32>* %B, <2 x i64>* %C) nounwind {
;CHECK-LABEL: uabal2_2d:
;CHECK: uabal2.2d
- %load1 = load <4 x i32>* %A
- %load2 = load <4 x i32>* %B
- %tmp3 = load <2 x i64>* %C
+ %load1 = load <4 x i32>, <4 x i32>* %A
+ %load2 = load <4 x i32>, <4 x i32>* %B
+ %tmp3 = load <2 x i64>, <2 x i64>* %C
%tmp1 = shufflevector <4 x i32> %load1, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%tmp2 = shufflevector <4 x i32> %load2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%tmp4 = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
@@ -628,10 +628,10 @@ define <2 x i64> @uabal2_2d(<4 x i32>* %
define <8 x i8> @saba_8b(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
;CHECK-LABEL: saba_8b:
;CHECK: saba.8b
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = call <8 x i8> @llvm.aarch64.neon.sabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- %tmp4 = load <8 x i8>* %C
+ %tmp4 = load <8 x i8>, <8 x i8>* %C
%tmp5 = add <8 x i8> %tmp3, %tmp4
ret <8 x i8> %tmp5
}
@@ -639,10 +639,10 @@ define <8 x i8> @saba_8b(<8 x i8>* %A, <
define <16 x i8> @saba_16b(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
;CHECK-LABEL: saba_16b:
;CHECK: saba.16b
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = call <16 x i8> @llvm.aarch64.neon.sabd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- %tmp4 = load <16 x i8>* %C
+ %tmp4 = load <16 x i8>, <16 x i8>* %C
%tmp5 = add <16 x i8> %tmp3, %tmp4
ret <16 x i8> %tmp5
}
@@ -650,10 +650,10 @@ define <16 x i8> @saba_16b(<16 x i8>* %A
define <4 x i16> @saba_4h(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
;CHECK-LABEL: saba_4h:
;CHECK: saba.4h
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = call <4 x i16> @llvm.aarch64.neon.sabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- %tmp4 = load <4 x i16>* %C
+ %tmp4 = load <4 x i16>, <4 x i16>* %C
%tmp5 = add <4 x i16> %tmp3, %tmp4
ret <4 x i16> %tmp5
}
@@ -661,10 +661,10 @@ define <4 x i16> @saba_4h(<4 x i16>* %A,
define <8 x i16> @saba_8h(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
;CHECK-LABEL: saba_8h:
;CHECK: saba.8h
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
%tmp3 = call <8 x i16> @llvm.aarch64.neon.sabd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- %tmp4 = load <8 x i16>* %C
+ %tmp4 = load <8 x i16>, <8 x i16>* %C
%tmp5 = add <8 x i16> %tmp3, %tmp4
ret <8 x i16> %tmp5
}
@@ -672,10 +672,10 @@ define <8 x i16> @saba_8h(<8 x i16>* %A,
define <2 x i32> @saba_2s(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
;CHECK-LABEL: saba_2s:
;CHECK: saba.2s
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
%tmp3 = call <2 x i32> @llvm.aarch64.neon.sabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- %tmp4 = load <2 x i32>* %C
+ %tmp4 = load <2 x i32>, <2 x i32>* %C
%tmp5 = add <2 x i32> %tmp3, %tmp4
ret <2 x i32> %tmp5
}
@@ -683,10 +683,10 @@ define <2 x i32> @saba_2s(<2 x i32>* %A,
define <4 x i32> @saba_4s(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
;CHECK-LABEL: saba_4s:
;CHECK: saba.4s
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i32>, <4 x i32>* %B
%tmp3 = call <4 x i32> @llvm.aarch64.neon.sabd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- %tmp4 = load <4 x i32>* %C
+ %tmp4 = load <4 x i32>, <4 x i32>* %C
%tmp5 = add <4 x i32> %tmp3, %tmp4
ret <4 x i32> %tmp5
}
@@ -694,10 +694,10 @@ define <4 x i32> @saba_4s(<4 x i32>* %A,
define <8 x i8> @uaba_8b(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
;CHECK-LABEL: uaba_8b:
;CHECK: uaba.8b
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = call <8 x i8> @llvm.aarch64.neon.uabd.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
- %tmp4 = load <8 x i8>* %C
+ %tmp4 = load <8 x i8>, <8 x i8>* %C
%tmp5 = add <8 x i8> %tmp3, %tmp4
ret <8 x i8> %tmp5
}
@@ -705,10 +705,10 @@ define <8 x i8> @uaba_8b(<8 x i8>* %A, <
define <16 x i8> @uaba_16b(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
;CHECK-LABEL: uaba_16b:
;CHECK: uaba.16b
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = call <16 x i8> @llvm.aarch64.neon.uabd.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
- %tmp4 = load <16 x i8>* %C
+ %tmp4 = load <16 x i8>, <16 x i8>* %C
%tmp5 = add <16 x i8> %tmp3, %tmp4
ret <16 x i8> %tmp5
}
@@ -716,10 +716,10 @@ define <16 x i8> @uaba_16b(<16 x i8>* %A
define <4 x i16> @uaba_4h(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
;CHECK-LABEL: uaba_4h:
;CHECK: uaba.4h
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = call <4 x i16> @llvm.aarch64.neon.uabd.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
- %tmp4 = load <4 x i16>* %C
+ %tmp4 = load <4 x i16>, <4 x i16>* %C
%tmp5 = add <4 x i16> %tmp3, %tmp4
ret <4 x i16> %tmp5
}
@@ -727,10 +727,10 @@ define <4 x i16> @uaba_4h(<4 x i16>* %A,
define <8 x i16> @uaba_8h(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
;CHECK-LABEL: uaba_8h:
;CHECK: uaba.8h
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
%tmp3 = call <8 x i16> @llvm.aarch64.neon.uabd.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
- %tmp4 = load <8 x i16>* %C
+ %tmp4 = load <8 x i16>, <8 x i16>* %C
%tmp5 = add <8 x i16> %tmp3, %tmp4
ret <8 x i16> %tmp5
}
@@ -738,10 +738,10 @@ define <8 x i16> @uaba_8h(<8 x i16>* %A,
define <2 x i32> @uaba_2s(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
;CHECK-LABEL: uaba_2s:
;CHECK: uaba.2s
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
%tmp3 = call <2 x i32> @llvm.aarch64.neon.uabd.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
- %tmp4 = load <2 x i32>* %C
+ %tmp4 = load <2 x i32>, <2 x i32>* %C
%tmp5 = add <2 x i32> %tmp3, %tmp4
ret <2 x i32> %tmp5
}
@@ -749,10 +749,10 @@ define <2 x i32> @uaba_2s(<2 x i32>* %A,
define <4 x i32> @uaba_4s(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
;CHECK-LABEL: uaba_4s:
;CHECK: uaba.4s
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i32>, <4 x i32>* %B
%tmp3 = call <4 x i32> @llvm.aarch64.neon.uabd.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
- %tmp4 = load <4 x i32>* %C
+ %tmp4 = load <4 x i32>, <4 x i32>* %C
%tmp5 = add <4 x i32> %tmp3, %tmp4
ret <4 x i32> %tmp5
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vadd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vadd.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vadd.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vadd.ll Fri Feb 27 15:17:42 2015
@@ -3,8 +3,8 @@
define <8 x i8> @addhn8b(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: addhn8b:
;CHECK: addhn.8b
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
%tmp3 = call <8 x i8> @llvm.aarch64.neon.addhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
ret <8 x i8> %tmp3
}
@@ -12,8 +12,8 @@ define <8 x i8> @addhn8b(<8 x i16>* %A,
define <4 x i16> @addhn4h(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: addhn4h:
;CHECK: addhn.4h
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i32>, <4 x i32>* %B
%tmp3 = call <4 x i16> @llvm.aarch64.neon.addhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2)
ret <4 x i16> %tmp3
}
@@ -21,8 +21,8 @@ define <4 x i16> @addhn4h(<4 x i32>* %A,
define <2 x i32> @addhn2s(<2 x i64>* %A, <2 x i64>* %B) nounwind {
;CHECK-LABEL: addhn2s:
;CHECK: addhn.2s
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
+ %tmp1 = load <2 x i64>, <2 x i64>* %A
+ %tmp2 = load <2 x i64>, <2 x i64>* %B
%tmp3 = call <2 x i32> @llvm.aarch64.neon.addhn.v2i32(<2 x i64> %tmp1, <2 x i64> %tmp2)
ret <2 x i32> %tmp3
}
@@ -65,8 +65,8 @@ declare <8 x i8> @llvm.aarch64.neon.addh
define <8 x i8> @raddhn8b(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: raddhn8b:
;CHECK: raddhn.8b
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
%tmp3 = call <8 x i8> @llvm.aarch64.neon.raddhn.v8i8(<8 x i16> %tmp1, <8 x i16> %tmp2)
ret <8 x i8> %tmp3
}
@@ -74,8 +74,8 @@ define <8 x i8> @raddhn8b(<8 x i16>* %A,
define <4 x i16> @raddhn4h(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: raddhn4h:
;CHECK: raddhn.4h
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i32>, <4 x i32>* %B
%tmp3 = call <4 x i16> @llvm.aarch64.neon.raddhn.v4i16(<4 x i32> %tmp1, <4 x i32> %tmp2)
ret <4 x i16> %tmp3
}
@@ -83,8 +83,8 @@ define <4 x i16> @raddhn4h(<4 x i32>* %A
define <2 x i32> @raddhn2s(<2 x i64>* %A, <2 x i64>* %B) nounwind {
;CHECK-LABEL: raddhn2s:
;CHECK: raddhn.2s
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
+ %tmp1 = load <2 x i64>, <2 x i64>* %A
+ %tmp2 = load <2 x i64>, <2 x i64>* %B
%tmp3 = call <2 x i32> @llvm.aarch64.neon.raddhn.v2i32(<2 x i64> %tmp1, <2 x i64> %tmp2)
ret <2 x i32> %tmp3
}
@@ -126,8 +126,8 @@ declare <8 x i8> @llvm.aarch64.neon.radd
define <8 x i16> @saddl8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: saddl8h:
;CHECK: saddl.8h
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = sext <8 x i8> %tmp1 to <8 x i16>
%tmp4 = sext <8 x i8> %tmp2 to <8 x i16>
%tmp5 = add <8 x i16> %tmp3, %tmp4
@@ -137,8 +137,8 @@ define <8 x i16> @saddl8h(<8 x i8>* %A,
define <4 x i32> @saddl4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: saddl4s:
;CHECK: saddl.4s
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = sext <4 x i16> %tmp1 to <4 x i32>
%tmp4 = sext <4 x i16> %tmp2 to <4 x i32>
%tmp5 = add <4 x i32> %tmp3, %tmp4
@@ -148,8 +148,8 @@ define <4 x i32> @saddl4s(<4 x i16>* %A,
define <2 x i64> @saddl2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: saddl2d:
;CHECK: saddl.2d
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
%tmp3 = sext <2 x i32> %tmp1 to <2 x i64>
%tmp4 = sext <2 x i32> %tmp2 to <2 x i64>
%tmp5 = add <2 x i64> %tmp3, %tmp4
@@ -207,8 +207,8 @@ define <2 x i64> @saddl2_2d(<4 x i32> %a
define <8 x i16> @uaddl8h(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: uaddl8h:
;CHECK: uaddl.8h
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = zext <8 x i8> %tmp1 to <8 x i16>
%tmp4 = zext <8 x i8> %tmp2 to <8 x i16>
%tmp5 = add <8 x i16> %tmp3, %tmp4
@@ -218,8 +218,8 @@ define <8 x i16> @uaddl8h(<8 x i8>* %A,
define <4 x i32> @uaddl4s(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: uaddl4s:
;CHECK: uaddl.4s
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = zext <4 x i16> %tmp1 to <4 x i32>
%tmp4 = zext <4 x i16> %tmp2 to <4 x i32>
%tmp5 = add <4 x i32> %tmp3, %tmp4
@@ -229,8 +229,8 @@ define <4 x i32> @uaddl4s(<4 x i16>* %A,
define <2 x i64> @uaddl2d(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: uaddl2d:
;CHECK: uaddl.2d
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
%tmp3 = zext <2 x i32> %tmp1 to <2 x i64>
%tmp4 = zext <2 x i32> %tmp2 to <2 x i64>
%tmp5 = add <2 x i64> %tmp3, %tmp4
@@ -289,8 +289,8 @@ define <2 x i64> @uaddl2_2d(<4 x i32> %a
define <8 x i16> @uaddw8h(<8 x i16>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: uaddw8h:
;CHECK: uaddw.8h
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i8>* %B
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = zext <8 x i8> %tmp2 to <8 x i16>
%tmp4 = add <8 x i16> %tmp1, %tmp3
ret <8 x i16> %tmp4
@@ -299,8 +299,8 @@ define <8 x i16> @uaddw8h(<8 x i16>* %A,
define <4 x i32> @uaddw4s(<4 x i32>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: uaddw4s:
;CHECK: uaddw.4s
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i16>* %B
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = zext <4 x i16> %tmp2 to <4 x i32>
%tmp4 = add <4 x i32> %tmp1, %tmp3
ret <4 x i32> %tmp4
@@ -309,8 +309,8 @@ define <4 x i32> @uaddw4s(<4 x i32>* %A,
define <2 x i64> @uaddw2d(<2 x i64>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: uaddw2d:
;CHECK: uaddw.2d
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i32>* %B
+ %tmp1 = load <2 x i64>, <2 x i64>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
%tmp3 = zext <2 x i32> %tmp2 to <2 x i64>
%tmp4 = add <2 x i64> %tmp1, %tmp3
ret <2 x i64> %tmp4
@@ -319,9 +319,9 @@ define <2 x i64> @uaddw2d(<2 x i64>* %A,
define <8 x i16> @uaddw2_8h(<8 x i16>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: uaddw2_8h:
;CHECK: uaddw2.8h
- %tmp1 = load <8 x i16>* %A
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
- %tmp2 = load <16 x i8>* %B
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
%high2 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%ext2 = zext <8 x i8> %high2 to <8 x i16>
@@ -332,9 +332,9 @@ define <8 x i16> @uaddw2_8h(<8 x i16>* %
define <4 x i32> @uaddw2_4s(<4 x i32>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: uaddw2_4s:
;CHECK: uaddw2.4s
- %tmp1 = load <4 x i32>* %A
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
- %tmp2 = load <8 x i16>* %B
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
%high2 = shufflevector <8 x i16> %tmp2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%ext2 = zext <4 x i16> %high2 to <4 x i32>
@@ -345,9 +345,9 @@ define <4 x i32> @uaddw2_4s(<4 x i32>* %
define <2 x i64> @uaddw2_2d(<2 x i64>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: uaddw2_2d:
;CHECK: uaddw2.2d
- %tmp1 = load <2 x i64>* %A
+ %tmp1 = load <2 x i64>, <2 x i64>* %A
- %tmp2 = load <4 x i32>* %B
+ %tmp2 = load <4 x i32>, <4 x i32>* %B
%high2 = shufflevector <4 x i32> %tmp2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%ext2 = zext <2 x i32> %high2 to <2 x i64>
@@ -358,8 +358,8 @@ define <2 x i64> @uaddw2_2d(<2 x i64>* %
define <8 x i16> @saddw8h(<8 x i16>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: saddw8h:
;CHECK: saddw.8h
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i8>* %B
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = sext <8 x i8> %tmp2 to <8 x i16>
%tmp4 = add <8 x i16> %tmp1, %tmp3
ret <8 x i16> %tmp4
@@ -368,8 +368,8 @@ define <8 x i16> @saddw8h(<8 x i16>* %A,
define <4 x i32> @saddw4s(<4 x i32>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: saddw4s:
;CHECK: saddw.4s
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i16>* %B
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = sext <4 x i16> %tmp2 to <4 x i32>
%tmp4 = add <4 x i32> %tmp1, %tmp3
ret <4 x i32> %tmp4
@@ -378,8 +378,8 @@ define <4 x i32> @saddw4s(<4 x i32>* %A,
define <2 x i64> @saddw2d(<2 x i64>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: saddw2d:
;CHECK: saddw.2d
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i32>* %B
+ %tmp1 = load <2 x i64>, <2 x i64>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
%tmp3 = sext <2 x i32> %tmp2 to <2 x i64>
%tmp4 = add <2 x i64> %tmp1, %tmp3
ret <2 x i64> %tmp4
@@ -388,9 +388,9 @@ define <2 x i64> @saddw2d(<2 x i64>* %A,
define <8 x i16> @saddw2_8h(<8 x i16>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: saddw2_8h:
;CHECK: saddw2.8h
- %tmp1 = load <8 x i16>* %A
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
- %tmp2 = load <16 x i8>* %B
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
%high2 = shufflevector <16 x i8> %tmp2, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
%ext2 = sext <8 x i8> %high2 to <8 x i16>
@@ -401,9 +401,9 @@ define <8 x i16> @saddw2_8h(<8 x i16>* %
define <4 x i32> @saddw2_4s(<4 x i32>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: saddw2_4s:
;CHECK: saddw2.4s
- %tmp1 = load <4 x i32>* %A
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
- %tmp2 = load <8 x i16>* %B
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
%high2 = shufflevector <8 x i16> %tmp2, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
%ext2 = sext <4 x i16> %high2 to <4 x i32>
@@ -414,9 +414,9 @@ define <4 x i32> @saddw2_4s(<4 x i32>* %
define <2 x i64> @saddw2_2d(<2 x i64>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: saddw2_2d:
;CHECK: saddw2.2d
- %tmp1 = load <2 x i64>* %A
+ %tmp1 = load <2 x i64>, <2 x i64>* %A
- %tmp2 = load <4 x i32>* %B
+ %tmp2 = load <4 x i32>, <4 x i32>* %B
%high2 = shufflevector <4 x i32> %tmp2, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
%ext2 = sext <2 x i32> %high2 to <2 x i64>
@@ -427,7 +427,7 @@ define <2 x i64> @saddw2_2d(<2 x i64>* %
define <4 x i16> @saddlp4h(<8 x i8>* %A) nounwind {
;CHECK-LABEL: saddlp4h:
;CHECK: saddlp.4h
- %tmp1 = load <8 x i8>* %A
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp3 = call <4 x i16> @llvm.aarch64.neon.saddlp.v4i16.v8i8(<8 x i8> %tmp1)
ret <4 x i16> %tmp3
}
@@ -435,7 +435,7 @@ define <4 x i16> @saddlp4h(<8 x i8>* %A)
define <2 x i32> @saddlp2s(<4 x i16>* %A) nounwind {
;CHECK-LABEL: saddlp2s:
;CHECK: saddlp.2s
- %tmp1 = load <4 x i16>* %A
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp3 = call <2 x i32> @llvm.aarch64.neon.saddlp.v2i32.v4i16(<4 x i16> %tmp1)
ret <2 x i32> %tmp3
}
@@ -443,7 +443,7 @@ define <2 x i32> @saddlp2s(<4 x i16>* %A
define <1 x i64> @saddlp1d(<2 x i32>* %A) nounwind {
;CHECK-LABEL: saddlp1d:
;CHECK: saddlp.1d
- %tmp1 = load <2 x i32>* %A
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp3 = call <1 x i64> @llvm.aarch64.neon.saddlp.v1i64.v2i32(<2 x i32> %tmp1)
ret <1 x i64> %tmp3
}
@@ -451,7 +451,7 @@ define <1 x i64> @saddlp1d(<2 x i32>* %A
define <8 x i16> @saddlp8h(<16 x i8>* %A) nounwind {
;CHECK-LABEL: saddlp8h:
;CHECK: saddlp.8h
- %tmp1 = load <16 x i8>* %A
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp3 = call <8 x i16> @llvm.aarch64.neon.saddlp.v8i16.v16i8(<16 x i8> %tmp1)
ret <8 x i16> %tmp3
}
@@ -459,7 +459,7 @@ define <8 x i16> @saddlp8h(<16 x i8>* %A
define <4 x i32> @saddlp4s(<8 x i16>* %A) nounwind {
;CHECK-LABEL: saddlp4s:
;CHECK: saddlp.4s
- %tmp1 = load <8 x i16>* %A
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp3 = call <4 x i32> @llvm.aarch64.neon.saddlp.v4i32.v8i16(<8 x i16> %tmp1)
ret <4 x i32> %tmp3
}
@@ -467,7 +467,7 @@ define <4 x i32> @saddlp4s(<8 x i16>* %A
define <2 x i64> @saddlp2d(<4 x i32>* %A) nounwind {
;CHECK-LABEL: saddlp2d:
;CHECK: saddlp.2d
- %tmp1 = load <4 x i32>* %A
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp3 = call <2 x i64> @llvm.aarch64.neon.saddlp.v2i64.v4i32(<4 x i32> %tmp1)
ret <2 x i64> %tmp3
}
@@ -483,7 +483,7 @@ declare <2 x i64> @llvm.aarch64.neon.sad
define <4 x i16> @uaddlp4h(<8 x i8>* %A) nounwind {
;CHECK-LABEL: uaddlp4h:
;CHECK: uaddlp.4h
- %tmp1 = load <8 x i8>* %A
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp3 = call <4 x i16> @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8> %tmp1)
ret <4 x i16> %tmp3
}
@@ -491,7 +491,7 @@ define <4 x i16> @uaddlp4h(<8 x i8>* %A)
define <2 x i32> @uaddlp2s(<4 x i16>* %A) nounwind {
;CHECK-LABEL: uaddlp2s:
;CHECK: uaddlp.2s
- %tmp1 = load <4 x i16>* %A
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp3 = call <2 x i32> @llvm.aarch64.neon.uaddlp.v2i32.v4i16(<4 x i16> %tmp1)
ret <2 x i32> %tmp3
}
@@ -499,7 +499,7 @@ define <2 x i32> @uaddlp2s(<4 x i16>* %A
define <1 x i64> @uaddlp1d(<2 x i32>* %A) nounwind {
;CHECK-LABEL: uaddlp1d:
;CHECK: uaddlp.1d
- %tmp1 = load <2 x i32>* %A
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp3 = call <1 x i64> @llvm.aarch64.neon.uaddlp.v1i64.v2i32(<2 x i32> %tmp1)
ret <1 x i64> %tmp3
}
@@ -507,7 +507,7 @@ define <1 x i64> @uaddlp1d(<2 x i32>* %A
define <8 x i16> @uaddlp8h(<16 x i8>* %A) nounwind {
;CHECK-LABEL: uaddlp8h:
;CHECK: uaddlp.8h
- %tmp1 = load <16 x i8>* %A
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp3 = call <8 x i16> @llvm.aarch64.neon.uaddlp.v8i16.v16i8(<16 x i8> %tmp1)
ret <8 x i16> %tmp3
}
@@ -515,7 +515,7 @@ define <8 x i16> @uaddlp8h(<16 x i8>* %A
define <4 x i32> @uaddlp4s(<8 x i16>* %A) nounwind {
;CHECK-LABEL: uaddlp4s:
;CHECK: uaddlp.4s
- %tmp1 = load <8 x i16>* %A
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp3 = call <4 x i32> @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16> %tmp1)
ret <4 x i32> %tmp3
}
@@ -523,7 +523,7 @@ define <4 x i32> @uaddlp4s(<8 x i16>* %A
define <2 x i64> @uaddlp2d(<4 x i32>* %A) nounwind {
;CHECK-LABEL: uaddlp2d:
;CHECK: uaddlp.2d
- %tmp1 = load <4 x i32>* %A
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp3 = call <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32> %tmp1)
ret <2 x i64> %tmp3
}
@@ -539,9 +539,9 @@ declare <2 x i64> @llvm.aarch64.neon.uad
define <4 x i16> @sadalp4h(<8 x i8>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: sadalp4h:
;CHECK: sadalp.4h
- %tmp1 = load <8 x i8>* %A
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp3 = call <4 x i16> @llvm.aarch64.neon.saddlp.v4i16.v8i8(<8 x i8> %tmp1)
- %tmp4 = load <4 x i16>* %B
+ %tmp4 = load <4 x i16>, <4 x i16>* %B
%tmp5 = add <4 x i16> %tmp3, %tmp4
ret <4 x i16> %tmp5
}
@@ -549,9 +549,9 @@ define <4 x i16> @sadalp4h(<8 x i8>* %A,
define <2 x i32> @sadalp2s(<4 x i16>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: sadalp2s:
;CHECK: sadalp.2s
- %tmp1 = load <4 x i16>* %A
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp3 = call <2 x i32> @llvm.aarch64.neon.saddlp.v2i32.v4i16(<4 x i16> %tmp1)
- %tmp4 = load <2 x i32>* %B
+ %tmp4 = load <2 x i32>, <2 x i32>* %B
%tmp5 = add <2 x i32> %tmp3, %tmp4
ret <2 x i32> %tmp5
}
@@ -559,9 +559,9 @@ define <2 x i32> @sadalp2s(<4 x i16>* %A
define <8 x i16> @sadalp8h(<16 x i8>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: sadalp8h:
;CHECK: sadalp.8h
- %tmp1 = load <16 x i8>* %A
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp3 = call <8 x i16> @llvm.aarch64.neon.saddlp.v8i16.v16i8(<16 x i8> %tmp1)
- %tmp4 = load <8 x i16>* %B
+ %tmp4 = load <8 x i16>, <8 x i16>* %B
%tmp5 = add <8 x i16> %tmp3, %tmp4
ret <8 x i16> %tmp5
}
@@ -569,9 +569,9 @@ define <8 x i16> @sadalp8h(<16 x i8>* %A
define <4 x i32> @sadalp4s(<8 x i16>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: sadalp4s:
;CHECK: sadalp.4s
- %tmp1 = load <8 x i16>* %A
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp3 = call <4 x i32> @llvm.aarch64.neon.saddlp.v4i32.v8i16(<8 x i16> %tmp1)
- %tmp4 = load <4 x i32>* %B
+ %tmp4 = load <4 x i32>, <4 x i32>* %B
%tmp5 = add <4 x i32> %tmp3, %tmp4
ret <4 x i32> %tmp5
}
@@ -579,9 +579,9 @@ define <4 x i32> @sadalp4s(<8 x i16>* %A
define <2 x i64> @sadalp2d(<4 x i32>* %A, <2 x i64>* %B) nounwind {
;CHECK-LABEL: sadalp2d:
;CHECK: sadalp.2d
- %tmp1 = load <4 x i32>* %A
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp3 = call <2 x i64> @llvm.aarch64.neon.saddlp.v2i64.v4i32(<4 x i32> %tmp1)
- %tmp4 = load <2 x i64>* %B
+ %tmp4 = load <2 x i64>, <2 x i64>* %B
%tmp5 = add <2 x i64> %tmp3, %tmp4
ret <2 x i64> %tmp5
}
@@ -589,9 +589,9 @@ define <2 x i64> @sadalp2d(<4 x i32>* %A
define <4 x i16> @uadalp4h(<8 x i8>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: uadalp4h:
;CHECK: uadalp.4h
- %tmp1 = load <8 x i8>* %A
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp3 = call <4 x i16> @llvm.aarch64.neon.uaddlp.v4i16.v8i8(<8 x i8> %tmp1)
- %tmp4 = load <4 x i16>* %B
+ %tmp4 = load <4 x i16>, <4 x i16>* %B
%tmp5 = add <4 x i16> %tmp3, %tmp4
ret <4 x i16> %tmp5
}
@@ -599,9 +599,9 @@ define <4 x i16> @uadalp4h(<8 x i8>* %A,
define <2 x i32> @uadalp2s(<4 x i16>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: uadalp2s:
;CHECK: uadalp.2s
- %tmp1 = load <4 x i16>* %A
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp3 = call <2 x i32> @llvm.aarch64.neon.uaddlp.v2i32.v4i16(<4 x i16> %tmp1)
- %tmp4 = load <2 x i32>* %B
+ %tmp4 = load <2 x i32>, <2 x i32>* %B
%tmp5 = add <2 x i32> %tmp3, %tmp4
ret <2 x i32> %tmp5
}
@@ -609,9 +609,9 @@ define <2 x i32> @uadalp2s(<4 x i16>* %A
define <8 x i16> @uadalp8h(<16 x i8>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: uadalp8h:
;CHECK: uadalp.8h
- %tmp1 = load <16 x i8>* %A
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp3 = call <8 x i16> @llvm.aarch64.neon.uaddlp.v8i16.v16i8(<16 x i8> %tmp1)
- %tmp4 = load <8 x i16>* %B
+ %tmp4 = load <8 x i16>, <8 x i16>* %B
%tmp5 = add <8 x i16> %tmp3, %tmp4
ret <8 x i16> %tmp5
}
@@ -619,9 +619,9 @@ define <8 x i16> @uadalp8h(<16 x i8>* %A
define <4 x i32> @uadalp4s(<8 x i16>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: uadalp4s:
;CHECK: uadalp.4s
- %tmp1 = load <8 x i16>* %A
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp3 = call <4 x i32> @llvm.aarch64.neon.uaddlp.v4i32.v8i16(<8 x i16> %tmp1)
- %tmp4 = load <4 x i32>* %B
+ %tmp4 = load <4 x i32>, <4 x i32>* %B
%tmp5 = add <4 x i32> %tmp3, %tmp4
ret <4 x i32> %tmp5
}
@@ -629,9 +629,9 @@ define <4 x i32> @uadalp4s(<8 x i16>* %A
define <2 x i64> @uadalp2d(<4 x i32>* %A, <2 x i64>* %B) nounwind {
;CHECK-LABEL: uadalp2d:
;CHECK: uadalp.2d
- %tmp1 = load <4 x i32>* %A
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp3 = call <2 x i64> @llvm.aarch64.neon.uaddlp.v2i64.v4i32(<4 x i32> %tmp1)
- %tmp4 = load <2 x i64>* %B
+ %tmp4 = load <2 x i64>, <2 x i64>* %B
%tmp5 = add <2 x i64> %tmp3, %tmp4
ret <2 x i64> %tmp5
}
@@ -639,8 +639,8 @@ define <2 x i64> @uadalp2d(<4 x i32>* %A
define <8 x i8> @addp_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: addp_8b:
;CHECK: addp.8b
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
%tmp3 = call <8 x i8> @llvm.aarch64.neon.addp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
ret <8 x i8> %tmp3
}
@@ -648,8 +648,8 @@ define <8 x i8> @addp_8b(<8 x i8>* %A, <
define <16 x i8> @addp_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: addp_16b:
;CHECK: addp.16b
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
%tmp3 = call <16 x i8> @llvm.aarch64.neon.addp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
ret <16 x i8> %tmp3
}
@@ -657,8 +657,8 @@ define <16 x i8> @addp_16b(<16 x i8>* %A
define <4 x i16> @addp_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: addp_4h:
;CHECK: addp.4h
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
%tmp3 = call <4 x i16> @llvm.aarch64.neon.addp.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
ret <4 x i16> %tmp3
}
@@ -666,8 +666,8 @@ define <4 x i16> @addp_4h(<4 x i16>* %A,
define <8 x i16> @addp_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: addp_8h:
;CHECK: addp.8h
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
%tmp3 = call <8 x i16> @llvm.aarch64.neon.addp.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
ret <8 x i16> %tmp3
}
@@ -675,8 +675,8 @@ define <8 x i16> @addp_8h(<8 x i16>* %A,
define <2 x i32> @addp_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: addp_2s:
;CHECK: addp.2s
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
%tmp3 = call <2 x i32> @llvm.aarch64.neon.addp.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
ret <2 x i32> %tmp3
}
@@ -684,8 +684,8 @@ define <2 x i32> @addp_2s(<2 x i32>* %A,
define <4 x i32> @addp_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: addp_4s:
;CHECK: addp.4s
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i32>, <4 x i32>* %B
%tmp3 = call <4 x i32> @llvm.aarch64.neon.addp.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
ret <4 x i32> %tmp3
}
@@ -693,8 +693,8 @@ define <4 x i32> @addp_4s(<4 x i32>* %A,
define <2 x i64> @addp_2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
;CHECK-LABEL: addp_2d:
;CHECK: addp.2d
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
+ %tmp1 = load <2 x i64>, <2 x i64>* %A
+ %tmp2 = load <2 x i64>, <2 x i64>* %B
%tmp3 = call <2 x i64> @llvm.aarch64.neon.addp.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
ret <2 x i64> %tmp3
}
@@ -710,8 +710,8 @@ declare <2 x i64> @llvm.aarch64.neon.add
define <2 x float> @faddp_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
;CHECK-LABEL: faddp_2s:
;CHECK: faddp.2s
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
+ %tmp1 = load <2 x float>, <2 x float>* %A
+ %tmp2 = load <2 x float>, <2 x float>* %B
%tmp3 = call <2 x float> @llvm.aarch64.neon.addp.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
ret <2 x float> %tmp3
}
@@ -719,8 +719,8 @@ define <2 x float> @faddp_2s(<2 x float>
define <4 x float> @faddp_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
;CHECK-LABEL: faddp_4s:
;CHECK: faddp.4s
- %tmp1 = load <4 x float>* %A
- %tmp2 = load <4 x float>* %B
+ %tmp1 = load <4 x float>, <4 x float>* %A
+ %tmp2 = load <4 x float>, <4 x float>* %B
%tmp3 = call <4 x float> @llvm.aarch64.neon.addp.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
ret <4 x float> %tmp3
}
@@ -728,8 +728,8 @@ define <4 x float> @faddp_4s(<4 x float>
define <2 x double> @faddp_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
;CHECK-LABEL: faddp_2d:
;CHECK: faddp.2d
- %tmp1 = load <2 x double>* %A
- %tmp2 = load <2 x double>* %B
+ %tmp1 = load <2 x double>, <2 x double>* %A
+ %tmp2 = load <2 x double>, <2 x double>* %B
%tmp3 = call <2 x double> @llvm.aarch64.neon.addp.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
ret <2 x double> %tmp3
}
@@ -805,8 +805,8 @@ define <2 x i64> @ssubl2_duplhs(i32 %lhs
define <8 x i8> @addhn8b_natural(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: addhn8b_natural:
;CHECK: addhn.8b
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
%sum = add <8 x i16> %tmp1, %tmp2
%high_bits = lshr <8 x i16> %sum, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
%narrowed = trunc <8 x i16> %high_bits to <8 x i8>
@@ -816,8 +816,8 @@ define <8 x i8> @addhn8b_natural(<8 x i1
define <4 x i16> @addhn4h_natural(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: addhn4h_natural:
;CHECK: addhn.4h
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i32>, <4 x i32>* %B
%sum = add <4 x i32> %tmp1, %tmp2
%high_bits = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
%narrowed = trunc <4 x i32> %high_bits to <4 x i16>
@@ -827,8 +827,8 @@ define <4 x i16> @addhn4h_natural(<4 x i
define <2 x i32> @addhn2s_natural(<2 x i64>* %A, <2 x i64>* %B) nounwind {
;CHECK-LABEL: addhn2s_natural:
;CHECK: addhn.2s
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
+ %tmp1 = load <2 x i64>, <2 x i64>* %A
+ %tmp2 = load <2 x i64>, <2 x i64>* %B
%sum = add <2 x i64> %tmp1, %tmp2
%high_bits = lshr <2 x i64> %sum, <i64 32, i64 32>
%narrowed = trunc <2 x i64> %high_bits to <2 x i32>
@@ -838,8 +838,8 @@ define <2 x i32> @addhn2s_natural(<2 x i
define <16 x i8> @addhn2_16b_natural(<8 x i8> %low, <8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: addhn2_16b_natural:
;CHECK: addhn2.16b
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
%sum = add <8 x i16> %tmp1, %tmp2
%high_bits = lshr <8 x i16> %sum, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
%narrowed = trunc <8 x i16> %high_bits to <8 x i8>
@@ -850,8 +850,8 @@ define <16 x i8> @addhn2_16b_natural(<8
define <8 x i16> @addhn2_8h_natural(<4 x i16> %low, <4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: addhn2_8h_natural:
;CHECK: addhn2.8h
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i32>, <4 x i32>* %B
%sum = add <4 x i32> %tmp1, %tmp2
%high_bits = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
%narrowed = trunc <4 x i32> %high_bits to <4 x i16>
@@ -862,8 +862,8 @@ define <8 x i16> @addhn2_8h_natural(<4 x
define <4 x i32> @addhn2_4s_natural(<2 x i32> %low, <2 x i64>* %A, <2 x i64>* %B) nounwind {
;CHECK-LABEL: addhn2_4s_natural:
;CHECK: addhn2.4s
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
+ %tmp1 = load <2 x i64>, <2 x i64>* %A
+ %tmp2 = load <2 x i64>, <2 x i64>* %B
%sum = add <2 x i64> %tmp1, %tmp2
%high_bits = lshr <2 x i64> %sum, <i64 32, i64 32>
%narrowed = trunc <2 x i64> %high_bits to <2 x i32>
@@ -874,8 +874,8 @@ define <4 x i32> @addhn2_4s_natural(<2 x
define <8 x i8> @subhn8b_natural(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: subhn8b_natural:
;CHECK: subhn.8b
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
%diff = sub <8 x i16> %tmp1, %tmp2
%high_bits = lshr <8 x i16> %diff, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
%narrowed = trunc <8 x i16> %high_bits to <8 x i8>
@@ -885,8 +885,8 @@ define <8 x i8> @subhn8b_natural(<8 x i1
define <4 x i16> @subhn4h_natural(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: subhn4h_natural:
;CHECK: subhn.4h
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i32>, <4 x i32>* %B
%diff = sub <4 x i32> %tmp1, %tmp2
%high_bits = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
%narrowed = trunc <4 x i32> %high_bits to <4 x i16>
@@ -896,8 +896,8 @@ define <4 x i16> @subhn4h_natural(<4 x i
define <2 x i32> @subhn2s_natural(<2 x i64>* %A, <2 x i64>* %B) nounwind {
;CHECK-LABEL: subhn2s_natural:
;CHECK: subhn.2s
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
+ %tmp1 = load <2 x i64>, <2 x i64>* %A
+ %tmp2 = load <2 x i64>, <2 x i64>* %B
%diff = sub <2 x i64> %tmp1, %tmp2
%high_bits = lshr <2 x i64> %diff, <i64 32, i64 32>
%narrowed = trunc <2 x i64> %high_bits to <2 x i32>
@@ -907,8 +907,8 @@ define <2 x i32> @subhn2s_natural(<2 x i
define <16 x i8> @subhn2_16b_natural(<8 x i8> %low, <8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: subhn2_16b_natural:
;CHECK: subhn2.16b
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
%diff = sub <8 x i16> %tmp1, %tmp2
%high_bits = lshr <8 x i16> %diff, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
%narrowed = trunc <8 x i16> %high_bits to <8 x i8>
@@ -919,8 +919,8 @@ define <16 x i8> @subhn2_16b_natural(<8
define <8 x i16> @subhn2_8h_natural(<4 x i16> %low, <4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: subhn2_8h_natural:
;CHECK: subhn2.8h
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i32>, <4 x i32>* %B
%diff = sub <4 x i32> %tmp1, %tmp2
%high_bits = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
%narrowed = trunc <4 x i32> %high_bits to <4 x i16>
@@ -931,8 +931,8 @@ define <8 x i16> @subhn2_8h_natural(<4 x
define <4 x i32> @subhn2_4s_natural(<2 x i32> %low, <2 x i64>* %A, <2 x i64>* %B) nounwind {
;CHECK-LABEL: subhn2_4s_natural:
;CHECK: subhn2.4s
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
+ %tmp1 = load <2 x i64>, <2 x i64>* %A
+ %tmp2 = load <2 x i64>, <2 x i64>* %B
%diff = sub <2 x i64> %tmp1, %tmp2
%high_bits = lshr <2 x i64> %diff, <i64 32, i64 32>
%narrowed = trunc <2 x i64> %high_bits to <2 x i32>
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vbitwise.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vbitwise.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vbitwise.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vbitwise.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
define <8 x i8> @rbit_8b(<8 x i8>* %A) nounwind {
;CHECK-LABEL: rbit_8b:
;CHECK: rbit.8b
- %tmp1 = load <8 x i8>* %A
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp3 = call <8 x i8> @llvm.aarch64.neon.rbit.v8i8(<8 x i8> %tmp1)
ret <8 x i8> %tmp3
}
@@ -11,7 +11,7 @@ define <8 x i8> @rbit_8b(<8 x i8>* %A) n
define <16 x i8> @rbit_16b(<16 x i8>* %A) nounwind {
;CHECK-LABEL: rbit_16b:
;CHECK: rbit.16b
- %tmp1 = load <16 x i8>* %A
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp3 = call <16 x i8> @llvm.aarch64.neon.rbit.v16i8(<16 x i8> %tmp1)
ret <16 x i8> %tmp3
}
@@ -22,7 +22,7 @@ declare <16 x i8> @llvm.aarch64.neon.rbi
define <8 x i16> @sxtl8h(<8 x i8>* %A) nounwind {
;CHECK-LABEL: sxtl8h:
;CHECK: sshll.8h
- %tmp1 = load <8 x i8>* %A
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = sext <8 x i8> %tmp1 to <8 x i16>
ret <8 x i16> %tmp2
}
@@ -30,7 +30,7 @@ define <8 x i16> @sxtl8h(<8 x i8>* %A) n
define <8 x i16> @uxtl8h(<8 x i8>* %A) nounwind {
;CHECK-LABEL: uxtl8h:
;CHECK: ushll.8h
- %tmp1 = load <8 x i8>* %A
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp2 = zext <8 x i8> %tmp1 to <8 x i16>
ret <8 x i16> %tmp2
}
@@ -38,7 +38,7 @@ define <8 x i16> @uxtl8h(<8 x i8>* %A) n
define <4 x i32> @sxtl4s(<4 x i16>* %A) nounwind {
;CHECK-LABEL: sxtl4s:
;CHECK: sshll.4s
- %tmp1 = load <4 x i16>* %A
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = sext <4 x i16> %tmp1 to <4 x i32>
ret <4 x i32> %tmp2
}
@@ -46,7 +46,7 @@ define <4 x i32> @sxtl4s(<4 x i16>* %A)
define <4 x i32> @uxtl4s(<4 x i16>* %A) nounwind {
;CHECK-LABEL: uxtl4s:
;CHECK: ushll.4s
- %tmp1 = load <4 x i16>* %A
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp2 = zext <4 x i16> %tmp1 to <4 x i32>
ret <4 x i32> %tmp2
}
@@ -54,7 +54,7 @@ define <4 x i32> @uxtl4s(<4 x i16>* %A)
define <2 x i64> @sxtl2d(<2 x i32>* %A) nounwind {
;CHECK-LABEL: sxtl2d:
;CHECK: sshll.2d
- %tmp1 = load <2 x i32>* %A
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = sext <2 x i32> %tmp1 to <2 x i64>
ret <2 x i64> %tmp2
}
@@ -62,7 +62,7 @@ define <2 x i64> @sxtl2d(<2 x i32>* %A)
define <2 x i64> @uxtl2d(<2 x i32>* %A) nounwind {
;CHECK-LABEL: uxtl2d:
;CHECK: ushll.2d
- %tmp1 = load <2 x i32>* %A
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp2 = zext <2 x i32> %tmp1 to <2 x i64>
ret <2 x i64> %tmp2
}
@@ -76,7 +76,7 @@ entry:
; CHECK: movi.2d [[REG1:v[0-9]+]], #0x0000ff000000ff
; CHECK: and.16b v{{[0-9]+}}, v{{[0-9]+}}, [[REG1]]
%0 = bitcast i8* %src to <16 x i8>*
- %1 = load <16 x i8>* %0, align 16
+ %1 = load <16 x i8>, <16 x i8>* %0, align 16
%and.i = and <16 x i8> %1, <i8 -1, i8 0, i8 0, i8 0, i8 -1, i8 0, i8 0, i8 0, i8 -1, i8 0, i8 0, i8 0, i8 -1, i8 0, i8 0, i8 0>
%2 = bitcast <16 x i8> %and.i to <8 x i16>
%vshl_n = shl <8 x i16> %2, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vcmp.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vcmp.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vcmp.ll Fri Feb 27 15:17:42 2015
@@ -16,8 +16,8 @@ define void @fcmltz_4s(<4 x float> %a, <
define <2 x i32> @facge_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
;CHECK-LABEL: facge_2s:
;CHECK: facge.2s
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
+ %tmp1 = load <2 x float>, <2 x float>* %A
+ %tmp2 = load <2 x float>, <2 x float>* %B
%tmp3 = call <2 x i32> @llvm.aarch64.neon.facge.v2i32.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
ret <2 x i32> %tmp3
}
@@ -25,8 +25,8 @@ define <2 x i32> @facge_2s(<2 x float>*
define <4 x i32> @facge_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
;CHECK-LABEL: facge_4s:
;CHECK: facge.4s
- %tmp1 = load <4 x float>* %A
- %tmp2 = load <4 x float>* %B
+ %tmp1 = load <4 x float>, <4 x float>* %A
+ %tmp2 = load <4 x float>, <4 x float>* %B
%tmp3 = call <4 x i32> @llvm.aarch64.neon.facge.v4i32.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
ret <4 x i32> %tmp3
}
@@ -34,8 +34,8 @@ define <4 x i32> @facge_4s(<4 x float>*
define <2 x i64> @facge_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
;CHECK-LABEL: facge_2d:
;CHECK: facge.2d
- %tmp1 = load <2 x double>* %A
- %tmp2 = load <2 x double>* %B
+ %tmp1 = load <2 x double>, <2 x double>* %A
+ %tmp2 = load <2 x double>, <2 x double>* %B
%tmp3 = call <2 x i64> @llvm.aarch64.neon.facge.v2i64.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
ret <2 x i64> %tmp3
}
@@ -47,8 +47,8 @@ declare <2 x i64> @llvm.aarch64.neon.fac
define <2 x i32> @facgt_2s(<2 x float>* %A, <2 x float>* %B) nounwind {
;CHECK-LABEL: facgt_2s:
;CHECK: facgt.2s
- %tmp1 = load <2 x float>* %A
- %tmp2 = load <2 x float>* %B
+ %tmp1 = load <2 x float>, <2 x float>* %A
+ %tmp2 = load <2 x float>, <2 x float>* %B
%tmp3 = call <2 x i32> @llvm.aarch64.neon.facgt.v2i32.v2f32(<2 x float> %tmp1, <2 x float> %tmp2)
ret <2 x i32> %tmp3
}
@@ -56,8 +56,8 @@ define <2 x i32> @facgt_2s(<2 x float>*
define <4 x i32> @facgt_4s(<4 x float>* %A, <4 x float>* %B) nounwind {
;CHECK-LABEL: facgt_4s:
;CHECK: facgt.4s
- %tmp1 = load <4 x float>* %A
- %tmp2 = load <4 x float>* %B
+ %tmp1 = load <4 x float>, <4 x float>* %A
+ %tmp2 = load <4 x float>, <4 x float>* %B
%tmp3 = call <4 x i32> @llvm.aarch64.neon.facgt.v4i32.v4f32(<4 x float> %tmp1, <4 x float> %tmp2)
ret <4 x i32> %tmp3
}
@@ -65,8 +65,8 @@ define <4 x i32> @facgt_4s(<4 x float>*
define <2 x i64> @facgt_2d(<2 x double>* %A, <2 x double>* %B) nounwind {
;CHECK-LABEL: facgt_2d:
;CHECK: facgt.2d
- %tmp1 = load <2 x double>* %A
- %tmp2 = load <2 x double>* %B
+ %tmp1 = load <2 x double>, <2 x double>* %A
+ %tmp2 = load <2 x double>, <2 x double>* %B
%tmp3 = call <2 x i64> @llvm.aarch64.neon.facgt.v2i64.v2f64(<2 x double> %tmp1, <2 x double> %tmp2)
ret <2 x i64> %tmp3
}
@@ -112,8 +112,8 @@ declare i32 @llvm.aarch64.neon.facgt.i32
define <8 x i8> @cmtst_8b(<8 x i8>* %A, <8 x i8>* %B) nounwind {
;CHECK-LABEL: cmtst_8b:
;CHECK: cmtst.8b
- %tmp1 = load <8 x i8>* %A
- %tmp2 = load <8 x i8>* %B
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
+ %tmp2 = load <8 x i8>, <8 x i8>* %B
%commonbits = and <8 x i8> %tmp1, %tmp2
%mask = icmp ne <8 x i8> %commonbits, zeroinitializer
%res = sext <8 x i1> %mask to <8 x i8>
@@ -123,8 +123,8 @@ define <8 x i8> @cmtst_8b(<8 x i8>* %A,
define <16 x i8> @cmtst_16b(<16 x i8>* %A, <16 x i8>* %B) nounwind {
;CHECK-LABEL: cmtst_16b:
;CHECK: cmtst.16b
- %tmp1 = load <16 x i8>* %A
- %tmp2 = load <16 x i8>* %B
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
+ %tmp2 = load <16 x i8>, <16 x i8>* %B
%commonbits = and <16 x i8> %tmp1, %tmp2
%mask = icmp ne <16 x i8> %commonbits, zeroinitializer
%res = sext <16 x i1> %mask to <16 x i8>
@@ -134,8 +134,8 @@ define <16 x i8> @cmtst_16b(<16 x i8>* %
define <4 x i16> @cmtst_4h(<4 x i16>* %A, <4 x i16>* %B) nounwind {
;CHECK-LABEL: cmtst_4h:
;CHECK: cmtst.4h
- %tmp1 = load <4 x i16>* %A
- %tmp2 = load <4 x i16>* %B
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
+ %tmp2 = load <4 x i16>, <4 x i16>* %B
%commonbits = and <4 x i16> %tmp1, %tmp2
%mask = icmp ne <4 x i16> %commonbits, zeroinitializer
%res = sext <4 x i1> %mask to <4 x i16>
@@ -145,8 +145,8 @@ define <4 x i16> @cmtst_4h(<4 x i16>* %A
define <8 x i16> @cmtst_8h(<8 x i16>* %A, <8 x i16>* %B) nounwind {
;CHECK-LABEL: cmtst_8h:
;CHECK: cmtst.8h
- %tmp1 = load <8 x i16>* %A
- %tmp2 = load <8 x i16>* %B
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
+ %tmp2 = load <8 x i16>, <8 x i16>* %B
%commonbits = and <8 x i16> %tmp1, %tmp2
%mask = icmp ne <8 x i16> %commonbits, zeroinitializer
%res = sext <8 x i1> %mask to <8 x i16>
@@ -156,8 +156,8 @@ define <8 x i16> @cmtst_8h(<8 x i16>* %A
define <2 x i32> @cmtst_2s(<2 x i32>* %A, <2 x i32>* %B) nounwind {
;CHECK-LABEL: cmtst_2s:
;CHECK: cmtst.2s
- %tmp1 = load <2 x i32>* %A
- %tmp2 = load <2 x i32>* %B
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
+ %tmp2 = load <2 x i32>, <2 x i32>* %B
%commonbits = and <2 x i32> %tmp1, %tmp2
%mask = icmp ne <2 x i32> %commonbits, zeroinitializer
%res = sext <2 x i1> %mask to <2 x i32>
@@ -167,8 +167,8 @@ define <2 x i32> @cmtst_2s(<2 x i32>* %A
define <4 x i32> @cmtst_4s(<4 x i32>* %A, <4 x i32>* %B) nounwind {
;CHECK-LABEL: cmtst_4s:
;CHECK: cmtst.4s
- %tmp1 = load <4 x i32>* %A
- %tmp2 = load <4 x i32>* %B
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
+ %tmp2 = load <4 x i32>, <4 x i32>* %B
%commonbits = and <4 x i32> %tmp1, %tmp2
%mask = icmp ne <4 x i32> %commonbits, zeroinitializer
%res = sext <4 x i1> %mask to <4 x i32>
@@ -178,8 +178,8 @@ define <4 x i32> @cmtst_4s(<4 x i32>* %A
define <2 x i64> @cmtst_2d(<2 x i64>* %A, <2 x i64>* %B) nounwind {
;CHECK-LABEL: cmtst_2d:
;CHECK: cmtst.2d
- %tmp1 = load <2 x i64>* %A
- %tmp2 = load <2 x i64>* %B
+ %tmp1 = load <2 x i64>, <2 x i64>* %A
+ %tmp2 = load <2 x i64>, <2 x i64>* %B
%commonbits = and <2 x i64> %tmp1, %tmp2
%mask = icmp ne <2 x i64> %commonbits, zeroinitializer
%res = sext <2 x i1> %mask to <2 x i64>
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vcnt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vcnt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vcnt.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vcnt.ll Fri Feb 27 15:17:42 2015
@@ -3,7 +3,7 @@
define <8 x i8> @cls_8b(<8 x i8>* %A) nounwind {
;CHECK-LABEL: cls_8b:
;CHECK: cls.8b
- %tmp1 = load <8 x i8>* %A
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp3 = call <8 x i8> @llvm.aarch64.neon.cls.v8i8(<8 x i8> %tmp1)
ret <8 x i8> %tmp3
}
@@ -11,7 +11,7 @@ define <8 x i8> @cls_8b(<8 x i8>* %A) no
define <16 x i8> @cls_16b(<16 x i8>* %A) nounwind {
;CHECK-LABEL: cls_16b:
;CHECK: cls.16b
- %tmp1 = load <16 x i8>* %A
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp3 = call <16 x i8> @llvm.aarch64.neon.cls.v16i8(<16 x i8> %tmp1)
ret <16 x i8> %tmp3
}
@@ -19,7 +19,7 @@ define <16 x i8> @cls_16b(<16 x i8>* %A)
define <4 x i16> @cls_4h(<4 x i16>* %A) nounwind {
;CHECK-LABEL: cls_4h:
;CHECK: cls.4h
- %tmp1 = load <4 x i16>* %A
+ %tmp1 = load <4 x i16>, <4 x i16>* %A
%tmp3 = call <4 x i16> @llvm.aarch64.neon.cls.v4i16(<4 x i16> %tmp1)
ret <4 x i16> %tmp3
}
@@ -27,7 +27,7 @@ define <4 x i16> @cls_4h(<4 x i16>* %A)
define <8 x i16> @cls_8h(<8 x i16>* %A) nounwind {
;CHECK-LABEL: cls_8h:
;CHECK: cls.8h
- %tmp1 = load <8 x i16>* %A
+ %tmp1 = load <8 x i16>, <8 x i16>* %A
%tmp3 = call <8 x i16> @llvm.aarch64.neon.cls.v8i16(<8 x i16> %tmp1)
ret <8 x i16> %tmp3
}
@@ -35,7 +35,7 @@ define <8 x i16> @cls_8h(<8 x i16>* %A)
define <2 x i32> @cls_2s(<2 x i32>* %A) nounwind {
;CHECK-LABEL: cls_2s:
;CHECK: cls.2s
- %tmp1 = load <2 x i32>* %A
+ %tmp1 = load <2 x i32>, <2 x i32>* %A
%tmp3 = call <2 x i32> @llvm.aarch64.neon.cls.v2i32(<2 x i32> %tmp1)
ret <2 x i32> %tmp3
}
@@ -43,7 +43,7 @@ define <2 x i32> @cls_2s(<2 x i32>* %A)
define <4 x i32> @cls_4s(<4 x i32>* %A) nounwind {
;CHECK-LABEL: cls_4s:
;CHECK: cls.4s
- %tmp1 = load <4 x i32>* %A
+ %tmp1 = load <4 x i32>, <4 x i32>* %A
%tmp3 = call <4 x i32> @llvm.aarch64.neon.cls.v4i32(<4 x i32> %tmp1)
ret <4 x i32> %tmp3
}
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vcvt.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vcvt.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vcvt.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vcvt.ll Fri Feb 27 15:17:42 2015
@@ -675,7 +675,7 @@ define void @autogen_SD28458(<8 x double
;CHECK: fcvt
;CHECK: ret
define void @autogen_SD19225(<8 x double>* %addr.f64, <8 x float>* %addr.f32) {
- %A = load <8 x float>* %addr.f32
+ %A = load <8 x float>, <8 x float>* %addr.f32
%Tr53 = fpext <8 x float> %A to <8 x double>
store <8 x double> %Tr53, <8 x double>* %addr.f64
ret void
Modified: llvm/trunk/test/CodeGen/AArch64/arm64-vector-imm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-vector-imm.ll?rev=230794&r1=230793&r2=230794&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-vector-imm.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-vector-imm.ll Fri Feb 27 15:17:42 2015
@@ -5,7 +5,7 @@ define <8 x i8> @v_orrimm(<8 x i8>* %A)
; CHECK-NOT: mov
; CHECK-NOT: mvn
; CHECK: orr
- %tmp1 = load <8 x i8>* %A
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp3 = or <8 x i8> %tmp1, <i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1>
ret <8 x i8> %tmp3
}
@@ -15,7 +15,7 @@ define <16 x i8> @v_orrimmQ(<16 x i8>* %
; CHECK-NOT: mov
; CHECK-NOT: mvn
; CHECK: orr
- %tmp1 = load <16 x i8>* %A
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp3 = or <16 x i8> %tmp1, <i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1, i8 0, i8 0, i8 0, i8 1>
ret <16 x i8> %tmp3
}
@@ -25,7 +25,7 @@ define <8 x i8> @v_bicimm(<8 x i8>* %A)
; CHECK-NOT: mov
; CHECK-NOT: mvn
; CHECK: bic
- %tmp1 = load <8 x i8>* %A
+ %tmp1 = load <8 x i8>, <8 x i8>* %A
%tmp3 = and <8 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0 >
ret <8 x i8> %tmp3
}
@@ -35,7 +35,7 @@ define <16 x i8> @v_bicimmQ(<16 x i8>* %
; CHECK-NOT: mov
; CHECK-NOT: mvn
; CHECK: bic
- %tmp1 = load <16 x i8>* %A
+ %tmp1 = load <16 x i8>, <16 x i8>* %A
%tmp3 = and <16 x i8> %tmp1, < i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0, i8 -1, i8 -1, i8 -1, i8 0 >
ret <16 x i8> %tmp3
}