[llvm] r327271 - [Hexagon] Add more lit tests

Krzysztof Parzyszek via llvm-commits <llvm-commits@lists.llvm.org>
Mon Mar 12 07:01:30 PDT 2018


Author: kparzysz
Date: Mon Mar 12 07:01:28 2018
New Revision: 327271

URL: http://llvm.org/viewvc/llvm-project?rev=327271&view=rev
Log:
[Hexagon] Add more lit tests

Added:
    llvm/trunk/test/CodeGen/Hexagon/Halide_vec_cast_trunc1.ll
    llvm/trunk/test/CodeGen/Hexagon/Halide_vec_cast_trunc2.ll
    llvm/trunk/test/CodeGen/Hexagon/M4_mpyri_addi_global.ll
    llvm/trunk/test/CodeGen/Hexagon/M4_mpyrr_addi_global.ll
    llvm/trunk/test/CodeGen/Hexagon/P08214.ll
    llvm/trunk/test/CodeGen/Hexagon/V60-VDblNew.ll
    llvm/trunk/test/CodeGen/Hexagon/add-use.ll
    llvm/trunk/test/CodeGen/Hexagon/add_int_double.ll
    llvm/trunk/test/CodeGen/Hexagon/add_mpi_RRR.ll
    llvm/trunk/test/CodeGen/Hexagon/addasl-address.ll
    llvm/trunk/test/CodeGen/Hexagon/addrmode-keepdeadphis.ll
    llvm/trunk/test/CodeGen/Hexagon/addrmode-offset.ll
    llvm/trunk/test/CodeGen/Hexagon/addrmode.ll
    llvm/trunk/test/CodeGen/Hexagon/aggr-antidep-tied.ll
    llvm/trunk/test/CodeGen/Hexagon/aggr-copy-order.ll
    llvm/trunk/test/CodeGen/Hexagon/aggr-licm.ll
    llvm/trunk/test/CodeGen/Hexagon/aggressive_licm.ll
    llvm/trunk/test/CodeGen/Hexagon/align_Os.ll
    llvm/trunk/test/CodeGen/Hexagon/align_test.ll
    llvm/trunk/test/CodeGen/Hexagon/asr-rnd.ll
    llvm/trunk/test/CodeGen/Hexagon/asr-rnd64.ll
    llvm/trunk/test/CodeGen/Hexagon/assert-postinc-ptr-not-value.ll
    llvm/trunk/test/CodeGen/Hexagon/avoidVectorLowering.ll
    llvm/trunk/test/CodeGen/Hexagon/base-offset-stv4.ll
    llvm/trunk/test/CodeGen/Hexagon/bkfir.ll
    llvm/trunk/test/CodeGen/Hexagon/block-address.ll
    llvm/trunk/test/CodeGen/Hexagon/blockaddr-fpic.ll
    llvm/trunk/test/CodeGen/Hexagon/brcond-setne.ll
    llvm/trunk/test/CodeGen/Hexagon/bss-local.ll
    llvm/trunk/test/CodeGen/Hexagon/bug-aa4463-ifconv-vecpred.ll
    llvm/trunk/test/CodeGen/Hexagon/bug-allocframe-size.ll
    llvm/trunk/test/CodeGen/Hexagon/bug-hcp-tied-kill.ll
    llvm/trunk/test/CodeGen/Hexagon/bug14859-iv-cleanup-lpad.ll
    llvm/trunk/test/CodeGen/Hexagon/bug14859-split-const-block-addr.ll
    llvm/trunk/test/CodeGen/Hexagon/bug15515-shuffle.ll
    llvm/trunk/test/CodeGen/Hexagon/bug17276.ll
    llvm/trunk/test/CodeGen/Hexagon/bug17386.ll
    llvm/trunk/test/CodeGen/Hexagon/bug18008.ll
    llvm/trunk/test/CodeGen/Hexagon/bug18491-optsize.ll
    llvm/trunk/test/CodeGen/Hexagon/bug19076.ll
    llvm/trunk/test/CodeGen/Hexagon/bug19119.ll
    llvm/trunk/test/CodeGen/Hexagon/bug19254-ifconv-vec.ll
    llvm/trunk/test/CodeGen/Hexagon/bug27085.ll
    llvm/trunk/test/CodeGen/Hexagon/bug31839.ll
    llvm/trunk/test/CodeGen/Hexagon/bug6757-endloop.ll
    llvm/trunk/test/CodeGen/Hexagon/bug9049.ll
    llvm/trunk/test/CodeGen/Hexagon/bug9963.ll
    llvm/trunk/test/CodeGen/Hexagon/call-long1.ll
    llvm/trunk/test/CodeGen/Hexagon/call-v4.ll
    llvm/trunk/test/CodeGen/Hexagon/callR_noreturn.ll
    llvm/trunk/test/CodeGen/Hexagon/calling-conv.ll
    llvm/trunk/test/CodeGen/Hexagon/cext-ice.ll
    llvm/trunk/test/CodeGen/Hexagon/cfi-late-and-regpressure-init.ll
    llvm/trunk/test/CodeGen/Hexagon/cfi_offset.ll
    llvm/trunk/test/CodeGen/Hexagon/cfi_offset2.ll
    llvm/trunk/test/CodeGen/Hexagon/check-dot-new.ll
    llvm/trunk/test/CodeGen/Hexagon/circ_pcr_assert.ll
    llvm/trunk/test/CodeGen/Hexagon/cmpb_gtu.ll
    llvm/trunk/test/CodeGen/Hexagon/cmpbeq.ll
    llvm/trunk/test/CodeGen/Hexagon/cmpy-round.ll
    llvm/trunk/test/CodeGen/Hexagon/coalesce_tfri.ll
    llvm/trunk/test/CodeGen/Hexagon/combine-imm-ext.ll
    llvm/trunk/test/CodeGen/Hexagon/combine-imm-ext2.ll
    llvm/trunk/test/CodeGen/Hexagon/combine_lh.ll
    llvm/trunk/test/CodeGen/Hexagon/combiner-lts.ll
    llvm/trunk/test/CodeGen/Hexagon/common-global-addr.ll
    llvm/trunk/test/CodeGen/Hexagon/concat-vectors-legalize.ll
    llvm/trunk/test/CodeGen/Hexagon/const-combine.ll
    llvm/trunk/test/CodeGen/Hexagon/constext-call.ll
    llvm/trunk/test/CodeGen/Hexagon/constext-immstore.ll
    llvm/trunk/test/CodeGen/Hexagon/constext-replace.ll
    llvm/trunk/test/CodeGen/Hexagon/count_0s.ll
    llvm/trunk/test/CodeGen/Hexagon/csr-stubs-spill-threshold.ll
    llvm/trunk/test/CodeGen/Hexagon/csr_stub_calls_dwarf_frame_info.ll
    llvm/trunk/test/CodeGen/Hexagon/dag-combine-select-or0.ll
    llvm/trunk/test/CodeGen/Hexagon/dag-indexed.ll
    llvm/trunk/test/CodeGen/Hexagon/dccleana.ll
    llvm/trunk/test/CodeGen/Hexagon/dealloc-store.ll
    llvm/trunk/test/CodeGen/Hexagon/dealloc_return.ll
    llvm/trunk/test/CodeGen/Hexagon/debug-line_table_start.ll
    llvm/trunk/test/CodeGen/Hexagon/debug-prologue-loc.ll
    llvm/trunk/test/CodeGen/Hexagon/debug-prologue.ll
    llvm/trunk/test/CodeGen/Hexagon/def-undef-deps.ll
    llvm/trunk/test/CodeGen/Hexagon/default-align.ll
    llvm/trunk/test/CodeGen/Hexagon/deflate.ll
    llvm/trunk/test/CodeGen/Hexagon/dhry.ll
    llvm/trunk/test/CodeGen/Hexagon/dhry_proc8.ll
    llvm/trunk/test/CodeGen/Hexagon/dhry_stall.ll
    llvm/trunk/test/CodeGen/Hexagon/dont_rotate_pregs_at_O2.ll
    llvm/trunk/test/CodeGen/Hexagon/dwarf-discriminator.ll
    llvm/trunk/test/CodeGen/Hexagon/eh_return-r30.ll
    llvm/trunk/test/CodeGen/Hexagon/eh_save_restore.ll
    llvm/trunk/test/CodeGen/Hexagon/ehabi.ll
    llvm/trunk/test/CodeGen/Hexagon/entryBB-isLoopHdr.ll
    llvm/trunk/test/CodeGen/Hexagon/expand-condsets-copy-lis.ll
    llvm/trunk/test/CodeGen/Hexagon/expand-condsets-dead.ll
    llvm/trunk/test/CodeGen/Hexagon/expand-condsets-pred-undef2.ll
    llvm/trunk/test/CodeGen/Hexagon/expand-condsets.ll
    llvm/trunk/test/CodeGen/Hexagon/extlow.ll
    llvm/trunk/test/CodeGen/Hexagon/extract_0bits.ll
    llvm/trunk/test/CodeGen/Hexagon/extractu_0bits.ll
    llvm/trunk/test/CodeGen/Hexagon/find-loop.ll
    llvm/trunk/test/CodeGen/Hexagon/float-bitcast.ll
    llvm/trunk/test/CodeGen/Hexagon/float-const64-G0.ll
    llvm/trunk/test/CodeGen/Hexagon/float-gen-cmpop.ll
    llvm/trunk/test/CodeGen/Hexagon/fltnvjump.ll
    llvm/trunk/test/CodeGen/Hexagon/fmadd.ll
    llvm/trunk/test/CodeGen/Hexagon/getBlockAddress.ll
    llvm/trunk/test/CodeGen/Hexagon/glob-align-volatile.ll
    llvm/trunk/test/CodeGen/Hexagon/global-const-gep.ll
    llvm/trunk/test/CodeGen/Hexagon/global-ctor-pcrel.ll
    llvm/trunk/test/CodeGen/Hexagon/global64bitbug.ll
    llvm/trunk/test/CodeGen/Hexagon/hello-world-v55.ll
    llvm/trunk/test/CodeGen/Hexagon/hello-world-v60.ll
    llvm/trunk/test/CodeGen/Hexagon/hexagon-tfr-add.ll
    llvm/trunk/test/CodeGen/Hexagon/hexagon-verify-implicit-use.ll
    llvm/trunk/test/CodeGen/Hexagon/hexagon_cfi_offset.ll
    llvm/trunk/test/CodeGen/Hexagon/hidden-relocation.ll
    llvm/trunk/test/CodeGen/Hexagon/honor-optsize.ll
    llvm/trunk/test/CodeGen/Hexagon/hrc-stack-coloring.ll
    llvm/trunk/test/CodeGen/Hexagon/hvx-byte-store-double.ll
    llvm/trunk/test/CodeGen/Hexagon/hvx-byte-store.ll
    llvm/trunk/test/CodeGen/Hexagon/hvx-dbl-dual-output.ll
    llvm/trunk/test/CodeGen/Hexagon/hvx-double-vzero.ll
    llvm/trunk/test/CodeGen/Hexagon/hvx-dual-output.ll
    llvm/trunk/test/CodeGen/Hexagon/hvx-loopidiom-memcpy.ll
    llvm/trunk/test/CodeGen/Hexagon/hvx-vzero.ll
    llvm/trunk/test/CodeGen/Hexagon/hwloop-ice.ll
    llvm/trunk/test/CodeGen/Hexagon/hwloop-long.ll
    llvm/trunk/test/CodeGen/Hexagon/hwloop-phi-subreg.ll
    llvm/trunk/test/CodeGen/Hexagon/hwloop-swap.ll
    llvm/trunk/test/CodeGen/Hexagon/hwloop-with-return-call.ll
    llvm/trunk/test/CodeGen/Hexagon/hx_V6_lo_hi.ll
    llvm/trunk/test/CodeGen/Hexagon/i128-bitop.ll
    llvm/trunk/test/CodeGen/Hexagon/ignore-terminal-mbb.ll
    llvm/trunk/test/CodeGen/Hexagon/initial-exec.ll
    llvm/trunk/test/CodeGen/Hexagon/inline-asm-clobber-lr.ll
    llvm/trunk/test/CodeGen/Hexagon/inline-asm-error.ll
    llvm/trunk/test/CodeGen/Hexagon/insert.ll
    llvm/trunk/test/CodeGen/Hexagon/integer_abs.ll
    llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-alu.ll
    llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-misc.ll
    llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-permute.ll
    llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-shift.ll
    llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-vcmp.ll
    llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-vmpy-acc-128B.ll
    llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-vmpy-acc.ll
    llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-vmpy.ll
    llvm/trunk/test/CodeGen/Hexagon/invalid-memrefs.ll
    llvm/trunk/test/CodeGen/Hexagon/jump-table-g0.ll
    llvm/trunk/test/CodeGen/Hexagon/jump-table-isel.ll
    llvm/trunk/test/CodeGen/Hexagon/large-number-of-preds.ll
    llvm/trunk/test/CodeGen/Hexagon/lcomm.ll
    llvm/trunk/test/CodeGen/Hexagon/load-abs.ll
    llvm/trunk/test/CodeGen/Hexagon/local-exec.ll
    llvm/trunk/test/CodeGen/Hexagon/loop-rotate-bug.ll
    llvm/trunk/test/CodeGen/Hexagon/loop-rotate-liveins.ll
    llvm/trunk/test/CodeGen/Hexagon/loop_correctness.ll
    llvm/trunk/test/CodeGen/Hexagon/lower-i1.ll
    llvm/trunk/test/CodeGen/Hexagon/machine-sink.ll
    llvm/trunk/test/CodeGen/Hexagon/maddsubu.ll
    llvm/trunk/test/CodeGen/Hexagon/mapped_intrinsics.ll
    llvm/trunk/test/CodeGen/Hexagon/mem-load-circ.ll
    llvm/trunk/test/CodeGen/Hexagon/mem-ops-sub.ll
    llvm/trunk/test/CodeGen/Hexagon/mem-ops-sub_i16_01.ll
    llvm/trunk/test/CodeGen/Hexagon/memcmp.ll
    llvm/trunk/test/CodeGen/Hexagon/memcpy-memmove-inline.ll
    llvm/trunk/test/CodeGen/Hexagon/memop-bit18.ll
    llvm/trunk/test/CodeGen/Hexagon/memops_global.ll
    llvm/trunk/test/CodeGen/Hexagon/memset-inline.ll
    llvm/trunk/test/CodeGen/Hexagon/mipi-double-small.ll
    llvm/trunk/test/CodeGen/Hexagon/mpysin-imm.ll
    llvm/trunk/test/CodeGen/Hexagon/mul64.ll
    llvm/trunk/test/CodeGen/Hexagon/muxii-crash.ll
    llvm/trunk/test/CodeGen/Hexagon/neg-op.ll
    llvm/trunk/test/CodeGen/Hexagon/newvaluejump-postinc.ll
    llvm/trunk/test/CodeGen/Hexagon/newvaluestore2.ll
    llvm/trunk/test/CodeGen/Hexagon/no-falign-function-for-size.ll
    llvm/trunk/test/CodeGen/Hexagon/noFalignAfterCallAtO2.ll
    llvm/trunk/test/CodeGen/Hexagon/no_struct_element.ll
    llvm/trunk/test/CodeGen/Hexagon/noreturn-noepilog.ll
    llvm/trunk/test/CodeGen/Hexagon/noreturn-notail.ll
    llvm/trunk/test/CodeGen/Hexagon/not-op.ll
    llvm/trunk/test/CodeGen/Hexagon/ntstbit.ll
    llvm/trunk/test/CodeGen/Hexagon/nv_store_vec.ll
    llvm/trunk/test/CodeGen/Hexagon/opt-addr-mode-subreg-use.ll
    llvm/trunk/test/CodeGen/Hexagon/opt-glob-addrs-000.ll
    llvm/trunk/test/CodeGen/Hexagon/opt-glob-addrs-001.ll
    llvm/trunk/test/CodeGen/Hexagon/opt-glob-addrs-003.ll
    llvm/trunk/test/CodeGen/Hexagon/opt-sext-intrinsics.ll
    llvm/trunk/test/CodeGen/Hexagon/packed-store.ll
    llvm/trunk/test/CodeGen/Hexagon/packetize-allocframe.ll
    llvm/trunk/test/CodeGen/Hexagon/packetize-call-r29.ll
    llvm/trunk/test/CodeGen/Hexagon/packetize-impdef-1.ll
    llvm/trunk/test/CodeGen/Hexagon/packetize-impdef.ll
    llvm/trunk/test/CodeGen/Hexagon/packetize-l2fetch.ll
    llvm/trunk/test/CodeGen/Hexagon/packetize-volatiles.ll
    llvm/trunk/test/CodeGen/Hexagon/peephole-move-phi.ll
    llvm/trunk/test/CodeGen/Hexagon/phi-elim.ll
    llvm/trunk/test/CodeGen/Hexagon/pic-jt-big.ll
    llvm/trunk/test/CodeGen/Hexagon/pmpyw_acc.ll
    llvm/trunk/test/CodeGen/Hexagon/postinc-aggr-dag-cycle.ll
    llvm/trunk/test/CodeGen/Hexagon/pred-sched.ll
    llvm/trunk/test/CodeGen/Hexagon/pred-simp.ll
    llvm/trunk/test/CodeGen/Hexagon/pred-taken-jump.ll
    llvm/trunk/test/CodeGen/Hexagon/predtfrs.ll
    llvm/trunk/test/CodeGen/Hexagon/prefetch-intr.ll
    llvm/trunk/test/CodeGen/Hexagon/prefetch-shuffler-ice.ll
    llvm/trunk/test/CodeGen/Hexagon/prob-types.ll
    llvm/trunk/test/CodeGen/Hexagon/ps_call_nr.ll
    llvm/trunk/test/CodeGen/Hexagon/rdf-copy-undef.ll
    llvm/trunk/test/CodeGen/Hexagon/rdf-kill-last-op.ll
    llvm/trunk/test/CodeGen/Hexagon/redundant-branching2.ll
    llvm/trunk/test/CodeGen/Hexagon/reg-eq-cmp.ll
    llvm/trunk/test/CodeGen/Hexagon/reg-scav-imp-use-dbl-vec.ll
    llvm/trunk/test/CodeGen/Hexagon/reg-scavengebug-2.ll
    llvm/trunk/test/CodeGen/Hexagon/reg-scavengebug-4.ll
    llvm/trunk/test/CodeGen/Hexagon/reg-scavengebug-5.ll
    llvm/trunk/test/CodeGen/Hexagon/reg-scavengebug.ll
    llvm/trunk/test/CodeGen/Hexagon/reg_seq.ll
    llvm/trunk/test/CodeGen/Hexagon/registerscav-missing-spill-slot.ll
    llvm/trunk/test/CodeGen/Hexagon/registerscavenger-fail1.ll
    llvm/trunk/test/CodeGen/Hexagon/regp-underflow.ll
    llvm/trunk/test/CodeGen/Hexagon/regscav-wrong-super-sub-regs.ll
    llvm/trunk/test/CodeGen/Hexagon/regscavenger_fail_hwloop.ll
    llvm/trunk/test/CodeGen/Hexagon/regscavengerbug.ll
    llvm/trunk/test/CodeGen/Hexagon/rotl-i64.ll
    llvm/trunk/test/CodeGen/Hexagon/save-kill-csr.ll
    llvm/trunk/test/CodeGen/Hexagon/save-regs-thresh.ll
    llvm/trunk/test/CodeGen/Hexagon/sdata-expand-const.ll
    llvm/trunk/test/CodeGen/Hexagon/sdata-opaque-type.ll
    llvm/trunk/test/CodeGen/Hexagon/sdata-stack-guard.ll
    llvm/trunk/test/CodeGen/Hexagon/setmemrefs.ll
    llvm/trunk/test/CodeGen/Hexagon/sfmin_dce.ll
    llvm/trunk/test/CodeGen/Hexagon/sfmpyacc_scale.ll
    llvm/trunk/test/CodeGen/Hexagon/split-vecpred.ll
    llvm/trunk/test/CodeGen/Hexagon/stack-guard-acceptable-type.ll
    llvm/trunk/test/CodeGen/Hexagon/store-AbsSet.ll
    llvm/trunk/test/CodeGen/Hexagon/store-abs.ll
    llvm/trunk/test/CodeGen/Hexagon/store-constant.ll
    llvm/trunk/test/CodeGen/Hexagon/store-imm-byte.ll
    llvm/trunk/test/CodeGen/Hexagon/store-imm-halword.ll
    llvm/trunk/test/CodeGen/Hexagon/store-imm-word.ll
    llvm/trunk/test/CodeGen/Hexagon/store-widen-subreg.ll
    llvm/trunk/test/CodeGen/Hexagon/store1.ll
    llvm/trunk/test/CodeGen/Hexagon/store_abs.ll
    llvm/trunk/test/CodeGen/Hexagon/struct-const.ll
    llvm/trunk/test/CodeGen/Hexagon/struct_copy.ll
    llvm/trunk/test/CodeGen/Hexagon/struct_copy_sched_r16.ll
    llvm/trunk/test/CodeGen/Hexagon/sub-add.ll
    llvm/trunk/test/CodeGen/Hexagon/subh-shifted.ll
    llvm/trunk/test/CodeGen/Hexagon/subh.ll
    llvm/trunk/test/CodeGen/Hexagon/swiz.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-badorder.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-chain-refs.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-change-dep-cycle.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-change-dep.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-const-tc2.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-const-tc3.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-cse-phi.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-dag-phi1.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-dead-regseq.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-disable-Os.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi4.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi5.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi6.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phi8.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-epilog-phis.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-epilog-reuse2.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-epilog-reuse3.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-epilog-reuse4.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-exit-fixup.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-fix-last-use.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-fix-last-use1.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-intreglow8.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-kernel-last-use.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-kernel-phi1.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-large-rec.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-loop-carried-crash.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-loop-carried.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-loopval.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-lots-deps.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-maxstart.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-more-phi.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-multi-phi-refs.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-new-phi.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-node-order.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-order-carried.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-order-deps1.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-order-deps3.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-order-deps4.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-order-deps6.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-order-prec.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-order.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-order1.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-phi-chains.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-phi-def-use.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-phi-dep.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-phi-dep1.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-phi-order.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-phi-ref1.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-phi.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-physreg.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-prolog-phi.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-regseq.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-remove-dep-ice.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-rename-dead-phi.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-replace-uses1.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-resmii.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-reuse-phi-1.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-reuse-phi-2.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-reuse-phi-4.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-reuse-phi.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-stages.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-stages3.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-subreg.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-swap.ll
    llvm/trunk/test/CodeGen/Hexagon/swp-tfri.ll
    llvm/trunk/test/CodeGen/Hexagon/tcm-zext.ll
    llvm/trunk/test/CodeGen/Hexagon/testbits.ll
    llvm/trunk/test/CodeGen/Hexagon/tfr-mux-nvj.ll
    llvm/trunk/test/CodeGen/Hexagon/tied_oper.ll
    llvm/trunk/test/CodeGen/Hexagon/tls_gd.ll
    llvm/trunk/test/CodeGen/Hexagon/trivialmemaliascheck.ll
    llvm/trunk/test/CodeGen/Hexagon/trunc-mpy.ll
    llvm/trunk/test/CodeGen/Hexagon/tstbit.ll
    llvm/trunk/test/CodeGen/Hexagon/twoaddressbug.ll
    llvm/trunk/test/CodeGen/Hexagon/undef-ret.ll
    llvm/trunk/test/CodeGen/Hexagon/unordered-fcmp.ll
    llvm/trunk/test/CodeGen/Hexagon/upper-mpy.ll
    llvm/trunk/test/CodeGen/Hexagon/v5_insns.ll
    llvm/trunk/test/CodeGen/Hexagon/v6-inlasm1.ll
    llvm/trunk/test/CodeGen/Hexagon/v6-inlasm2.ll
    llvm/trunk/test/CodeGen/Hexagon/v6-inlasm3.ll
    llvm/trunk/test/CodeGen/Hexagon/v6-inlasm4.ll
    llvm/trunk/test/CodeGen/Hexagon/v6-shuffl.ll
    llvm/trunk/test/CodeGen/Hexagon/v6-spill1.ll
    llvm/trunk/test/CodeGen/Hexagon/v6-unaligned-spill.ll
    llvm/trunk/test/CodeGen/Hexagon/v6-vecpred-copy.ll
    llvm/trunk/test/CodeGen/Hexagon/v60-align.ll
    llvm/trunk/test/CodeGen/Hexagon/v60-haar-postinc.ll
    llvm/trunk/test/CodeGen/Hexagon/v60-halide-vcombinei8.ll
    llvm/trunk/test/CodeGen/Hexagon/v60-vec-128b-1.ll
    llvm/trunk/test/CodeGen/Hexagon/v60-vecpred-spill.ll
    llvm/trunk/test/CodeGen/Hexagon/v60-vsel2.ll
    llvm/trunk/test/CodeGen/Hexagon/v60_Q6_P_rol_PI.ll
    llvm/trunk/test/CodeGen/Hexagon/v60_sort16.ll
    llvm/trunk/test/CodeGen/Hexagon/v60rol-instrs.ll
    llvm/trunk/test/CodeGen/Hexagon/v62-CJAllSlots.ll
    llvm/trunk/test/CodeGen/Hexagon/v62-inlasm4.ll
    llvm/trunk/test/CodeGen/Hexagon/v6vassignp.ll
    llvm/trunk/test/CodeGen/Hexagon/v6vec-vmemcur-prob.mir
    llvm/trunk/test/CodeGen/Hexagon/v6vec-vshuff.ll
    llvm/trunk/test/CodeGen/Hexagon/v6vec_zero.ll
    llvm/trunk/test/CodeGen/Hexagon/v6vect-dbl-fail1.ll
    llvm/trunk/test/CodeGen/Hexagon/v6vect-dbl-spill.ll
    llvm/trunk/test/CodeGen/Hexagon/v6vect-dbl.ll
    llvm/trunk/test/CodeGen/Hexagon/v6vect-dh1.ll
    llvm/trunk/test/CodeGen/Hexagon/v6vect-locals1.ll
    llvm/trunk/test/CodeGen/Hexagon/v6vect-no-sideeffects.ll
    llvm/trunk/test/CodeGen/Hexagon/v6vect-pred2.ll
    llvm/trunk/test/CodeGen/Hexagon/v6vect-spill-kill.ll
    llvm/trunk/test/CodeGen/Hexagon/v6vect-vmem1.ll
    llvm/trunk/test/CodeGen/Hexagon/v6vect-vsplat.ll
    llvm/trunk/test/CodeGen/Hexagon/vadd1.ll
    llvm/trunk/test/CodeGen/Hexagon/varargs-memv.ll
    llvm/trunk/test/CodeGen/Hexagon/vasrh.select.ll
    llvm/trunk/test/CodeGen/Hexagon/vcombine128_to_req_seq.ll
    llvm/trunk/test/CodeGen/Hexagon/vcombine_subreg.ll
    llvm/trunk/test/CodeGen/Hexagon/vcombine_to_req_seq.ll
    llvm/trunk/test/CodeGen/Hexagon/vec-align.ll
    llvm/trunk/test/CodeGen/Hexagon/vec-call-full1.ll
    llvm/trunk/test/CodeGen/Hexagon/vecPred2Vec.ll
    llvm/trunk/test/CodeGen/Hexagon/vect-any_extend.ll
    llvm/trunk/test/CodeGen/Hexagon/vect-dbl-post-inc.ll
    llvm/trunk/test/CodeGen/Hexagon/vect-set_cc_v2i32.ll
    llvm/trunk/test/CodeGen/Hexagon/vect-vd0.ll
    llvm/trunk/test/CodeGen/Hexagon/vect-zero_extend.ll
    llvm/trunk/test/CodeGen/Hexagon/vect_setcc.ll
    llvm/trunk/test/CodeGen/Hexagon/vect_setcc_v2i16.ll
    llvm/trunk/test/CodeGen/Hexagon/verify-sink-code.ll
    llvm/trunk/test/CodeGen/Hexagon/verify-undef.ll
    llvm/trunk/test/CodeGen/Hexagon/vmemu-128.ll
    llvm/trunk/test/CodeGen/Hexagon/vrcmpys.ll
    llvm/trunk/test/CodeGen/Hexagon/vsplat-ext.ll
    llvm/trunk/test/CodeGen/Hexagon/wcsrtomb.ll

Added: llvm/trunk/test/CodeGen/Hexagon/Halide_vec_cast_trunc1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/Halide_vec_cast_trunc1.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/Halide_vec_cast_trunc1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/Halide_vec_cast_trunc1.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,57 @@
+; RUN: llc -march=hexagon -O2 < %s
+; REQUIRES: asserts
+
+target triple = "hexagon-unknown--elf"
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  br i1 undef, label %b1, label %b4, !prof !3
+
+b1:                                               ; preds = %b3, %b0
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v0 = load <32 x i32>, <32 x i32>* undef, align 512, !tbaa !4
+  %v1 = shufflevector <32 x i32> %v0, <32 x i32> undef, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+  %v2 = shufflevector <64 x i32> %v1, <64 x i32> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
+  %v3 = trunc <128 x i32> %v2 to <128 x i16>
+  %v4 = mul nsw <128 x i16> undef, %v3
+  %v5 = bitcast <128 x i16> %v4 to <64 x i32>
+  %v6 = tail call <64 x i32> @llvm.hexagon.V6.vaddh.dv.128B(<64 x i32> undef, <64 x i32> %v5)
+  %v7 = tail call <64 x i32> @llvm.hexagon.V6.vaddh.dv.128B(<64 x i32> %v6, <64 x i32> undef)
+  br i1 undef, label %b3, label %b2
+
+b3:                                               ; preds = %b2
+  %v8 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v7)
+  %v9 = tail call <32 x i32> @llvm.hexagon.V6.vasrhubsat.128B(<32 x i32> %v8, <32 x i32> undef, i32 4)
+  store <32 x i32> %v9, <32 x i32>* undef, align 1, !tbaa !7
+  br label %b1
+
+b4:                                               ; preds = %b0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <64 x i32> @llvm.hexagon.V6.vaddh.dv.128B(<64 x i32>, <64 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vasrhubsat.128B(<32 x i32>, <32 x i32>, i32) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length128b" }
+attributes #1 = { nounwind readnone }
+
+!llvm.module.flags = !{!0, !1, !2}
+
+!0 = !{i32 2, !"halide_use_soft_float_abi", i32 0}
+!1 = !{i32 2, !"halide_mcpu", !"hexagonv60"}
+!2 = !{i32 2, !"halide_mattrs", !"+hvx"}
+!3 = !{!"branch_weights", i32 1073741824, i32 0}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"mask", !6}
+!6 = !{!"Halide buffer"}
+!7 = !{!8, !8, i64 0}
+!8 = !{!"conv3x3", !6}

Added: llvm/trunk/test/CodeGen/Hexagon/Halide_vec_cast_trunc2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/Halide_vec_cast_trunc2.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/Halide_vec_cast_trunc2.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/Halide_vec_cast_trunc2.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,61 @@
+; RUN: llc -march=hexagon -O2 < %s
+; REQUIRES: asserts
+
+target triple = "hexagon-unknown--elf"
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  br i1 undef, label %b1, label %b6, !prof !3
+
+b1:                                               ; preds = %b0
+  br i1 undef, label %b2, label %b3, !prof !3
+
+b2:                                               ; preds = %b1
+  unreachable
+
+b3:                                               ; preds = %b1
+  br label %b4
+
+b4:                                               ; preds = %b4, %b3
+  %v0 = load <32 x i32>, <32 x i32>* undef, align 512, !tbaa !4
+  %v1 = shufflevector <32 x i32> %v0, <32 x i32> undef, <64 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+  %v2 = shufflevector <64 x i32> undef, <64 x i32> %v1, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
+  %v3 = trunc <128 x i32> %v2 to <128 x i16>
+  %v4 = mul nsw <128 x i16> undef, %v3
+  %v5 = bitcast <128 x i16> %v4 to <64 x i32>
+  %v6 = tail call <64 x i32> @llvm.hexagon.V6.vaddh.dv.128B(<64 x i32> undef, <64 x i32> %v5)
+  %v7 = bitcast <64 x i32> %v6 to <128 x i16>
+  %v8 = shufflevector <128 x i16> %v7, <128 x i16> undef, <64 x i32> <i32 64, i32 65, i32 66, i32 67, i32 68, i32 69, i32 70, i32 71, i32 72, i32 73, i32 74, i32 75, i32 76, i32 77, i32 78, i32 79, i32 80, i32 81, i32 82, i32 83, i32 84, i32 85, i32 86, i32 87, i32 88, i32 89, i32 90, i32 91, i32 92, i32 93, i32 94, i32 95, i32 96, i32 97, i32 98, i32 99, i32 100, i32 101, i32 102, i32 103, i32 104, i32 105, i32 106, i32 107, i32 108, i32 109, i32 110, i32 111, i32 112, i32 113, i32 114, i32 115, i32 116, i32 117, i32 118, i32 119, i32 120, i32 121, i32 122, i32 123, i32 124, i32 125, i32 126, i32 127>
+  br i1 undef, label %b5, label %b4
+
+b5:                                               ; preds = %b4
+  store <64 x i16> %v8, <64 x i16>* undef, align 1024, !tbaa !7
+  br label %b6
+
+b6:                                               ; preds = %b5, %b0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <64 x i32> @llvm.hexagon.V6.vaddh.dv.128B(<64 x i32>, <64 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length128b" }
+attributes #1 = { nounwind readnone }
+
+!llvm.module.flags = !{!0, !1, !2}
+
+!0 = !{i32 2, !"halide_use_soft_float_abi", i32 0}
+!1 = !{i32 2, !"halide_mcpu", !"hexagonv60"}
+!2 = !{i32 2, !"halide_mattrs", !"+hvx"}
+!3 = !{!"branch_weights", i32 1073741824, i32 0}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"mask", !6}
+!6 = !{!"Halide buffer"}
+!7 = !{!8, !8, i64 0}
+!8 = !{!"sum.width64.base64", !9}
+!9 = !{!"sum.width128.base0", !10}
+!10 = !{!"sum.width256.base0", !11}
+!11 = !{!"sum.width512.base0", !12}
+!12 = !{!"sum.width1024.base0", !13}
+!13 = !{!"sum", !6}

Added: llvm/trunk/test/CodeGen/Hexagon/M4_mpyri_addi_global.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/M4_mpyri_addi_global.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/M4_mpyri_addi_global.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/M4_mpyri_addi_global.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,19 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: r{{[0-9]+}} = add(##g0,mpyi(r{{[0-9]+}},#24))
+
+%s.0 = type { i32, i32, i32, i32, i32, i8 }
+
+@g0 = common global [2 x %s.0] zeroinitializer, align 8
+
+declare void @f0(%s.0*)
+
+; Function Attrs: nounwind readnone
+define void @f1(i32 %a0) #0 {
+b0:
+  %v0 = getelementptr inbounds [2 x %s.0], [2 x %s.0]* @g0, i32 0, i32 %a0
+  call void @f0(%s.0* %v0) #1
+  ret void
+}
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/M4_mpyrr_addi_global.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/M4_mpyrr_addi_global.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/M4_mpyrr_addi_global.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/M4_mpyrr_addi_global.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,22 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: r{{[0-9]+}} = add(##g0{{.*}},mpyi(r{{[0-9]+}},r{{[0-9]+}}))
+
+%s.0 = type { %s.1, %s.1* }
+%s.1 = type { i8, i8, i8, i8, i16, i16, i8, [3 x i8], [20 x %s.2] }
+%s.2 = type { i8, i8, [2 x i8], [2 x i8] }
+
+@g0 = external global [2 x %s.0]
+
+declare void @f0(%s.1**)
+
+; Function Attrs: nounwind readnone
+define void @f1(i32 %a0) #0 {
+b0:
+  %v0 = getelementptr inbounds [2 x %s.0], [2 x %s.0]* @g0, i32 0, i32 %a0
+  %v1 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 1
+  call void @f0(%s.1** %v1) #1
+  ret void
+}
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/P08214.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/P08214.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/P08214.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/P08214.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,224 @@
+; RUN: llc -march=hexagon -O2 < %s
+; REQUIRES: asserts
+; Check for successful compilation.
+
+target triple = "hexagon-unknown--elf"
+
+%s.0 = type { i32 (...)** }
+%s.1 = type { i32 }
+%s.2 = type { %s.1 }
+
+@g0 = global { i32, i32 } { i32 ptrtoint (i32 (%s.1*)* @f0 to i32), i32 0 }, align 4
+@g1 = global i32 0, align 4
+@g2 = global %s.0 zeroinitializer, align 4
+@g3 = global { i32, i32 } { i32 1, i32 0 }, align 4
+@g4 = global i32 0, align 4
+@g5 = global i32 0, align 4
+@g6 = global i32 0, align 4
+@g7 = private unnamed_addr constant [53 x i8] c"REF: ISO/IEC 14882:1998, 8.2.3 Pointers to members.\0A\00", align 1
+@g8 = private unnamed_addr constant [6 x i8] c"%s\0A%s\00", align 1
+@g9 = private unnamed_addr constant [43 x i8] c"Can we assign a pointer to member function\00", align 1
+@g10 = private unnamed_addr constant [49 x i8] c" to a function member of the second base class?\0A\00", align 1
+@g11 = external global i32
+@g12 = private unnamed_addr constant [46 x i8] c"Can we assign a pointer to member to a member\00", align 1
+@g13 = private unnamed_addr constant [29 x i8] c"  of the second base class?\0A\00", align 1
+@g14 = private unnamed_addr constant [7 x i8] c"%s\0A%s\0A\00", align 1
+@g15 = private unnamed_addr constant [51 x i8] c"Testing dereferencing a pointer to member function\00", align 1
+@g16 = private unnamed_addr constant [24 x i8] c"in a complex expression\00", align 1
+@g17 = linkonce_odr unnamed_addr constant [3 x i8*] [i8* null, i8* bitcast ({ i8*, i8* }* @g20 to i8*), i8* bitcast (i32 (%s.0*)* @f9 to i8*)]
+@g18 = external global i8*
+@g19 = linkonce_odr constant [3 x i8] c"1S\00"
+@g20 = linkonce_odr constant { i8*, i8* } { i8* bitcast (i8** getelementptr inbounds (i8*, i8** @g18, i32 2) to i8*), i8* getelementptr inbounds ([3 x i8], [3 x i8]* @g19, i32 0, i32 0) }
+
+; Function Attrs: nounwind readnone
+define linkonce_odr i32 @f0(%s.1* nocapture readnone %a0) #0 align 2 {
+b0:
+  ret i32 11
+}
+
+; Function Attrs: nounwind readnone
+define %s.0* @f1() #0 {
+b0:
+  ret %s.0* @g2
+}
+
+define internal fastcc void @f2() {
+b0:
+  %v0 = load i32, i32* @g5, align 4, !tbaa !0
+  %v1 = add nsw i32 %v0, 5
+  store i32 %v1, i32* @g5, align 4, !tbaa !0
+  %v2 = load { i32, i32 }, { i32, i32 }* @g3, align 4, !tbaa !4
+  %v3 = extractvalue { i32, i32 } %v2, 1
+  %v4 = getelementptr inbounds i8, i8* bitcast (%s.0* @g2 to i8*), i32 %v3
+  %v5 = bitcast i8* %v4 to %s.0*
+  %v6 = extractvalue { i32, i32 } %v2, 0
+  %v7 = and i32 %v6, 1
+  %v8 = icmp eq i32 %v7, 0
+  br i1 %v8, label %b2, label %b1
+
+b1:                                               ; preds = %b0
+  %v9 = bitcast i8* %v4 to i8**
+  %v10 = load i8*, i8** %v9, align 4, !tbaa !5
+  %v11 = add i32 %v6, -1
+  %v12 = getelementptr i8, i8* %v10, i32 %v11
+  %v13 = bitcast i8* %v12 to i32 (%s.0*)**
+  %v14 = load i32 (%s.0*)*, i32 (%s.0*)** %v13, align 4
+  br label %b3
+
+b2:                                               ; preds = %b0
+  %v15 = inttoptr i32 %v6 to i32 (%s.0*)*
+  br label %b3
+
+b3:                                               ; preds = %b2, %b1
+  %v16 = phi i32 (%s.0*)* [ %v14, %b1 ], [ %v15, %b2 ]
+  %v17 = tail call i32 %v16(%s.0* %v5)
+  store i32 %v17, i32* @g6, align 4, !tbaa !0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+define i32 @f3() #0 {
+b0:
+  %v0 = alloca %s.2, align 4
+  %v1 = alloca %s.2, align 4
+  tail call void @f4()
+  tail call void @f5()
+  tail call void (i8*, ...) @f6(i8* getelementptr inbounds ([53 x i8], [53 x i8]* @g7, i32 0, i32 0))
+  tail call void (i8*, ...) @f6(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @g8, i32 0, i32 0), i8* getelementptr inbounds ([43 x i8], [43 x i8]* @g9, i32 0, i32 0), i8* getelementptr inbounds ([49 x i8], [49 x i8]* @g10, i32 0, i32 0))
+  %v2 = load { i32, i32 }, { i32, i32 }* @g0, align 4, !tbaa !4
+  %v3 = extractvalue { i32, i32 } %v2, 1
+  %v4 = bitcast %s.2* %v0 to i8*
+  %v5 = getelementptr inbounds i8, i8* %v4, i32 %v3
+  %v6 = bitcast i8* %v5 to %s.2*
+  %v7 = extractvalue { i32, i32 } %v2, 0
+  %v8 = and i32 %v7, 1
+  %v9 = icmp eq i32 %v8, 0
+  br i1 %v9, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  %v10 = inttoptr i32 %v7 to i32 (%s.2*)*
+  br label %b2
+
+b2:                                               ; preds = %b1, %b0
+  %v11 = phi i32 (%s.2*)* [ %v10, %b1 ], [ undef, %b0 ]
+  %v12 = call i32 %v11(%s.2* %v6)
+  %v13 = icmp eq i32 %v12, 11
+  br i1 %v13, label %b4, label %b3
+
+b3:                                               ; preds = %b2
+  store i32 1, i32* @g11, align 4, !tbaa !0
+  br label %b4
+
+b4:                                               ; preds = %b3, %b2
+  %v14 = call i32 @f7()
+  call void @f5()
+  call void (i8*, ...) @f6(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @g8, i32 0, i32 0), i8* getelementptr inbounds ([46 x i8], [46 x i8]* @g12, i32 0, i32 0), i8* getelementptr inbounds ([29 x i8], [29 x i8]* @g13, i32 0, i32 0))
+  %v15 = getelementptr inbounds %s.2, %s.2* %v1, i32 0, i32 0, i32 0
+  store i32 11, i32* %v15, align 4, !tbaa !7
+  %v16 = load i32, i32* @g1, align 4, !tbaa !4
+  %v17 = bitcast %s.2* %v1 to i8*
+  %v18 = getelementptr inbounds i8, i8* %v17, i32 %v16
+  %v19 = bitcast i8* %v18 to i32*
+  %v20 = load i32, i32* %v19, align 4, !tbaa !0
+  %v21 = icmp eq i32 %v20, 11
+  br i1 %v21, label %b6, label %b5
+
+b5:                                               ; preds = %b4
+  store i32 1, i32* @g11, align 4, !tbaa !0
+  br label %b6
+
+b6:                                               ; preds = %b5, %b4
+  %v22 = call i32 @f7()
+  call void @f5()
+  call void (i8*, ...) @f6(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @g14, i32 0, i32 0), i8* getelementptr inbounds ([51 x i8], [51 x i8]* @g15, i32 0, i32 0), i8* getelementptr inbounds ([24 x i8], [24 x i8]* @g16, i32 0, i32 0))
+  %v23 = load i32, i32* @g4, align 4, !tbaa !0
+  %v24 = icmp eq i32 %v23, 11
+  br i1 %v24, label %b8, label %b7
+
+b7:                                               ; preds = %b6
+  store i32 1, i32* @g11, align 4, !tbaa !0
+  br label %b8
+
+b8:                                               ; preds = %b7, %b6
+  %v25 = call i32 @f7()
+  call void @f5()
+  call void (i8*, ...) @f6(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @g14, i32 0, i32 0), i8* getelementptr inbounds ([51 x i8], [51 x i8]* @g15, i32 0, i32 0), i8* getelementptr inbounds ([24 x i8], [24 x i8]* @g16, i32 0, i32 0))
+  %v26 = load i32, i32* @g6, align 4, !tbaa !0
+  %v27 = icmp eq i32 %v26, 11
+  br i1 %v27, label %b10, label %b9
+
+b9:                                               ; preds = %b8
+  store i32 1, i32* @g11, align 4, !tbaa !0
+  br label %b10
+
+b10:                                              ; preds = %b9, %b8
+  %v28 = call i32 @f7()
+  %v29 = call i32 @f8(i32 4)
+  ret i32 %v29
+}
+
+; Function Attrs: nounwind readnone
+declare void @f4() #0
+
+; Function Attrs: nounwind readnone
+declare void @f5() #0
+
+; Function Attrs: nounwind readnone
+declare void @f6(i8*, ...) #0
+
+; Function Attrs: nounwind readnone
+declare i32 @f7() #0
+
+; Function Attrs: nounwind readnone
+declare i32 @f8(i32) #0
+
+; Function Attrs: nounwind readnone
+define linkonce_odr i32 @f9(%s.0* nocapture readnone %a0) unnamed_addr #0 align 2 {
+b0:
+  ret i32 11
+}
+
+define internal void @f10() {
+b0:
+  store i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @g17, i32 0, i32 2) to i32 (...)**), i32 (...)*** getelementptr inbounds (%s.0, %s.0* @g2, i32 0, i32 0), align 4, !tbaa !5
+  %v0 = load { i32, i32 }, { i32, i32 }* @g3, align 4, !tbaa !4
+  %v1 = extractvalue { i32, i32 } %v0, 1
+  %v2 = getelementptr inbounds i8, i8* bitcast (%s.0* @g2 to i8*), i32 %v1
+  %v3 = bitcast i8* %v2 to %s.0*
+  %v4 = extractvalue { i32, i32 } %v0, 0
+  %v5 = and i32 %v4, 1
+  %v6 = icmp eq i32 %v5, 0
+  br i1 %v6, label %b2, label %b1
+
+b1:                                               ; preds = %b0
+  %v7 = bitcast i8* %v2 to i8**
+  %v8 = load i8*, i8** %v7, align 4, !tbaa !5
+  %v9 = add i32 %v4, -1
+  %v10 = getelementptr i8, i8* %v8, i32 %v9
+  %v11 = bitcast i8* %v10 to i32 (%s.0*)**
+  %v12 = load i32 (%s.0*)*, i32 (%s.0*)** %v11, align 4
+  br label %b3
+
+b2:                                               ; preds = %b0
+  %v13 = inttoptr i32 %v4 to i32 (%s.0*)*
+  br label %b3
+
+b3:                                               ; preds = %b2, %b1
+  %v14 = phi i32 (%s.0*)* [ %v12, %b1 ], [ %v13, %b2 ]
+  %v15 = tail call i32 %v14(%s.0* %v3)
+  store i32 %v15, i32* @g4, align 4, !tbaa !0
+  tail call fastcc void @f2()
+  ret void
+}
+
+attributes #0 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!2, !2, i64 0}
+!5 = !{!6, !6, i64 0}
+!6 = !{!"vtable pointer", !3, i64 0}
+!7 = !{!8, !1, i64 0}
+!8 = !{!"_ZTS2B2", !1, i64 0}

Added: llvm/trunk/test/CodeGen/Hexagon/V60-VDblNew.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/V60-VDblNew.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/V60-VDblNew.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/V60-VDblNew.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,28 @@
+; RUN: llc -march=hexagon -O2 -disable-vecdbl-nv-stores=0 < %s | FileCheck %s
+
+; CHECK-NOT: v{{[0-9]*}}.new
+
+target triple = "hexagon"
+
+@g0 = common global [15 x <16 x i32>] zeroinitializer, align 64
+@g1 = common global <32 x i32> zeroinitializer, align 128
+
+; Function Attrs: nounwind
+define i32 @f0() #0 {
+b0:
+  %v0 = load <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @g0, i32 0, i32 0), align 64, !tbaa !0
+  %v1 = load <16 x i32>, <16 x i32>* getelementptr inbounds ([15 x <16 x i32>], [15 x <16 x i32>]* @g0, i32 0, i32 1), align 64, !tbaa !0
+  %v2 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v0, <16 x i32> %v1)
+  store <32 x i32> %v2, <32 x i32>* @g1, align 128, !tbaa !0
+  ret i32 0
+}
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/add-use.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/add-use.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/add-use.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/add-use.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,27 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Do not want to see register copies in the loop.
+; CHECK-NOT: r{{[0-9]*}} = r{{[0-9]*}}
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind readnone
+define i32 @f0(i32 %a0) #0 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v0 = phi i32 [ 0, %b0 ], [ %v1, %b1 ]
+  %v1 = add nsw i32 %v0, 3
+  %v2 = add i32 %v1, -3
+  %v3 = icmp slt i32 %v2, %a0
+  br i1 %v3, label %b1, label %b2
+
+b2:                                               ; preds = %b1
+  %v4 = phi i32 [ %v1, %b1 ]
+  %v5 = icmp slt i32 %v4, 100
+  %v6 = zext i1 %v5 to i32
+  ret i32 %v6
+}
+
+attributes #0 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/add_int_double.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/add_int_double.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/add_int_double.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/add_int_double.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,11 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: r{{[0-9]+}}:{{[0-9]+}} = add(r{{[0-9]+}}:{{[0-9+]}},r{{[0-9]+}}:{{[0-9]+}}):raw:{{..}}
+
+define i64 @f0(i32 %a0, i64 %a1) #0 {
+b0:
+  %v0 = sext i32 %a0 to i64
+  %v1 = add nsw i64 %v0, %a1
+  ret i64 %v1
+}
+
+attributes #0 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/add_mpi_RRR.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/add_mpi_RRR.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/add_mpi_RRR.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/add_mpi_RRR.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,29 @@
+; RUN: llc -O0 -march=hexagon < %s | FileCheck %s
+
+; CHECK: [[REG0:(r[0-9]+)]] = add(r{{[0-9]+}},mpyi([[REG0]],r{{[0-9]+}})
+; CHECK: [[REG0:(r[0-9]+)]] = add(r{{[0-9]+}},mpyi([[REG0]],r{{[0-9]+}})
+
+target triple = "hexagon"
+
+@g0 = private unnamed_addr constant [50 x i8] c"%x :  Q6_R_add_mpyi_RRR(INT_MIN,INT_MIN,INT_MIN)\0A\00", align 1
+@g1 = private unnamed_addr constant [45 x i8] c"%x :  Q6_R_add_mpyi_RRR(-1,INT_MIN,INT_MIN)\0A\00", align 1
+
+; Function Attrs: nounwind
+declare i32 @f0(i8* nocapture readonly, ...) #0
+
+; Function Attrs: nounwind
+define i32 @f1() #0 {
+b0:
+  %v0 = tail call i32 @llvm.hexagon.M4.mpyrr.addr(i32 -2147483648, i32 -2147483648, i32 -2147483648)
+  %v1 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([50 x i8], [50 x i8]* @g0, i32 0, i32 0), i32 %v0) #2
+  %v2 = tail call i32 @llvm.hexagon.M4.mpyrr.addr(i32 -1, i32 -2147483648, i32 -2147483648)
+  %v3 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([45 x i8], [45 x i8]* @g1, i32 0, i32 0), i32 %v2) #2
+  ret i32 0
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.M4.mpyrr.addr(i32, i32, i32) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/addasl-address.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/addasl-address.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/addasl-address.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/addasl-address.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,33 @@
+; RUN: llc -march=hexagon -O3 < %s | FileCheck %s
+; CHECK: r{{[0-9]*}} = add(##g0,asl(r{{[0-9]*}},#2))
+
+%s.0 = type { i16, i8 }
+
+@g0 = internal global [20 x i8*] zeroinitializer, align 8
+
+; Function Attrs: nounwind
+define void @f0(%s.0* %a0) #0 {
+b0:
+  %v0 = icmp eq %s.0* %a0, null
+  br i1 %v0, label %b2, label %b1
+
+b1:                                               ; preds = %b0
+  %v1 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 1
+  %v2 = load i8, i8* %v1, align 1, !tbaa !0
+  %v3 = zext i8 %v2 to i32
+  %v4 = getelementptr inbounds [20 x i8*], [20 x i8*]* @g0, i32 0, i32 %v3
+  %v5 = bitcast i8** %v4 to i8*
+  tail call void @f1(i8* %v5) #0
+  br label %b2
+
+b2:                                               ; preds = %b1, %b0
+  ret void
+}
+
+declare void @f1(i8*)
+
+attributes #0 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/addrmode-keepdeadphis.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/addrmode-keepdeadphis.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/addrmode-keepdeadphis.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/addrmode-keepdeadphis.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,50 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Make sure that the addressing mode optimization does not propagate
+; an add instruction where the base register would have a different
+; reaching def.
+
+; CHECK-LABEL: f0.1:
+; CHECK-LABEL: %b0
+; CHECK:         r17 = add(r{{[0-9]+}},#8)
+; CHECK-LABEL: %b1
+; CHECK:         r16 = r0
+; CHECK-LABEL: %b2
+; CHECK:         memd(r17+#0)
+
+target triple = "hexagon"
+
+%s.0 = type { i8, i8, %s.1, i32 }
+%s.1 = type { %s.2, [128 x i8] }
+%s.2 = type { i8, i8, i64, %s.3 }
+%s.3 = type { i8 }
+
+define void @f0.1() local_unnamed_addr #0 align 2 {
+b0:
+  %v0 = alloca %s.0, align 8
+  %v1 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 1
+  store i8 4, i8* %v1, align 1
+  %v2 = call signext i8 @f1.2(%s.3* undef) #0
+  %v3 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 2, i32 0, i32 0
+  %v4 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 2, i32 0, i32 3, i32 0
+  store i8 -1, i8* %v4, align 8
+  br i1 undef, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  %v5 = call dereferenceable(12) %s.3* @f2.3(%s.3* nonnull undef, %s.3* nonnull dereferenceable(80) undef) #0
+  %v6 = call signext i8 @f1.2(%s.3* undef) #0
+  %v7 = call dereferenceable(12) %s.3* @f3(%s.3* nonnull %v5, i16 signext undef) #0
+  br label %b2
+
+b2:                                               ; preds = %b1, %b0
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 undef, i8* align 8 %v3, i32 48, i1 false)
+  ret void
+}
+
+declare signext i8 @f1.2(%s.3*) #0
+declare dereferenceable(12) %s.3* @f2.3(%s.3*, %s.3* dereferenceable(80)) #0
+declare dereferenceable(12) %s.3* @f3(%s.3*, i16 signext) #0
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="-long-calls" }
+attributes #1 = { argmemonly nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/addrmode-offset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/addrmode-offset.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/addrmode-offset.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/addrmode-offset.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,66 @@
+; RUN: llc -march=hexagon  -O3 < %s | FileCheck %s
+
+; CHECK-NOT: [[REG0:(r[0-9]+)]] = memw([[REG0:(r[0-9]+)]]<<#2+##state-4)
+
+%s.0 = type { i16, [10 x %s.1*] }
+%s.1 = type { %s.2, i16, i16 }
+%s.2 = type { i8, [15 x %s.3], [18 x %s.4], %s.5, i16 }
+%s.3 = type { %s.5, %s.4*, i8*, i16, i8, i8, [3 x %s.4*], [3 x %s.4*], [3 x %s.4*] }
+%s.4 = type { %s.5, %s.5*, i8, i16, i8 }
+%s.5 = type { %s.5*, %s.5* }
+%s.6 = type { i8, i8 }
+
+@g0 = common global %s.0 zeroinitializer, align 4
+
+; Function Attrs: nounwind optsize
+define void @f0(%s.6* nocapture readonly %a0) local_unnamed_addr #0 {
+b0:
+  %v0 = bitcast %s.6* %a0 to %s.6*
+  %v1 = getelementptr %s.6, %s.6* %v0, i32 0, i32 1
+  %v2 = load i8, i8* %v1, align 1
+  %v3 = zext i8 %v2 to i32
+  %v4 = add nsw i32 %v3, -1
+  %v5 = getelementptr %s.0, %s.0* @g0, i32 0, i32 1
+  %v6 = getelementptr [10 x %s.1*], [10 x %s.1*]* %v5, i32 0, i32 %v4
+  %v7 = load %s.1*, %s.1** %v6, align 4
+  %v8 = icmp eq %s.1* %v7, null
+  br i1 %v8, label %b4, label %b1
+
+b1:                                               ; preds = %b0
+  %v9 = bitcast %s.1* %v7 to %s.1*
+  %v10 = bitcast %s.1* %v9 to i8*
+  %v11 = load i8, i8* %v10, align 4
+  %v12 = icmp eq i8 %v11, %v2
+  br i1 %v12, label %b2, label %b4
+
+b2:                                               ; preds = %b1
+  %v13 = bitcast %s.6* %a0 to %s.6*
+  tail call void @f1(%s.1* nonnull %v7) #2
+  %v14 = getelementptr %s.6, %s.6* %v13, i32 0, i32 1
+  %v15 = load i8, i8* %v14, align 1
+  %v16 = zext i8 %v15 to i32
+  %v17 = add nsw i32 %v16, -1
+  %v18 = getelementptr [10 x %s.1*], [10 x %s.1*]* %v5, i32 0, i32 %v17
+  %v19 = load %s.1*, %s.1** %v18, align 4
+  %v20 = icmp eq %s.1* %v19, null
+  br i1 %v20, label %b4, label %b3
+
+b3:                                               ; preds = %b2
+  %v21 = getelementptr %s.1, %s.1* %v19, i32 0, i32 0, i32 3
+  tail call void @f2(%s.5* %v21) #2
+  store %s.1* null, %s.1** %v18, align 4
+  br label %b4
+
+b4:                                               ; preds = %b3, %b2, %b1, %b0
+  ret void
+}
+
+; Function Attrs: optsize
+declare void @f1(%s.1*) #1
+
+; Function Attrs: optsize
+declare void @f2(%s.5*) #1
+
+attributes #0 = { nounwind optsize "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length64b" }
+attributes #1 = { optsize "target-cpu"="hexagonv60" "target-features"="+hvx" }
+attributes #2 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/addrmode.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/addrmode.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/addrmode.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/addrmode.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,107 @@
+; RUN: llc -O3 -march=hexagon < %s | FileCheck %s
+
+; CHECK-NOT: memb(r{{[0-9]+}}+#375) = #4
+; CHECK: [[REG0:(r[0-9]+)]] = add(r{{[0-9]+}},{{#?}}#374)
+; CHECK: memb([[REG0]]+#1) = #4
+
+%s.0 = type { %s.1, %s.2*, %s.2*, %s.3, %s.5, i32, i32, i16, i8, i8, i8, [7 x i8], i16, i8, i8, i16, i8, i8, i16, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i32, i16, i16, i16, [14 x i8], %s.6, i8, i8, %s.8, [2 x [16 x %s.9]], i32 (i8*, i8*, i8*, i8*, i8*)*, [80 x i8], i8, i8, i8*, i8*, i8*, i32*, i8*, i8*, i8*, [4 x i8], i8*, i8*, i8*, i8*, i8*, i8*, %s.18*, %s.18*, %s.6*, [4 x i8], [2 x [80 x [8 x i8]]], [56 x i8], [2 x [81 x %s.10]], [2 x %s.10], %s.10*, %s.10*, i32, [32 x i32], i8*, %s.12*, i8, i8, %s.18, i64*, i32, %s.19, %s.20, %s.21*, i8, [19 x i8] }
+%s.1 = type { i32, i32, i8* }
+%s.2 = type { i8, i8 }
+%s.3 = type { [371 x %s.2], [6 x %s.4] }
+%s.4 = type { %s.2*, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }
+%s.5 = type { [12 x %s.2], [4 x %s.2], [2 x %s.2], [4 x %s.2], [6 x %s.2], [2 x [7 x %s.2]], [4 x %s.2], [3 x [4 x %s.2]], [3 x %s.2], [3 x %s.2] }
+%s.6 = type { i8*, i32, %s.7, i8*, i8*, i32 }
+%s.7 = type { i64 }
+%s.8 = type { i8, i8, i8, i8, i8, i8, i8, i8, i32, i8, i8, [2 x i8], [16 x i8], [4 x i8], [32 x i16], [32 x i16], [4 x i8], [2 x [4 x i8]], [2 x [4 x i8]], i32, i32, i16, i8 }
+%s.9 = type { [2 x i16] }
+%s.10 = type { %s.11, [2 x [4 x %s.9]], [2 x [2 x i8]], [2 x i8] }
+%s.11 = type { i8, i8, i8, i8, i8, i8, i8, i8, i32 }
+%s.12 = type { i8*, i8*, i32, i8*, i16*, i8*, i16*, i8*, i32, i16, i8, i32, i16*, i16*, i16, i16, i16, i8, i8, %s.13, i8, i8, i8, [32 x i8*], %s.14, %s.16, i8, i8, i8, i8 }
+%s.13 = type { [6 x [16 x i8]], [2 x [64 x i8]] }
+%s.14 = type { i32, i32, %s.15* }
+%s.15 = type { i16, i16 }
+%s.16 = type { %s.17 }
+%s.17 = type { i32, i32, i32 }
+%s.18 = type { i16*, i16*, i16*, i16*, i16*, i32, i32 }
+%s.19 = type { i32, i32, i32, i32 }
+%s.20 = type { i32, i32, i32 }
+%s.21 = type { %s.22*, i8, i8, i8*, i8*, i8*, i8*, i16, i8, void (%s.21*, i8, i8*, i8*, %s.25*, i32*)*, i8, i8, i8, i16, i8, i16, i8, i8, i8*, [4 x i8], i8, i8, [2 x i8], [2 x [4 x i8]], [2 x i16*], i8, i8, i8, i8, i16, i16, i16, i16, i16, i32, [4 x i8], [2 x %s.35], [2 x %s.35], [2 x [10 x %s.30]], %s.35*, %s.35*, %s.35*, %s.35*, [2 x %s.30*], [2 x %s.30*], [2 x %s.30*], [2 x %s.30*], %s.35, [2 x [16 x %s.30]], [2 x [5 x %s.30]], %s.37*, [4 x i8], %s.37, i8, i8, [6 x i8] }
+%s.22 = type { void (%s.21*, %s.23*)*, %s.27*, %s.28, %s.32, [4 x i8], [2 x [81 x %s.34]], [52 x i8], [52 x i8] }
+%s.23 = type { i16, i16, i8, [64 x %s.24], i8, i8, %s.26, [2 x i8], [4 x [2 x [4 x i16]]], [4 x [2 x [4 x i8]]], [32 x i8*], [32 x i8*] }
+%s.24 = type { %s.25, i8, i8, i8, i8, i8, i8, i8, i16 }
+%s.25 = type { i32 }
+%s.26 = type { i8, i8, i8, [2 x [3 x [32 x i16]]], [2 x [3 x [32 x i8]]] }
+%s.27 = type { i16, i16, i32, i8, [3 x i8], %s.13, i8, i8, [2 x i8], [1280 x i8], [765 x i8], [3 x i8], [2 x [640 x i8]], [2 x [720 x i8]], [80 x i8], [45 x i8], [45 x i8], [45 x i8] }
+%s.28 = type { i8, i8, i8, i32, i32, i32, i32, i8, i8, i8, i8, i8, i8, i8, i8, i8, %s.13*, i8, i8, i16*, i8, i8, i8, i8*, %s.29*, %s.31* }
+%s.29 = type { i8, %s.30*, i8* }
+%s.30 = type { %s.25, i8, i8, i8, i8 }
+%s.31 = type { i8, i8, i8, i8, %s.31**, i8, i8, i8, i8, i8, i8, i8, %s.31**, i8, %s.31**, i8*, i8*, i32, i32, i32, i32, i32, [2 x i8], [2 x i8], i32, i32, i8, i8, i32, %s.31*, %s.29*, i8, i8, i32, i32, i32, i32, i32, i32, [2 x i32], [2 x i64], [2 x i32], [2 x i32], [2 x i32], [2 x i32], [2 x i64] }
+%s.32 = type { i8, i8, i16, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, %s.26, %s.6*, [32 x %s.33], %s.33, [32 x %s.33], %s.29*, i8, [2 x [32 x i8]], [32 x i8*], [32 x i8*], [2 x [32 x i8]], [72 x i8], [72 x i32], [72 x i32], [72 x i32], [3 x [2 x [32 x [32 x i16]]]] }
+%s.33 = type { i32, i32, i32, i8, i8 }
+%s.34 = type { %s.35, [2 x [4 x %s.30]] }
+%s.35 = type { i32, i16, %s.36, i8, [3 x i8], i32 }
+%s.36 = type { i16 }
+%s.37 = type { i8, [1 x %s.38], [1 x [416 x i16]], %s.40, %s.38*, %s.38*, i16*, [4 x i8], i16*, %s.40*, %s.40*, %s.40*, %s.27*, [4 x i8], %s.42, %s.23, %s.43, i8 }
+%s.38 = type { %s.39, %s.39, %s.39 }
+%s.39 = type { i8*, i16, i16, i16 }
+%s.40 = type { %s.41, %s.41, %s.41, i8 }
+%s.41 = type { i8*, i16, i16, i16 }
+%s.42 = type { [32 x i8], [3 x i8], [3 x i8], [3 x i8], [3 x i8], [3 x i8], [3 x i8], i8, i8, [4 x i8] }
+%s.43 = type { i32, i32, i32, i32, i32, [3 x i8], [3 x i8], [3 x i8], [16 x i8], i8, i8, i8, i8, i32, i32, i16, i16* }
+%s.44 = type { i8, i8 }
+
+; Function Attrs: nounwind
+define i32 @f0(%s.0* %a0, %s.21* %a1, i8 zeroext %a2, %s.18* %a3, %s.20* %a4) local_unnamed_addr #0 {
+b0:
+  %v0 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 39, i32 2
+  %v1 = load i8, i8* %v0, align 2
+  %v2 = getelementptr inbounds %s.21, %s.21* %a1, i32 0, i32 47, i32 2
+  %v3 = bitcast %s.36* %v2 to %s.44*
+  %v4 = getelementptr inbounds %s.44, %s.44* %v3, i32 0, i32 1
+  store i8 %v1, i8* %v4, align 1
+  %v5 = getelementptr inbounds %s.21, %s.21* %a1, i32 0, i32 32
+  %v6 = getelementptr inbounds %s.21, %s.21* %a1, i32 0, i32 16
+  switch i8 %v1, label %b5 [
+    i8 1, label %b1
+    i8 0, label %b2
+  ]
+
+b1:                                               ; preds = %b0
+  %v7 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 39, i32 10
+  %v8 = load i8, i8* %v7, align 1
+  %v9 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 39, i32 3
+  %v10 = load i8, i8* %v9, align 1
+  store i8 %v10, i8* %v6, align 2
+  %v11 = getelementptr inbounds %s.21, %s.21* %a1, i32 0, i32 19
+  %v12 = bitcast [4 x i8]* %v11 to i32*
+  store i32 16843009, i32* %v12, align 8
+  %v13 = icmp eq i8 %v10, 15
+  switch i8 %v1, label %b4 [
+    i8 6, label %b3
+    i8 1, label %b3
+  ]
+
+b2:                                               ; preds = %b0
+  store i8 4, i8* %v4, align 1
+  store i8 0, i8* %v6, align 2
+  switch i8 %v1, label %b4 [
+    i8 6, label %b3
+    i8 1, label %b3
+  ]
+
+b3:                                               ; preds = %b2, %b2, %b1, %b1
+  %v14 = tail call fastcc signext i8 @f1(%s.21* nonnull %a1)
+  unreachable
+
+b4:                                               ; preds = %b2, %b1
+  unreachable
+
+b5:                                               ; preds = %b0
+  unreachable
+}
+
+; Function Attrs: norecurse nounwind
+declare i8 @f1(%s.21* nocapture) unnamed_addr #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv65" }
+attributes #1 = { norecurse nounwind "target-cpu"="hexagonv65" }

Added: llvm/trunk/test/CodeGen/Hexagon/aggr-antidep-tied.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/aggr-antidep-tied.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/aggr-antidep-tied.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/aggr-antidep-tied.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,55 @@
+; RUN: llc -march=hexagon -verify-machineinstrs < %s
+; REQUIRES: asserts
+
+; Test that the aggressive anti-dependence breaker does not attempt
+; to rename a tied operand.
+
+@g0 = external global [4 x i64], align 8
+@g1 = external global [6 x i64], align 8
+@g2 = external unnamed_addr constant [45 x i8], align 1
+@g3 = external unnamed_addr constant [26 x i8], align 1
+@g4 = external unnamed_addr constant [29 x i8], align 1
+@g5 = external unnamed_addr constant [29 x i8], align 1
+
+; Function Attrs: norecurse nounwind readonly
+declare i64 @f0() #0
+
+; Function Attrs: nounwind
+define void @f1() #1 {
+b0:
+  %v0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @g0, i32 0, i32 0), align 8
+  %v1 = trunc i64 %v0 to i32
+  %v2 = load i64, i64* getelementptr inbounds ([6 x i64], [6 x i64]* @g1, i32 0, i32 0), align 8
+  %v3 = load i64, i64* getelementptr inbounds ([6 x i64], [6 x i64]* @g1, i32 0, i32 3), align 8
+  %v4 = lshr i64 %v2, 32
+  %v5 = trunc i64 %v4 to i32
+  %v6 = add i32 %v5, 0
+  %v7 = trunc i64 %v3 to i32
+  %v8 = lshr i64 %v3, 32
+  %v9 = add i32 %v6, %v7
+  %v10 = trunc i64 %v8 to i32
+  %v11 = add i32 %v9, %v10
+  %v12 = add i32 %v11, 0
+  %v13 = add i32 %v12, 0
+  %v14 = tail call i64 @f0()
+  %v15 = lshr i64 %v0, 16
+  %v16 = trunc i64 %v15 to i32
+  %v17 = and i32 %v16, 65535
+  %v18 = add nuw nsw i32 %v17, 0
+  %v19 = zext i32 %v18 to i64
+  %v20 = add i32 %v16, %v1
+  %v21 = and i32 %v20, 65535
+  %v22 = zext i32 %v21 to i64
+  tail call void (i8*, ...) @f2(i8* getelementptr inbounds ([45 x i8], [45 x i8]* @g2, i32 0, i32 0), i32 %v13) #2
+  tail call void (i8*, ...) @f2(i8* getelementptr inbounds ([26 x i8], [26 x i8]* @g3, i32 0, i32 0), i64 %v14) #2
+  tail call void (i8*, ...) @f2(i8* getelementptr inbounds ([29 x i8], [29 x i8]* @g4, i32 0, i32 0), i64 %v19) #2
+  tail call void (i8*, ...) @f2(i8* getelementptr inbounds ([29 x i8], [29 x i8]* @g5, i32 0, i32 0), i64 %v22) #2
+  ret void
+}
+
+; Function Attrs: nounwind
+declare void @f2(i8* nocapture readonly, ...) #1
+
+attributes #0 = { norecurse nounwind readonly "target-cpu"="hexagonv55" }
+attributes #1 = { nounwind "target-cpu"="hexagonv55" }
+attributes #2 = { nounwind }
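
For context, a "tied" operand is one that must be allocated to the same
register as the instruction's result, as in Hexagon's accumulating multiply
Rx += mpyi(Rs,Rt). A minimal IR sketch (hypothetical function name, not part
of the test) that is typically selected to such a tied-operand instruction:

  define i32 @acc(i32 %a, i32 %b, i32 %c) {
  b0:
    %v0 = mul nsw i32 %b, %c
    ; The accumulating add below is usually selected as r0 += mpyi(r1,r2),
    ; whose accumulator input is tied to the destination register.
    %v1 = add nsw i32 %v0, %a
    ret i32 %v1
  }

The anti-dependence breaker must treat such a tied use and def as a unit
rather than renaming one of them, which is the situation the test above
exercises.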

Added: llvm/trunk/test/CodeGen/Hexagon/aggr-copy-order.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/aggr-copy-order.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/aggr-copy-order.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/aggr-copy-order.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,32 @@
+; RUN: llc -march=hexagon -mattr=-packets -hexagon-check-bank-conflict=0 < %s | FileCheck %s
+; Do not check stores. They undergo some optimizations in the DAG combiner
+; that result in them being emitted out of order. There is likely little
+; that can be done to keep the original order.
+
+target triple = "hexagon"
+
+%s.0 = type { i32, i32, i32 }
+
+; Function Attrs: nounwind
+define void @f0(%s.0* %a0, %s.0* %a1) #0 {
+b0:
+; CHECK: = memw({{.*}}+#0)
+; CHECK: = memw({{.*}}+#4)
+; CHECK: = memw({{.*}}+#8)
+  %v0 = alloca %s.0*, align 4
+  %v1 = alloca %s.0*, align 4
+  store %s.0* %a0, %s.0** %v0, align 4
+  store %s.0* %a1, %s.0** %v1, align 4
+  %v2 = load %s.0*, %s.0** %v0, align 4
+  %v3 = load %s.0*, %s.0** %v1, align 4
+  %v4 = bitcast %s.0* %v2 to i8*
+  %v5 = bitcast %s.0* %v3 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %v4, i8* align 4 %v5, i32 12, i1 false)
+  ret void
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { argmemonly nounwind }
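
For reference, the three checked loads correspond to the three i32 fields of
%s.0 being copied by the 12-byte memcpy. A field-by-field sketch of the same
copy (hypothetical function name, not part of the test) that makes the
expected memw offsets #0, #4 and #8 explicit:

  %t.0 = type { i32, i32, i32 }

  define void @copy_fields(%t.0* %d, %t.0* %s) {
  b0:
    %p0 = getelementptr inbounds %t.0, %t.0* %s, i32 0, i32 0
    %p1 = getelementptr inbounds %t.0, %t.0* %s, i32 0, i32 1
    %p2 = getelementptr inbounds %t.0, %t.0* %s, i32 0, i32 2
    %v0 = load i32, i32* %p0, align 4    ; memw(...+#0)
    %v1 = load i32, i32* %p1, align 4    ; memw(...+#4)
    %v2 = load i32, i32* %p2, align 4    ; memw(...+#8)
    %q0 = getelementptr inbounds %t.0, %t.0* %d, i32 0, i32 0
    %q1 = getelementptr inbounds %t.0, %t.0* %d, i32 0, i32 1
    %q2 = getelementptr inbounds %t.0, %t.0* %d, i32 0, i32 2
    store i32 %v0, i32* %q0, align 4
    store i32 %v1, i32* %q1, align 4
    store i32 %v2, i32* %q2, align 4
    ret void
  }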

Added: llvm/trunk/test/CodeGen/Hexagon/aggr-licm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/aggr-licm.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/aggr-licm.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/aggr-licm.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,226 @@
+; RUN: llc -march=hexagon < %s -enable-misched=false | FileCheck %s
+
+; Test that LICM doesn't hoist an instruction incorrectly when register
+; aliases are not processed. In this case, LICM hoists an assignment of 0
+; to a register that is already defined as part of a double register
+; (by the two "and" instructions).
+
+; CHECK: [[REG0:r([0-9]+)]] = and
+; CHECK: [[REG1:r([0-9]+)]] = and
+; CHECK-NOT: [[REG0]] =
+; CHECK-NOT: [[REG1]] =
+; CHECK: .LBB
+
+; Function Attrs: nounwind readnone
+define i64 @f0(i64 %a0) #0 {
+b0:
+  %v0 = lshr i64 %a0, 1
+  %v1 = and i64 %v0, 6148914691236517205
+  %v2 = and i64 %a0, 6148914691236517205
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v3 = phi i64 [ %v2, %b0 ], [ %v197, %b1 ]
+  %v4 = phi i64 [ %v1, %b0 ], [ %v196, %b1 ]
+  %v5 = phi i64 [ 0, %b0 ], [ %v195, %b1 ]
+  %v6 = phi i32 [ 0, %b0 ], [ %v198, %b1 ]
+  %v7 = and i64 %v3, 1
+  %v8 = zext i32 %v6 to i64
+  %v9 = shl i64 %v7, %v8
+  %v10 = and i64 %v4, 1
+  %v11 = add nuw nsw i32 %v6, 32
+  %v12 = zext i32 %v11 to i64
+  %v13 = shl i64 %v10, %v12
+  %v14 = or i64 %v13, %v5
+  %v15 = or i64 %v14, %v9
+  %v16 = lshr i64 %v4, 2
+  %v17 = lshr i64 %v3, 2
+  %v18 = add nuw nsw i32 %v6, 1
+  %v19 = and i64 %v17, 1
+  %v20 = zext i32 %v18 to i64
+  %v21 = shl i64 %v19, %v20
+  %v22 = and i64 %v16, 1
+  %v23 = add nsw i32 %v6, 33
+  %v24 = zext i32 %v23 to i64
+  %v25 = shl i64 %v22, %v24
+  %v26 = or i64 %v25, %v15
+  %v27 = or i64 %v26, %v21
+  %v28 = lshr i64 %v4, 4
+  %v29 = lshr i64 %v3, 4
+  %v30 = add nsw i32 %v6, 2
+  %v31 = and i64 %v29, 1
+  %v32 = zext i32 %v30 to i64
+  %v33 = shl i64 %v31, %v32
+  %v34 = and i64 %v28, 1
+  %v35 = add nsw i32 %v6, 34
+  %v36 = zext i32 %v35 to i64
+  %v37 = shl i64 %v34, %v36
+  %v38 = or i64 %v37, %v27
+  %v39 = or i64 %v38, %v33
+  %v40 = lshr i64 %v4, 6
+  %v41 = lshr i64 %v3, 6
+  %v42 = add nsw i32 %v6, 3
+  %v43 = and i64 %v41, 1
+  %v44 = zext i32 %v42 to i64
+  %v45 = shl i64 %v43, %v44
+  %v46 = and i64 %v40, 1
+  %v47 = add nsw i32 %v6, 35
+  %v48 = zext i32 %v47 to i64
+  %v49 = shl i64 %v46, %v48
+  %v50 = or i64 %v49, %v39
+  %v51 = or i64 %v50, %v45
+  %v52 = lshr i64 %v4, 8
+  %v53 = lshr i64 %v3, 8
+  %v54 = add nsw i32 %v6, 4
+  %v55 = and i64 %v53, 1
+  %v56 = zext i32 %v54 to i64
+  %v57 = shl i64 %v55, %v56
+  %v58 = and i64 %v52, 1
+  %v59 = add nsw i32 %v6, 36
+  %v60 = zext i32 %v59 to i64
+  %v61 = shl i64 %v58, %v60
+  %v62 = or i64 %v61, %v51
+  %v63 = or i64 %v62, %v57
+  %v64 = lshr i64 %v4, 10
+  %v65 = lshr i64 %v3, 10
+  %v66 = add nsw i32 %v6, 5
+  %v67 = and i64 %v65, 1
+  %v68 = zext i32 %v66 to i64
+  %v69 = shl i64 %v67, %v68
+  %v70 = and i64 %v64, 1
+  %v71 = add nsw i32 %v6, 37
+  %v72 = zext i32 %v71 to i64
+  %v73 = shl i64 %v70, %v72
+  %v74 = or i64 %v73, %v63
+  %v75 = or i64 %v74, %v69
+  %v76 = lshr i64 %v4, 12
+  %v77 = lshr i64 %v3, 12
+  %v78 = add nsw i32 %v6, 6
+  %v79 = and i64 %v77, 1
+  %v80 = zext i32 %v78 to i64
+  %v81 = shl i64 %v79, %v80
+  %v82 = and i64 %v76, 1
+  %v83 = add nsw i32 %v6, 38
+  %v84 = zext i32 %v83 to i64
+  %v85 = shl i64 %v82, %v84
+  %v86 = or i64 %v85, %v75
+  %v87 = or i64 %v86, %v81
+  %v88 = lshr i64 %v4, 14
+  %v89 = lshr i64 %v3, 14
+  %v90 = add nsw i32 %v6, 7
+  %v91 = and i64 %v89, 1
+  %v92 = zext i32 %v90 to i64
+  %v93 = shl i64 %v91, %v92
+  %v94 = and i64 %v88, 1
+  %v95 = add nsw i32 %v6, 39
+  %v96 = zext i32 %v95 to i64
+  %v97 = shl i64 %v94, %v96
+  %v98 = or i64 %v97, %v87
+  %v99 = or i64 %v98, %v93
+  %v100 = lshr i64 %v4, 16
+  %v101 = lshr i64 %v3, 16
+  %v102 = add nsw i32 %v6, 8
+  %v103 = and i64 %v101, 1
+  %v104 = zext i32 %v102 to i64
+  %v105 = shl i64 %v103, %v104
+  %v106 = and i64 %v100, 1
+  %v107 = add nsw i32 %v6, 40
+  %v108 = zext i32 %v107 to i64
+  %v109 = shl i64 %v106, %v108
+  %v110 = or i64 %v109, %v99
+  %v111 = or i64 %v110, %v105
+  %v112 = lshr i64 %v4, 18
+  %v113 = lshr i64 %v3, 18
+  %v114 = add nsw i32 %v6, 9
+  %v115 = and i64 %v113, 1
+  %v116 = zext i32 %v114 to i64
+  %v117 = shl i64 %v115, %v116
+  %v118 = and i64 %v112, 1
+  %v119 = add nsw i32 %v6, 41
+  %v120 = zext i32 %v119 to i64
+  %v121 = shl i64 %v118, %v120
+  %v122 = or i64 %v121, %v111
+  %v123 = or i64 %v122, %v117
+  %v124 = lshr i64 %v4, 20
+  %v125 = lshr i64 %v3, 20
+  %v126 = add nsw i32 %v6, 10
+  %v127 = and i64 %v125, 1
+  %v128 = zext i32 %v126 to i64
+  %v129 = shl i64 %v127, %v128
+  %v130 = and i64 %v124, 1
+  %v131 = add nsw i32 %v6, 42
+  %v132 = zext i32 %v131 to i64
+  %v133 = shl i64 %v130, %v132
+  %v134 = or i64 %v133, %v123
+  %v135 = or i64 %v134, %v129
+  %v136 = lshr i64 %v4, 22
+  %v137 = lshr i64 %v3, 22
+  %v138 = add nsw i32 %v6, 11
+  %v139 = and i64 %v137, 1
+  %v140 = zext i32 %v138 to i64
+  %v141 = shl i64 %v139, %v140
+  %v142 = and i64 %v136, 1
+  %v143 = add nsw i32 %v6, 43
+  %v144 = zext i32 %v143 to i64
+  %v145 = shl i64 %v142, %v144
+  %v146 = or i64 %v145, %v135
+  %v147 = or i64 %v146, %v141
+  %v148 = lshr i64 %v4, 24
+  %v149 = lshr i64 %v3, 24
+  %v150 = add nsw i32 %v6, 12
+  %v151 = and i64 %v149, 1
+  %v152 = zext i32 %v150 to i64
+  %v153 = shl i64 %v151, %v152
+  %v154 = and i64 %v148, 1
+  %v155 = add nsw i32 %v6, 44
+  %v156 = zext i32 %v155 to i64
+  %v157 = shl i64 %v154, %v156
+  %v158 = or i64 %v157, %v147
+  %v159 = or i64 %v158, %v153
+  %v160 = lshr i64 %v4, 26
+  %v161 = lshr i64 %v3, 26
+  %v162 = add nsw i32 %v6, 13
+  %v163 = and i64 %v161, 1
+  %v164 = zext i32 %v162 to i64
+  %v165 = shl i64 %v163, %v164
+  %v166 = and i64 %v160, 1
+  %v167 = add nsw i32 %v6, 45
+  %v168 = zext i32 %v167 to i64
+  %v169 = shl i64 %v166, %v168
+  %v170 = or i64 %v169, %v159
+  %v171 = or i64 %v170, %v165
+  %v172 = lshr i64 %v4, 28
+  %v173 = lshr i64 %v3, 28
+  %v174 = add nsw i32 %v6, 14
+  %v175 = and i64 %v173, 1
+  %v176 = zext i32 %v174 to i64
+  %v177 = shl i64 %v175, %v176
+  %v178 = and i64 %v172, 1
+  %v179 = add nsw i32 %v6, 46
+  %v180 = zext i32 %v179 to i64
+  %v181 = shl i64 %v178, %v180
+  %v182 = or i64 %v181, %v171
+  %v183 = or i64 %v182, %v177
+  %v184 = lshr i64 %v4, 30
+  %v185 = lshr i64 %v3, 30
+  %v186 = add nsw i32 %v6, 15
+  %v187 = and i64 %v185, 1
+  %v188 = zext i32 %v186 to i64
+  %v189 = shl i64 %v187, %v188
+  %v190 = and i64 %v184, 1
+  %v191 = add nsw i32 %v6, 47
+  %v192 = zext i32 %v191 to i64
+  %v193 = shl i64 %v190, %v192
+  %v194 = or i64 %v193, %v183
+  %v195 = or i64 %v194, %v189
+  %v196 = lshr i64 %v4, 32
+  %v197 = lshr i64 %v3, 32
+  %v198 = add nsw i32 %v6, 16
+  %v199 = icmp eq i32 %v198, 32
+  br i1 %v199, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  ret i64 %v195
+}
+
+attributes #0 = { nounwind readnone "target-cpu"="hexagonv60" }

Added: llvm/trunk/test/CodeGen/Hexagon/aggressive_licm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/aggressive_licm.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/aggressive_licm.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/aggressive_licm.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,68 @@
+; RUN: llc -march=hexagon -disable-block-placement=0 -O2 < %s | FileCheck %s
+; CHECK: [[Reg:r[0-9]+]] = {{lsr\(r[0-9]+,#16\)|extractu\(r[0-9]+,#16,#16\)}}
+; CHECK-NOT: [[Reg]] = #0
+; CHECK: align
+; CHECK-NEXT: LBB
+
+target triple = "hexagon"
+
+@g0 = common global [4 x i16] zeroinitializer, align 8
+
+; Function Attrs: nounwind
+define i32 @f0() #0 {
+b0:
+  %v0 = alloca i16, align 2
+  call void @f1(i16* getelementptr inbounds ([4 x i16], [4 x i16]* @g0, i32 0, i32 0), i16* %v0) #0
+  %v1 = load i16, i16* %v0, align 2, !tbaa !0
+  %v2 = icmp slt i16 %v1, -15
+  br i1 %v2, label %b1, label %b4
+
+b1:                                               ; preds = %b0
+  %v3 = load i32, i32* bitcast ([4 x i16]* @g0 to i32*), align 8
+  %v4 = trunc i32 %v3 to i16
+  %v5 = lshr i32 %v3, 16
+  %v6 = trunc i32 %v5 to i16
+  %v7 = load i32, i32* bitcast (i16* getelementptr inbounds ([4 x i16], [4 x i16]* @g0, i32 0, i32 2) to i32*), align 4
+  %v8 = trunc i32 %v7 to i16
+  %v9 = lshr i32 %v7, 16
+  %v10 = trunc i32 %v9 to i16
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v11 = phi i16 [ %v1, %b1 ], [ %v20, %b2 ]
+  %v12 = phi i16 [ %v10, %b1 ], [ 0, %b2 ]
+  %v13 = phi i16 [ %v8, %b1 ], [ %v12, %b2 ]
+  %v14 = phi i16 [ %v6, %b1 ], [ %v13, %b2 ]
+  %v15 = phi i16 [ %v4, %b1 ], [ %v14, %b2 ]
+  %v16 = phi i16 [ 0, %b1 ], [ %v19, %b2 ]
+  %v17 = icmp ne i16 %v16, 0
+  %v18 = zext i1 %v17 to i16
+  %v19 = or i16 %v15, %v18
+  %v20 = add i16 %v11, 16
+  %v21 = icmp slt i16 %v20, -15
+  br i1 %v21, label %b2, label %b3
+
+b3:                                               ; preds = %b2
+  store i16 %v14, i16* getelementptr inbounds ([4 x i16], [4 x i16]* @g0, i32 0, i32 0), align 8, !tbaa !0
+  store i16 %v13, i16* getelementptr inbounds ([4 x i16], [4 x i16]* @g0, i32 0, i32 1), align 2, !tbaa !0
+  store i16 %v12, i16* getelementptr inbounds ([4 x i16], [4 x i16]* @g0, i32 0, i32 2), align 4, !tbaa !0
+  store i16 0, i16* getelementptr inbounds ([4 x i16], [4 x i16]* @g0, i32 0, i32 3), align 2, !tbaa !0
+  store i16 %v20, i16* %v0, align 2, !tbaa !0
+  br label %b4
+
+b4:                                               ; preds = %b3, %b0
+  %v22 = phi i16 [ %v19, %b3 ], [ 0, %b0 ]
+  call void @f2(i16* getelementptr inbounds ([4 x i16], [4 x i16]* @g0, i32 0, i32 0), i16 signext %v22) #0
+  ret i32 0
+}
+
+declare void @f1(i16*, i16*) #0
+
+declare void @f2(i16*, i16 signext) #0
+
+attributes #0 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"short", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/align_Os.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/align_Os.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/align_Os.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/align_Os.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,19 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: {{.balign 4|.p2align 2}}
+; CHECK: {{.balign 4|.p2align 2}}
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind optsize readnone
+define i32 @f0() #0 section ".mysection.main" {
+b0:
+  ret i32 0
+}
+
+; Function Attrs: nounwind optsize readnone
+define i32 @f1() #0 section ".mysection.anothermain" {
+b0:
+  ret i32 0
+}
+
+attributes #0 = { nounwind optsize readnone "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/align_test.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/align_test.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/align_test.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/align_test.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,72 @@
+; RUN: llc -O2 -march=hexagon < %s | FileCheck %s
+; CHECK-NOT: memw
+; CHECK: memub
+
+target triple = "hexagon"
+
+%s.0 = type <{ i8, %s.1 }>
+%s.1 = type { [16 x i32] }
+
+; Function Attrs: nounwind
+define i32 @f0(i32 %a0, %s.0* nocapture %a1) #0 {
+b0:
+  %v0 = icmp sgt i32 %a0, 0
+  br i1 %v0, label %b1, label %b10
+
+b1:                                               ; preds = %b0
+  br label %b4
+
+b2:                                               ; preds = %b4
+  br i1 %v0, label %b3, label %b10
+
+b3:                                               ; preds = %b2
+  br label %b7
+
+b4:                                               ; preds = %b4, %b1
+  %v1 = phi i32 [ %v6, %b4 ], [ 0, %b1 ]
+  %v2 = phi i32 [ %v5, %b4 ], [ 0, %b1 ]
+  %v3 = getelementptr inbounds %s.0, %s.0* %a1, i32 0, i32 1, i32 0, i32 %v1
+  %v4 = load i32, i32* %v3, align 1, !tbaa !0
+  %v5 = add nsw i32 %v4, %v2
+  %v6 = add nsw i32 %v1, 1
+  %v7 = icmp eq i32 %v6, %a0
+  br i1 %v7, label %b2, label %b4
+
+b5:                                               ; preds = %b7
+  br i1 %v0, label %b6, label %b10
+
+b6:                                               ; preds = %b5
+  br label %b8
+
+b7:                                               ; preds = %b7, %b3
+  %v8 = phi i32 [ %v13, %b7 ], [ 0, %b3 ]
+  %v9 = phi i32 [ %v12, %b7 ], [ %v5, %b3 ]
+  %v10 = getelementptr inbounds %s.0, %s.0* %a1, i32 0, i32 1, i32 0, i32 %v8
+  %v11 = load i32, i32* %v10, align 1, !tbaa !0
+  %v12 = add nsw i32 %v11, %v9
+  %v13 = add nsw i32 %v8, 1
+  %v14 = icmp eq i32 %v13, %a0
+  br i1 %v14, label %b5, label %b7
+
+b8:                                               ; preds = %b8, %b6
+  %v15 = phi i32 [ %v17, %b8 ], [ 0, %b6 ]
+  %v16 = getelementptr inbounds %s.0, %s.0* %a1, i32 0, i32 1, i32 0, i32 %v15
+  store i32 %a0, i32* %v16, align 1, !tbaa !0
+  %v17 = add nsw i32 %v15, 1
+  %v18 = icmp eq i32 %v17, %a0
+  br i1 %v18, label %b9, label %b8
+
+b9:                                               ; preds = %b8
+  br label %b10
+
+b10:                                              ; preds = %b9, %b5, %b2, %b0
+  %v19 = phi i32 [ %v12, %b5 ], [ %v5, %b2 ], [ 0, %b0 ], [ %v12, %b9 ]
+  ret i32 %v19
+}
+
+attributes #0 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/asr-rnd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/asr-rnd.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/asr-rnd.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/asr-rnd.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,33 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+;
+; Check that we generate the rounding-asr instruction.  It is equivalent to
+; Rd = ((Rs >> #u) + 1) >> 1.
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define i32 @f0(i32 %a0) #0 {
+b0:
+; CHECK: asr{{.*}}:rnd
+  %v0 = alloca i32, align 4
+  store i32 %a0, i32* %v0, align 4
+  %v1 = load i32, i32* %v0, align 4
+  %v2 = ashr i32 %v1, 10
+  %v3 = add nsw i32 %v2, 1
+  %v4 = ashr i32 %v3, 1
+  ret i32 %v4
+}
+
+; Function Attrs: nounwind
+define i64 @f1(i64 %a0) #0 {
+b0:
+; CHECK: asr{{.*}}:rnd
+  %v0 = alloca i64, align 8
+  store i64 %a0, i64* %v0, align 8
+  %v1 = load i64, i64* %v0, align 8
+  %v2 = ashr i64 %v1, 17
+  %v3 = add nsw i64 %v2, 1
+  %v4 = ashr i64 %v3, 1
+  ret i64 %v4
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/asr-rnd64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/asr-rnd64.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/asr-rnd64.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/asr-rnd64.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,30 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+;
+; Check that we generate the rounding-asr instruction.  It is equivalent to
+; Rd = ((Rs >> #u) + 1) >> 1.
+
+target triple = "hexagon"
+
+define i32 @f0(i32 %a0) {
+b0:
+; CHECK: asr{{.*}}:rnd
+  %v0 = alloca i32, align 4
+  store i32 %a0, i32* %v0, align 4
+  %v1 = load i32, i32* %v0, align 4
+  %v2 = ashr i32 %v1, 10
+  %v3 = add nsw i32 %v2, 1
+  %v4 = ashr i32 %v3, 1
+  ret i32 %v4
+}
+
+define i64 @f1(i64 %a0) {
+b0:
+; CHECK: asr{{.*}}:rnd
+  %v0 = alloca i64, align 8
+  store i64 %a0, i64* %v0, align 8
+  %v1 = load i64, i64* %v0, align 8
+  %v2 = ashr i64 %v1, 17
+  %v3 = add nsw i64 %v2, 1
+  %v4 = ashr i64 %v3, 1
+  ret i64 %v4
+}

Added: llvm/trunk/test/CodeGen/Hexagon/assert-postinc-ptr-not-value.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/assert-postinc-ptr-not-value.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/assert-postinc-ptr-not-value.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/assert-postinc-ptr-not-value.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,256 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: f1
+
+target triple = "hexagon"
+
+%s.0 = type { i32 }
+
+@g0 = internal unnamed_addr global %s.0* null, section ".data.............", align 4
+@g1 = internal global i32 0, section ".data.............", align 4
+
+; Function Attrs: nounwind
+define %s.0* @f0(i32* %a0) #0 {
+b0:
+  %v0 = getelementptr inbounds i32, i32* %a0, i32 -1
+  %v1 = load i32, i32* %v0, align 4
+  %v2 = and i32 %v1, -3
+  store i32 %v2, i32* %v0, align 4
+  %v3 = getelementptr inbounds i32, i32* %a0, i32 -2
+  %v4 = load i32, i32* %v3, align 4
+  %v5 = lshr i32 %v4, 2
+  %v6 = xor i32 %v5, -1
+  %v7 = getelementptr inbounds i32, i32* %a0, i32 %v6
+  %v8 = lshr i32 %v1, 2
+  %v9 = add i32 %v8, -1
+  %v10 = getelementptr inbounds i32, i32* %a0, i32 %v9
+  %v11 = load i32, i32* %v10, align 4
+  %v12 = lshr i32 %v11, 2
+  %v13 = icmp eq i32 %v12, 0
+  br i1 %v13, label %b3, label %b1
+
+b1:                                               ; preds = %b0
+  %v14 = add i32 %v12, %v9
+  %v15 = getelementptr inbounds i32, i32* %a0, i32 %v14
+  %v16 = load i32, i32* %v15, align 4
+  %v17 = and i32 %v16, 1
+  %v18 = icmp eq i32 %v17, 0
+  br i1 %v18, label %b3, label %b2
+
+b2:                                               ; preds = %b1
+  %v19 = add nsw i32 %v12, %v8
+  %v20 = shl i32 %v19, 2
+  %v21 = and i32 %v1, 1
+  %v22 = or i32 %v20, %v21
+  store i32 %v22, i32* %v0, align 4
+  br label %b3
+
+b3:                                               ; preds = %b2, %b1, %b0
+  %v23 = phi i32 [ %v2, %b1 ], [ %v2, %b0 ], [ %v22, %b2 ]
+  %v24 = and i32 %v23, 1
+  %v25 = icmp eq i32 %v24, 0
+  br i1 %v25, label %b5, label %b4
+
+b4:                                               ; preds = %b3
+  %v26 = load i32, i32* %v7, align 4
+  %v27 = and i32 %v26, -4
+  %v28 = add i32 %v27, %v23
+  %v29 = and i32 %v28, -4
+  %v30 = and i32 %v26, 3
+  %v31 = or i32 %v29, %v30
+  store i32 %v31, i32* %v7, align 4
+  br label %b5
+
+b5:                                               ; preds = %b4, %b3
+  %v32 = phi i32 [ %v31, %b4 ], [ %v23, %b3 ]
+  %v33 = phi i32* [ %v7, %b4 ], [ %v0, %b3 ]
+  %v34 = bitcast i32* %v33 to %s.0*
+  %v35 = lshr i32 %v32, 2
+  %v36 = add i32 %v35, -1
+  %v37 = getelementptr inbounds %s.0, %s.0* %v34, i32 %v36, i32 0
+  %v38 = load i32, i32* %v37, align 4
+  %v39 = shl nuw i32 %v35, 2
+  %v40 = and i32 %v38, 3
+  %v41 = or i32 %v40, %v39
+  store i32 %v41, i32* %v37, align 4
+  %v42 = load i32, i32* %v33, align 4
+  %v43 = lshr i32 %v42, 2
+  %v44 = getelementptr inbounds %s.0, %s.0* %v34, i32 %v43, i32 0
+  %v45 = load i32, i32* %v44, align 4
+  %v46 = or i32 %v45, 1
+  store i32 %v46, i32* %v44, align 4
+  ret %s.0* %v34
+}
+
+; Function Attrs: nounwind
+define i64 @f1(i32 %a0) #0 {
+b0:
+  %v0 = load %s.0*, %s.0** @g0, align 4, !tbaa !0
+  %v1 = getelementptr inbounds %s.0, %s.0* %v0, i32 7
+  tail call void @f2(i32* @g1) #0
+  br label %b1
+
+b1:                                               ; preds = %b5, %b0
+  %v2 = phi %s.0* [ %v1, %b0 ], [ %v20, %b5 ]
+  %v3 = getelementptr inbounds %s.0, %s.0* %v2, i32 0, i32 0
+  %v4 = load i32, i32* %v3, align 4
+  %v5 = and i32 %v4, 2
+  %v6 = icmp eq i32 %v5, 0
+  br i1 %v6, label %b3, label %b2
+
+b2:                                               ; preds = %b1
+  tail call fastcc void @f8()
+  %v7 = getelementptr inbounds %s.0, %s.0* %v2, i32 1, i32 0
+  %v8 = tail call %s.0* @f0(i32* %v7)
+  tail call fastcc void @f7()
+  br label %b3
+
+b3:                                               ; preds = %b2, %b1
+  %v9 = phi %s.0* [ %v8, %b2 ], [ %v2, %b1 ]
+  %v10 = getelementptr inbounds %s.0, %s.0* %v9, i32 0, i32 0
+  %v11 = load i32, i32* %v10, align 4
+  %v12 = lshr i32 %v11, 2
+  %v13 = getelementptr inbounds %s.0, %s.0* %v9, i32 %v12, i32 0
+  %v14 = load i32, i32* %v13, align 4
+  %v15 = and i32 %v14, 1
+  %v16 = icmp eq i32 %v15, 0
+  br i1 %v16, label %b5, label %b4
+
+b4:                                               ; preds = %b3
+  %v17 = mul i32 %v12, 4
+  %v18 = add i32 %v17, -4
+  %v19 = icmp ult i32 %v18, %a0
+  br i1 %v19, label %b5, label %b7
+
+b5:                                               ; preds = %b4, %b3
+  %v20 = getelementptr inbounds %s.0, %s.0* %v9, i32 %v12
+  %v21 = icmp ult i32 %v14, 4
+  br i1 %v21, label %b6, label %b1
+
+b6:                                               ; preds = %b5
+  tail call fastcc void @f3()
+  br label %b11
+
+b7:                                               ; preds = %b4
+  %v22 = add i32 %a0, 4
+  %v23 = lshr i32 %v22, 2
+  %v24 = add i32 %v23, 8
+  %v25 = lshr i32 %v24, 3
+  %v26 = mul nsw i32 %v25, 8
+  %v27 = sub nsw i32 %v12, %v26
+  %v28 = icmp sgt i32 %v27, 7
+  br i1 %v28, label %b8, label %b9
+
+b8:                                               ; preds = %b7
+  %v29 = getelementptr inbounds %s.0, %s.0* %v9, i32 %v26, i32 0
+  %v30 = shl i32 %v27, 2
+  store i32 %v30, i32* %v29, align 4
+  %v31 = load i32, i32* %v10, align 4
+  %v32 = lshr i32 %v31, 2
+  %v33 = add i32 %v32, -1
+  %v34 = getelementptr inbounds %s.0, %s.0* %v9, i32 %v33, i32 0
+  %v35 = load i32, i32* %v34, align 4
+  %v36 = and i32 %v35, 3
+  %v37 = or i32 %v36, %v30
+  store i32 %v37, i32* %v34, align 4
+  %v38 = load i32, i32* %v10, align 4
+  %v39 = mul i32 %v25, 32
+  %v40 = and i32 %v38, 3
+  %v41 = or i32 %v40, %v39
+  store i32 %v41, i32* %v10, align 4
+  br label %b10
+
+b9:                                               ; preds = %b7
+  %v42 = and i32 %v14, -2
+  store i32 %v42, i32* %v13, align 4
+  br label %b10
+
+b10:                                              ; preds = %b9, %b8
+  tail call fastcc void @f3()
+  %v43 = getelementptr inbounds %s.0, %s.0* %v9, i32 1
+  %v44 = load i32, i32* %v10, align 4
+  %v45 = lshr i32 %v44, 2
+  %v46 = mul i32 %v45, 4
+  %v47 = add i32 %v46, -4
+  %v48 = ptrtoint %s.0* %v43 to i32
+  %v49 = zext i32 %v47 to i64
+  %v50 = shl nuw i64 %v49, 32
+  %v51 = zext i32 %v48 to i64
+  br label %b11
+
+b11:                                              ; preds = %b10, %b6
+  %v52 = phi i64 [ 0, %b6 ], [ %v51, %b10 ]
+  %v53 = phi i64 [ 0, %b6 ], [ %v50, %b10 ]
+  %v54 = or i64 %v53, %v52
+  ret i64 %v54
+}
+
+declare void @f2(i32*) #0
+
+; Function Attrs: inlinehint nounwind
+define internal fastcc void @f3() #1 {
+b0:
+  store i32 0, i32* @g1, align 4, !tbaa !4
+  ret void
+}
+
+; Function Attrs: nounwind
+define void @f4(i32* nocapture %a0) #0 {
+b0:
+  %v0 = getelementptr inbounds i32, i32* %a0, i32 -1
+  tail call void @f2(i32* @g1) #0
+  %v1 = load i32, i32* %v0, align 4
+  %v2 = or i32 %v1, 2
+  store i32 %v2, i32* %v0, align 4
+  tail call fastcc void @f3()
+  ret void
+}
+
+; Function Attrs: nounwind
+define %s.0* @f5(i32* %a0) #0 {
+b0:
+  tail call void @f2(i32* @g1) #0
+  %v0 = tail call %s.0* @f0(i32* %a0)
+  tail call fastcc void @f3()
+  ret %s.0* %v0
+}
+
+; Function Attrs: nounwind
+define void @f6(%s.0* %a0, i32 %a1) #0 {
+b0:
+  %v0 = getelementptr inbounds %s.0, %s.0* %a0, i32 7, i32 0
+  %v1 = mul i32 %a1, 4
+  %v2 = add i32 %v1, -32
+  store i32 %v2, i32* %v0, align 4
+  %v3 = add i32 %a1, -1
+  %v4 = getelementptr inbounds %s.0, %s.0* %a0, i32 %v3, i32 0
+  store i32 1, i32* %v4, align 4
+  store i32 0, i32* @g1, align 4, !tbaa !4
+  store %s.0* %a0, %s.0** @g0, align 4
+  ret void
+}
+
+; Function Attrs: inlinehint nounwind
+define internal fastcc void @f7() #1 {
+b0:
+  tail call void asm sideeffect " nop", "~{memory}"() #0, !srcloc !6
+  ret void
+}
+
+; Function Attrs: inlinehint nounwind
+define internal fastcc void @f8() #1 {
+b0:
+  tail call void asm sideeffect " nop", "~{memory}"() #0, !srcloc !7
+  ret void
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { inlinehint nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"any pointer", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"int", !2}
+!6 = !{i32 782713}
+!7 = !{i32 782625}

Added: llvm/trunk/test/CodeGen/Hexagon/avoidVectorLowering.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/avoidVectorLowering.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/avoidVectorLowering.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/avoidVectorLowering.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,19 @@
+; RUN: llc -march=hexagon -O3 < %s | FileCheck %s
+; CHECK-NOT: vmem
+
+target triple = "hexagon-unknown--elf"
+
+@g0 = common global [32 x i16] zeroinitializer, align 8
+
+; Function Attrs: nounwind
+define i32 @f0() #0 {
+b0:
+  call void @llvm.memset.p0i8.i32(i8* align 8 bitcast ([32 x i16]* @g0 to i8*), i8 0, i32 64, i1 false)
+  ret i32 0
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length64b" }
+attributes #1 = { argmemonly nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/base-offset-stv4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/base-offset-stv4.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/base-offset-stv4.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/base-offset-stv4.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,31 @@
+; RUN: llc -march=hexagon -enable-pipeliner < %s
+; REQUIRES: asserts
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  br i1 undef, label %b1, label %b4
+
+b1:                                               ; preds = %b0
+  %v0 = load i16*, i16** undef, align 4
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v1 = phi i32 [ 13, %b1 ], [ %v5, %b2 ]
+  %v2 = getelementptr inbounds i16, i16* %v0, i32 %v1
+  %v3 = add nsw i32 0, %v1
+  %v4 = getelementptr inbounds i16, i16* %v0, i32 %v3
+  store i16 0, i16* %v4, align 2
+  store i16 0, i16* %v2, align 2
+  %v5 = add i32 %v1, 1
+  %v6 = icmp eq i32 %v5, 26
+  br i1 %v6, label %b3, label %b2
+
+b3:                                               ; preds = %b3, %b2
+  br i1 undef, label %b4, label %b3
+
+b4:                                               ; preds = %b3, %b0
+  ret void
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/bkfir.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/bkfir.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/bkfir.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/bkfir.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,104 @@
+; RUN: llc -march=hexagon -O2 < %s
+; REQUIRES: asserts
+; Check for successful compilation.
+
+target triple = "hexagon-unknown--elf"
+
+; Function Attrs: nounwind optsize
+define void @f0(i16* nocapture readonly %a0, i16* nocapture readonly %a1, i16* nocapture %a2, i32 %a3, i32 %a4, i32 %a5) #0 {
+b0:
+  %v0 = bitcast i16* %a0 to i64*
+  %v1 = bitcast i16* %a1 to i64*
+  %v2 = icmp sgt i32 %a5, 0
+  br i1 %v2, label %b1, label %b6
+
+b1:                                               ; preds = %b0
+  %v3 = icmp sgt i32 %a4, 0
+  %v4 = getelementptr i16, i16* %a2, i32 5
+  %v5 = getelementptr i16, i16* %a2, i32 6
+  %v6 = getelementptr i16, i16* %a2, i32 7
+  br label %b2
+
+b2:                                               ; preds = %b5, %b1
+  %v7 = phi i16* [ %a2, %b1 ], [ %v12, %b5 ]
+  %v8 = phi i16* [ %v4, %b1 ], [ %v59, %b5 ]
+  %v9 = phi i16* [ %v5, %b1 ], [ %v60, %b5 ]
+  %v10 = phi i16* [ %v6, %b1 ], [ %v61, %b5 ]
+  %v11 = phi i32 [ 0, %b1 ], [ %v57, %b5 ]
+  %v12 = getelementptr i16, i16* %v7, i32 4
+  br i1 %v3, label %b3, label %b5
+
+b3:                                               ; preds = %b3, %b2
+  %v13 = phi i32 [ %v43, %b3 ], [ 0, %b2 ]
+  %v14 = phi i32 [ %v30, %b3 ], [ 0, %b2 ]
+  %v15 = phi i32 [ %v34, %b3 ], [ 0, %b2 ]
+  %v16 = phi i32 [ %v42, %b3 ], [ 0, %b2 ]
+  %v17 = phi i32 [ %v38, %b3 ], [ 0, %b2 ]
+  %v18 = add nsw i32 %v13, %v11
+  %v19 = sdiv i32 %v18, 4
+  %v20 = getelementptr inbounds i64, i64* %v0, i32 %v19
+  %v21 = load i64, i64* %v20, align 8
+  %v22 = add nsw i32 %v19, 1
+  %v23 = getelementptr inbounds i64, i64* %v0, i32 %v22
+  %v24 = load i64, i64* %v23, align 8
+  %v25 = sdiv i32 %v13, 4
+  %v26 = getelementptr inbounds i64, i64* %v1, i32 %v25
+  %v27 = load i64, i64* %v26, align 8
+  %v28 = sext i32 %v14 to i64
+  %v29 = tail call i64 @llvm.hexagon.M2.vrmac.s0(i64 %v28, i64 %v21, i64 %v27)
+  %v30 = trunc i64 %v29 to i32
+  %v31 = tail call i64 @llvm.hexagon.S2.valignib(i64 %v24, i64 %v21, i32 2)
+  %v32 = sext i32 %v15 to i64
+  %v33 = tail call i64 @llvm.hexagon.M2.vrmac.s0(i64 %v32, i64 %v31, i64 %v27)
+  %v34 = trunc i64 %v33 to i32
+  %v35 = tail call i64 @llvm.hexagon.S2.valignib(i64 %v24, i64 %v21, i32 4)
+  %v36 = sext i32 %v17 to i64
+  %v37 = tail call i64 @llvm.hexagon.M2.vrmac.s0(i64 %v36, i64 %v35, i64 %v27)
+  %v38 = trunc i64 %v37 to i32
+  %v39 = tail call i64 @llvm.hexagon.S2.valignib(i64 %v24, i64 %v21, i32 6)
+  %v40 = sext i32 %v16 to i64
+  %v41 = tail call i64 @llvm.hexagon.M2.vrmac.s0(i64 %v40, i64 %v39, i64 %v27)
+  %v42 = trunc i64 %v41 to i32
+  %v43 = add nsw i32 %v13, 4
+  %v44 = icmp slt i32 %v43, %a4
+  br i1 %v44, label %b3, label %b4
+
+b4:                                               ; preds = %b3
+  %v45 = ashr i32 %v30, 18
+  %v46 = trunc i32 %v45 to i16
+  %v47 = ashr i32 %v34, 18
+  %v48 = trunc i32 %v47 to i16
+  %v49 = ashr i32 %v38, 18
+  %v50 = trunc i32 %v49 to i16
+  %v51 = ashr i32 %v42, 18
+  %v52 = trunc i32 %v51 to i16
+  br label %b5
+
+b5:                                               ; preds = %b4, %b2
+  %v53 = phi i16 [ %v46, %b4 ], [ 0, %b2 ]
+  %v54 = phi i16 [ %v48, %b4 ], [ 0, %b2 ]
+  %v55 = phi i16 [ %v52, %b4 ], [ 0, %b2 ]
+  %v56 = phi i16 [ %v50, %b4 ], [ 0, %b2 ]
+  %v57 = add nsw i32 %v11, 4
+  store i16 %v53, i16* %v12, align 8
+  store i16 %v54, i16* %v8, align 8
+  store i16 %v56, i16* %v9, align 8
+  store i16 %v55, i16* %v10, align 8
+  %v58 = icmp slt i32 %v57, %a5
+  %v59 = getelementptr i16, i16* %v8, i32 4
+  %v60 = getelementptr i16, i16* %v9, i32 4
+  %v61 = getelementptr i16, i16* %v10, i32 4
+  br i1 %v58, label %b2, label %b6
+
+b6:                                               ; preds = %b5, %b0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.M2.vrmac.s0(i64, i64, i64) #1
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.S2.valignib(i64, i64, i32) #1
+
+attributes #0 = { nounwind optsize }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/block-address.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/block-address.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/block-address.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/block-address.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,36 @@
+; RUN: llc -march=hexagon < %s
+; RUN: llc -march=hexagon -hexagon-small-data-threshold=0 < %s
+; REQUIRES: asserts
+
+@g0 = external global i8*
+
+; Function Attrs: nounwind
+define i32 @f0(i32 %a0, i32 %a1) #0 {
+b0:
+  %v0 = load i8*, i8** @g0, align 4, !tbaa !0
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v1 = phi i8* [ %v0, %b0 ], [ %v5, %b1 ]
+  %v2 = phi i32 [ %a0, %b0 ], [ %v3, %b1 ]
+  %v3 = add nsw i32 %v2, 10
+  %v4 = tail call i32 @f1(i8* %v1, i8* blockaddress(@f0, %b1), i8* blockaddress(@f0, %b2)) #0
+  %v5 = load i8*, i8** @g0, align 4, !tbaa !0
+  indirectbr i8* %v5, [label %b1, label %b2]
+
+b2:                                               ; preds = %b1
+  %v6 = add nsw i32 %v2, 19
+  %v7 = add i32 %v2, 69
+  %v8 = add i32 %v7, %v3
+  %v9 = mul nsw i32 %v8, %v6
+  ret i32 %v9
+}
+
+declare i32 @f1(i8*, i8*, i8*)
+
+attributes #0 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"any pointer", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/blockaddr-fpic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/blockaddr-fpic.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/blockaddr-fpic.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/blockaddr-fpic.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,157 @@
+; RUN: llc -march=hexagon -relocation-model=pic -O2 < %s | FileCheck %s
+; CHECK: r{{[0-9]+}} = add(pc,##.Ltmp0@PCREL)
+; CHECK-NOT: r{{[0-9]+}} = ##.Ltmp0
+
+target triple = "hexagon"
+
+%s.0 = type { [7 x i8*], [7 x i8*], [12 x i8*], [12 x i8*], [2 x i8*], i8*, i8*, i8*, i8* }
+%s.1 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32 }
+
+@g0 = private unnamed_addr constant [4 x i8] c"Sun\00", align 1
+@g1 = private unnamed_addr constant [4 x i8] c"Mon\00", align 1
+@g2 = private unnamed_addr constant [4 x i8] c"Tue\00", align 1
+@g3 = private unnamed_addr constant [4 x i8] c"Wed\00", align 1
+@g4 = private unnamed_addr constant [4 x i8] c"Thu\00", align 1
+@g5 = private unnamed_addr constant [4 x i8] c"Fri\00", align 1
+@g6 = private unnamed_addr constant [4 x i8] c"Sat\00", align 1
+@g7 = private unnamed_addr constant [7 x i8] c"Sunday\00", align 1
+@g8 = private unnamed_addr constant [7 x i8] c"Monday\00", align 1
+@g9 = private unnamed_addr constant [8 x i8] c"Tuesday\00", align 1
+@g10 = private unnamed_addr constant [10 x i8] c"Wednesday\00", align 1
+@g11 = private unnamed_addr constant [9 x i8] c"Thursday\00", align 1
+@g12 = private unnamed_addr constant [7 x i8] c"Friday\00", align 1
+@g13 = private unnamed_addr constant [9 x i8] c"Saturday\00", align 1
+@g14 = private unnamed_addr constant [4 x i8] c"Jan\00", align 1
+@g15 = private unnamed_addr constant [4 x i8] c"Feb\00", align 1
+@g16 = private unnamed_addr constant [4 x i8] c"Mar\00", align 1
+@g17 = private unnamed_addr constant [4 x i8] c"Apr\00", align 1
+@g18 = private unnamed_addr constant [4 x i8] c"May\00", align 1
+@g19 = private unnamed_addr constant [4 x i8] c"Jun\00", align 1
+@g20 = private unnamed_addr constant [4 x i8] c"Jul\00", align 1
+@g21 = private unnamed_addr constant [4 x i8] c"Aug\00", align 1
+@g22 = private unnamed_addr constant [4 x i8] c"Sep\00", align 1
+@g23 = private unnamed_addr constant [4 x i8] c"Oct\00", align 1
+@g24 = private unnamed_addr constant [4 x i8] c"Nov\00", align 1
+@g25 = private unnamed_addr constant [4 x i8] c"Dec\00", align 1
+@g26 = private unnamed_addr constant [8 x i8] c"January\00", align 1
+@g27 = private unnamed_addr constant [9 x i8] c"February\00", align 1
+@g28 = private unnamed_addr constant [6 x i8] c"March\00", align 1
+@g29 = private unnamed_addr constant [6 x i8] c"April\00", align 1
+@g30 = private unnamed_addr constant [5 x i8] c"June\00", align 1
+@g31 = private unnamed_addr constant [5 x i8] c"July\00", align 1
+@g32 = private unnamed_addr constant [7 x i8] c"August\00", align 1
+@g33 = private unnamed_addr constant [10 x i8] c"September\00", align 1
+@g34 = private unnamed_addr constant [8 x i8] c"October\00", align 1
+@g35 = private unnamed_addr constant [9 x i8] c"November\00", align 1
+@g36 = private unnamed_addr constant [9 x i8] c"December\00", align 1
+@g37 = private unnamed_addr constant [3 x i8] c"AM\00", align 1
+@g38 = private unnamed_addr constant [3 x i8] c"PM\00", align 1
+@g39 = private unnamed_addr constant [21 x i8] c"%a %b %e %H:%M:%S %Y\00", align 1
+@g40 = private unnamed_addr constant [9 x i8] c"%m/%d/%y\00", align 1
+@g41 = private unnamed_addr constant [9 x i8] c"%H:%M:%S\00", align 1
+@g42 = private unnamed_addr constant [12 x i8] c"%I:%M:%S %p\00", align 1
+@g43 = constant %s.0 { [7 x i8*] [i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g1, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g2, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g3, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g4, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g5, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g6, i32 0, i32 0)], [7 x i8*] [i8* getelementptr inbounds ([7 x i8], [7 x i8]* @g7, i32 0, i32 0), i8* getelementptr inbounds ([7 x i8], [7 x i8]* @g8, i32 0, i32 0), i8* getelementptr inbounds ([8 x i8], [8 x i8]* @g9, i32 0, i32 0), i8* getelementptr inbounds ([10 x i8], [10 x i8]* @g10, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @g11, i32 0, i32 0), i8* getelementptr inbounds ([7 x i8], [7 x i8]* @g12, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @g13, i32 0, i32 0)], [12 x i8*] [i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g14, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g15, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g16, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g17, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g18, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g19, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g20, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g21, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g22, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g23, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g24, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g25, i32 0, i32 0)], [12 x i8*] [i8* getelementptr inbounds ([8 x i8], [8 x i8]* @g26, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @g27, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @g28, i32 0, i32 0), i8* getelementptr inbounds ([6 x i8], [6 x i8]* @g29, i32 0, i32 0), i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g18, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @g30, i32 0, i32 0), i8* getelementptr inbounds ([5 x i8], [5 x i8]* @g31, i32 0, i32 0), i8* getelementptr inbounds ([7 x i8], [7 x i8]* @g32, i32 0, i32 0), i8* getelementptr inbounds ([10 x i8], [10 x i8]* @g33, i32 0, i32 0), i8* getelementptr inbounds ([8 x i8], [8 x i8]* @g34, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @g35, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @g36, i32 0, i32 0)], [2 x i8*] [i8* getelementptr inbounds ([3 x i8], [3 x i8]* @g37, i32 0, i32 0), i8* getelementptr inbounds ([3 x i8], [3 x i8]* @g38, i32 0, i32 0)], i8* getelementptr inbounds ([21 x i8], [21 x i8]* @g39, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @g40, i32 0, i32 0), i8* getelementptr inbounds ([9 x i8], [9 x i8]* @g41, i32 0, i32 0), i8* getelementptr inbounds ([12 x i8], [12 x i8]* @g42, i32 0, i32 0) }, align 4
+@g44 = global %s.0* @g43, align 4
+@g45 = private unnamed_addr constant [6 x i8] c"%H:%M\00", align 1
+
+; Function Attrs: nounwind readonly
+define i8* @f0(i8* readonly %a0, i8* nocapture readonly %a1, %s.1* readonly %a2) #0 {
+b0:
+  %v0 = icmp eq i8* %a0, null
+  br i1 %v0, label %b15, label %b1
+
+b1:                                               ; preds = %b0
+  %v1 = load %s.0*, %s.0** @g44, align 4, !tbaa !0
+  %v2 = getelementptr inbounds %s.0, %s.0* %v1, i32 0, i32 5
+  %v3 = getelementptr inbounds %s.0, %s.0* %v1, i32 0, i32 6
+  br label %b2
+
+b2:                                               ; preds = %b14, %b6, %b1
+  %v4 = phi i32 [ undef, %b1 ], [ %v31, %b14 ], [ 0, %b6 ]
+  %v5 = phi i8* [ %a0, %b1 ], [ %v30, %b14 ], [ %v18, %b6 ]
+  %v6 = phi i8* [ %a1, %b1 ], [ %v13, %b14 ], [ %v13, %b6 ]
+  %v7 = load i8, i8* %v6, align 1, !tbaa !4
+  %v8 = icmp eq i8 %v7, 0
+  br i1 %v8, label %b15, label %b3
+
+b3:                                               ; preds = %b2
+  %v9 = getelementptr inbounds i8, i8* %v6, i32 1
+  br label %b4
+
+b4:                                               ; preds = %b7, %b3
+  %v10 = phi i8* [ %v6, %b3 ], [ %v11, %b7 ]
+  %v11 = phi i8* [ %v9, %b3 ], [ %v13, %b7 ]
+  %v12 = phi i32 [ %v4, %b3 ], [ %v21, %b7 ]
+  %v13 = getelementptr inbounds i8, i8* %v10, i32 2
+  %v14 = load i8, i8* %v11, align 1, !tbaa !4
+  %v15 = zext i8 %v14 to i32
+  switch i32 %v15, label %b15 [
+    i32 37, label %b5
+    i32 69, label %b7
+    i32 79, label %b8
+    i32 99, label %b13
+    i32 68, label %b9
+    i32 82, label %b10
+    i32 120, label %b12
+  ]
+
+b5:                                               ; preds = %b4
+  %v16 = load i8, i8* %v5, align 1, !tbaa !4
+  %v17 = icmp eq i8 %v14, %v16
+  br i1 %v17, label %b6, label %b15
+
+b6:                                               ; preds = %b5
+  %v18 = getelementptr inbounds i8, i8* %v5, i32 1
+  %v19 = icmp eq i32 %v12, 0
+  br i1 %v19, label %b2, label %b15
+
+b7:                                               ; preds = %b10, %b9, %b8, %b4
+  %v20 = phi i8* [ blockaddress(@f0, %b4), %b8 ], [ blockaddress(@f0, %b11), %b9 ], [ blockaddress(@f0, %b11), %b10 ], [ blockaddress(@f0, %b4), %b4 ]
+  %v21 = phi i32 [ 2, %b8 ], [ 1, %b9 ], [ 1, %b10 ], [ 1, %b4 ]
+  %v22 = phi i8* [ getelementptr inbounds ([9 x i8], [9 x i8]* @g40, i32 0, i32 0), %b8 ], [ getelementptr inbounds ([9 x i8], [9 x i8]* @g40, i32 0, i32 0), %b9 ], [ getelementptr inbounds ([6 x i8], [6 x i8]* @g45, i32 0, i32 0), %b10 ], [ getelementptr inbounds ([9 x i8], [9 x i8]* @g40, i32 0, i32 0), %b4 ]
+  %v23 = icmp eq i32 %v12, 0
+  %v24 = select i1 %v23, i8* %v20, i8* blockaddress(@f0, %b15)
+  indirectbr i8* %v24, [label %b4, label %b11, label %b15]
+
+b8:                                               ; preds = %b4
+  br label %b7
+
+b9:                                               ; preds = %b4
+  br label %b7
+
+b10:                                              ; preds = %b4
+  br label %b7
+
+b11:                                              ; preds = %b7
+  %v25 = tail call i8* @f0(i8* %v5, i8* %v22, %s.1* %a2) #1
+  br label %b14
+
+b12:                                              ; preds = %b4
+  br label %b13
+
+b13:                                              ; preds = %b12, %b4
+  %v26 = phi i8** [ %v3, %b12 ], [ %v2, %b4 ]
+  %v27 = load i8*, i8** %v26, align 4
+  %v28 = tail call i8* @f0(i8* %v5, i8* %v27, %s.1* %a2) #1
+  %v29 = icmp ugt i32 %v12, 1
+  br i1 %v29, label %b15, label %b14
+
+b14:                                              ; preds = %b13, %b11
+  %v30 = phi i8* [ %v28, %b13 ], [ %v25, %b11 ]
+  %v31 = phi i32 [ %v12, %b13 ], [ 0, %b11 ]
+  %v32 = icmp eq i8* %v30, null
+  br i1 %v32, label %b15, label %b2
+
+b15:                                              ; preds = %b14, %b13, %b7, %b6, %b5, %b4, %b2, %b0
+  %v33 = phi i8* [ null, %b0 ], [ null, %b4 ], [ null, %b7 ], [ null, %b13 ], [ null, %b14 ], [ %v5, %b2 ], [ null, %b5 ], [ null, %b6 ]
+  ret i8* %v33
+}
+
+attributes #0 = { nounwind readonly }
+attributes #1 = { nobuiltin nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"any pointer", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!2, !2, i64 0}
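
The PIC checks above are about how the address of an address-taken block is
materialized: with -relocation-model=pic it must be formed relative to PC
rather than as an absolute ##.Ltmp0 immediate. A stripped-down sketch
(hypothetical function name, not part of the test) that exhibits the same
materialization:

  define i8* @lab() {
  b0:
    br label %b1

  b1:
    ; Under PIC the label address is expected to be formed PC-relative,
    ; e.g. r0 = add(pc,##.Ltmp<n>@PCREL), not as an absolute ##.Ltmp<n>.
    ret i8* blockaddress(@lab, %b1)
  }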

Added: llvm/trunk/test/CodeGen/Hexagon/brcond-setne.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/brcond-setne.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/brcond-setne.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/brcond-setne.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,85 @@
+; RUN: llc -O3 -march=hexagon < %s | FileCheck %s
+; CHECK: cmpb.eq
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define zeroext i8 @f0(i8** nocapture %a0, i32* nocapture %a1) #0 {
+b0:
+  %v0 = load i8*, i8** %a0, align 4, !tbaa !0
+  %v1 = load i8, i8* %v0, align 1, !tbaa !4
+  %v2 = icmp eq i8 %v1, 0
+  br i1 %v2, label %b11, label %b1
+
+b1:                                               ; preds = %b0
+  br label %b2
+
+b2:                                               ; preds = %b9, %b1
+  %v3 = phi i8 [ %v20, %b9 ], [ %v1, %b1 ]
+  %v4 = phi i8 [ %v17, %b9 ], [ 0, %b1 ]
+  %v5 = phi i8* [ %v18, %b9 ], [ %v0, %b1 ]
+  %v6 = icmp eq i8 %v3, 44
+  br i1 %v6, label %b3, label %b4
+
+b3:                                               ; preds = %b2
+  %v7 = phi i8* [ %v5, %b2 ]
+  %v8 = phi i8 [ %v4, %b2 ]
+  %v9 = getelementptr inbounds i8, i8* %v7, i32 1
+  br label %b11
+
+b4:                                               ; preds = %b2
+  %v10 = icmp eq i8 %v4, 0
+  br i1 %v10, label %b5, label %b9
+
+b5:                                               ; preds = %b4
+  %v11 = tail call zeroext i8 @f1(i8 zeroext %v3) #0
+  %v12 = icmp eq i8 %v11, 0
+  br i1 %v12, label %b6, label %b8
+
+b6:                                               ; preds = %b5
+  %v13 = icmp eq i8 %v3, 45
+  br i1 %v13, label %b7, label %b8
+
+b7:                                               ; preds = %b6
+  br label %b8
+
+b8:                                               ; preds = %b7, %b6, %b5
+  %v14 = phi i8 [ 2, %b7 ], [ 0, %b6 ], [ 4, %b5 ]
+  %v15 = load i32, i32* %a1, align 4, !tbaa !5
+  %v16 = add i32 %v15, 1
+  store i32 %v16, i32* %a1, align 4, !tbaa !5
+  br label %b9
+
+b9:                                               ; preds = %b8, %b4
+  %v17 = phi i8 [ %v14, %b8 ], [ %v4, %b4 ]
+  %v18 = getelementptr inbounds i8, i8* %v5, i32 1
+  %v19 = getelementptr i8, i8* %v5, i32 1
+  %v20 = load i8, i8* %v19, align 1, !tbaa !4
+  %v21 = icmp ne i8 %v20, 0
+  %v22 = icmp ne i8 %v17, 1
+  %v23 = and i1 %v21, %v22
+  br i1 %v23, label %b2, label %b10
+
+b10:                                              ; preds = %b9
+  %v24 = phi i8* [ %v18, %b9 ]
+  %v25 = phi i8 [ %v17, %b9 ]
+  br label %b11
+
+b11:                                              ; preds = %b10, %b3, %b0
+  %v26 = phi i8 [ %v8, %b3 ], [ 0, %b0 ], [ %v25, %b10 ]
+  %v27 = phi i8* [ %v9, %b3 ], [ %v0, %b0 ], [ %v24, %b10 ]
+  store i8* %v27, i8** %a0, align 4, !tbaa !0
+  ret i8 %v26
+}
+
+declare zeroext i8 @f1(i8 zeroext)
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"any pointer", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!2, !2, i64 0}
+!5 = !{!6, !6, i64 0}
+!6 = !{!"int", !2}

Added: llvm/trunk/test/CodeGen/Hexagon/bss-local.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/bss-local.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/bss-local.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/bss-local.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,24 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+target triple = "hexagon"
+
+@g0 = common global [16 x i32] zeroinitializer, align 8
+@g1 = internal global [16 x i32] zeroinitializer, align 8
+
+; CHECK-NOT: g1.*lcomm
+
+; Function Attrs: nounwind
+define i32 @f0(i32 %a0) #0 {
+b0:
+  call void @f1(i32* getelementptr inbounds ([16 x i32], [16 x i32]* @g0, i32 0, i32 0), i32* getelementptr inbounds ([16 x i32], [16 x i32]* @g1, i32 0, i32 0))
+  %v0 = getelementptr inbounds [16 x i32], [16 x i32]* @g0, i32 0, i32 %a0
+  %v1 = load i32, i32* %v0, align 4
+  %v2 = getelementptr inbounds [16 x i32], [16 x i32]* @g1, i32 0, i32 %a0
+  %v3 = load i32, i32* %v2, align 4
+  %v4 = add nsw i32 %v1, %v3
+  ret i32 %v4
+}
+
+declare void @f1(i32*, i32*)
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/bug-aa4463-ifconv-vecpred.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/bug-aa4463-ifconv-vecpred.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/bug-aa4463-ifconv-vecpred.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/bug-aa4463-ifconv-vecpred.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,42 @@
+; RUN: llc -march=hexagon -O2 < %s
+; REQUIRES: asserts
+
+define inreg <16 x i32> @f0(i32 %a0, <16 x i32>* nocapture %a1) #0 {
+b0:
+  %v0 = tail call <512 x i1> @llvm.hexagon.V6.pred.scalar2(i32 %a0)
+  %v1 = tail call <512 x i1> @llvm.hexagon.V6.pred.not(<512 x i1> %v0)
+  %v2 = icmp ult i32 %a0, 48
+  br i1 %v2, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  %v3 = add nuw nsw i32 %a0, 16
+  %v4 = tail call <512 x i1> @llvm.hexagon.V6.pred.scalar2(i32 %v3)
+  %v5 = tail call <512 x i1> @llvm.hexagon.V6.pred.and(<512 x i1> %v4, <512 x i1> %v1)
+  br label %b2
+
+b2:                                               ; preds = %b1, %b0
+  %v6 = phi <512 x i1> [ %v5, %b1 ], [ %v1, %b0 ]
+  %v7 = bitcast <512 x i1> %v6 to <16 x i32>
+  %v8 = getelementptr inbounds <16 x i32>, <16 x i32>* %a1, i32 1
+  %v9 = load <16 x i32>, <16 x i32>* %v8, align 64
+  %v10 = getelementptr inbounds <16 x i32>, <16 x i32>* %a1, i32 2
+  %v11 = load <16 x i32>, <16 x i32>* %v10, align 64
+  %v12 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v6, <16 x i32> %v9, <16 x i32> %v11)
+  store <16 x i32> %v12, <16 x i32>* %a1, align 64
+  ret <16 x i32> %v7
+}
+
+; Function Attrs: nounwind readnone
+declare <512 x i1> @llvm.hexagon.V6.pred.not(<512 x i1>) #1
+
+; Function Attrs: nounwind readnone
+declare <512 x i1> @llvm.hexagon.V6.pred.scalar2(i32) #1
+
+; Function Attrs: nounwind readnone
+declare <512 x i1> @llvm.hexagon.V6.pred.and(<512 x i1>, <512 x i1>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1>, <16 x i32>, <16 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/bug-allocframe-size.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/bug-allocframe-size.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/bug-allocframe-size.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/bug-allocframe-size.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,116 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+
+; Make sure we allocate less than 100 bytes of stack
+; CHECK: allocframe(#{{[1-9][0-9]}}
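+; The pattern above matches a two-digit immediate, e.g. "allocframe(#24)".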
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define float @f0(float %a0) #0 {
+b0:
+  %v0 = alloca float, align 4
+  %v1 = alloca i16, align 2
+  %v2 = alloca float, align 4
+  store float %a0, float* %v0, align 4, !tbaa !0
+  %v3 = call signext i16 @f1(i16* %v1, float* %v0) #1
+  %v4 = icmp ult i16 %v3, 3
+  br i1 %v4, label %b11, label %b1
+
+b1:                                               ; preds = %b0
+  %v5 = load i16, i16* %v1, align 2, !tbaa !4
+  %v6 = sext i16 %v5 to i32
+  %v7 = srem i32 %v6, 3
+  %v8 = icmp eq i32 %v7, 0
+  br i1 %v8, label %b6, label %b2
+
+b2:                                               ; preds = %b1
+  br label %b3
+
+b3:                                               ; preds = %b3, %b2
+  %v9 = phi i16 [ %v12, %b3 ], [ %v5, %b2 ]
+  %v10 = phi i32 [ %v11, %b3 ], [ 0, %b2 ]
+  %v11 = add nsw i32 %v10, -1
+  %v12 = add i16 %v9, 1
+  %v13 = sext i16 %v12 to i32
+  %v14 = srem i32 %v13, 3
+  %v15 = icmp eq i32 %v14, 0
+  br i1 %v15, label %b4, label %b3
+
+b4:                                               ; preds = %b3
+  %v16 = phi i16 [ %v12, %b3 ]
+  %v17 = phi i32 [ %v11, %b3 ]
+  %v18 = phi i32 [ %v10, %b3 ]
+  store i16 %v16, i16* %v1, align 2, !tbaa !4
+  %v19 = icmp slt i32 %v18, 1
+  br i1 %v19, label %b5, label %b6
+
+b5:                                               ; preds = %b4
+  %v20 = call signext i16 @f2(float* %v0, i32 %v17) #1
+  br label %b6
+
+b6:                                               ; preds = %b5, %b4, %b1
+  %v21 = bitcast float* %v0 to i16*
+  %v22 = getelementptr inbounds i16, i16* %v21, i32 1
+  %v23 = load i16, i16* %v22, align 2, !tbaa !6
+  %v24 = icmp slt i16 %v23, 0
+  %v25 = load float, float* %v0, align 4, !tbaa !0
+  br i1 %v24, label %b7, label %b8
+
+b7:                                               ; preds = %b6
+  %v26 = fsub float -0.000000e+00, %v25
+  store float %v26, float* %v0, align 4, !tbaa !0
+  br label %b8
+
+b8:                                               ; preds = %b7, %b6
+  %v27 = phi float [ %v26, %b7 ], [ %v25, %b6 ]
+  %v28 = phi i1 [ true, %b7 ], [ false, %b6 ]
+  %v29 = fmul float %v27, 0x3FCF3482C0000000
+  %v30 = fadd float %v29, 0x3FEEA88260000000
+  %v31 = fmul float %v27, %v30
+  %v32 = fadd float %v31, 0x3FB43419E0000000
+  %v33 = fadd float %v27, 0x3FD1E54B40000000
+  %v34 = fdiv float %v32, %v33
+  store float %v34, float* %v2, align 4, !tbaa !0
+  %v35 = fmul float %v27, 1.500000e+00
+  %v36 = fmul float %v34, %v34
+  %v37 = fmul float %v27, 5.000000e-01
+  %v38 = fdiv float %v37, %v34
+  %v39 = fadd float %v36, %v38
+  %v40 = fdiv float %v35, %v39
+  %v41 = fadd float %v34, %v40
+  %v42 = fmul float %v41, 5.000000e-01
+  br i1 %v28, label %b9, label %b10
+
+b9:                                               ; preds = %b8
+  %v43 = fsub float -0.000000e+00, %v42
+  br label %b10
+
+b10:                                              ; preds = %b9, %b8
+  %v44 = phi float [ %v43, %b9 ], [ %v42, %b8 ]
+  store float %v44, float* %v2, align 4, !tbaa !0
+  %v45 = load i16, i16* %v1, align 2, !tbaa !4
+  %v46 = sext i16 %v45 to i32
+  %v47 = sdiv i32 %v46, 3
+  %v48 = call signext i16 @f2(float* %v2, i32 %v47) #1
+  br label %b11
+
+b11:                                              ; preds = %b10, %b0
+  %v49 = phi float* [ %v2, %b10 ], [ %v0, %b0 ]
+  %v50 = load float, float* %v49, align 4
+  ret float %v50
+}
+
+declare signext i16 @f1(i16*, float*) #1
+
+declare signext i16 @f2(float*, i32) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" }
+attributes #1 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"float", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"short", !2, i64 0}
+!6 = !{!2, !2, i64 0}

Added: llvm/trunk/test/CodeGen/Hexagon/bug-hcp-tied-kill.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/bug-hcp-tied-kill.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/bug-hcp-tied-kill.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/bug-hcp-tied-kill.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,55 @@
+; RUN: llc -march=hexagon -O2 -verify-machineinstrs < %s | FileCheck %s
+
+; CHECK: .globl
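+; The .globl directive is trivially present; the interesting part is presumably
+; that -verify-machineinstrs succeeds on the M2.maci (Q6_R_mpyiacc_RR) sequence below.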
+
+target triple = "hexagon"
+
+@g0 = private unnamed_addr constant [46 x i8] c"%x :  Q6_R_mpyiacc_RR(INT32_MAX,0,INT32_MAX)\0A\00", align 1
+@g1 = private unnamed_addr constant [46 x i8] c"%x :  Q6_R_mpyiacc_RR(INT32_MIN,1,INT32_MAX)\0A\00", align 1
+@g2 = private unnamed_addr constant [39 x i8] c"%x :  Q6_R_mpyiacc_RR(-1,1,INT32_MAX)\0A\00", align 1
+@g3 = private unnamed_addr constant [38 x i8] c"%x :  Q6_R_mpyiacc_RR(0,1,INT32_MAX)\0A\00", align 1
+@g4 = private unnamed_addr constant [38 x i8] c"%x :  Q6_R_mpyiacc_RR(1,1,INT32_MAX)\0A\00", align 1
+@g5 = private unnamed_addr constant [46 x i8] c"%x :  Q6_R_mpyiacc_RR(INT32_MAX,1,INT32_MAX)\0A\00", align 1
+@g6 = private unnamed_addr constant [54 x i8] c"%x :  Q6_R_mpyiacc_RR(INT32_MIN,INT32_MAX,INT32_MAX)\0A\00", align 1
+@g7 = private unnamed_addr constant [47 x i8] c"%x :  Q6_R_mpyiacc_RR(-1,INT32_MAX,INT32_MAX)\0A\00", align 1
+@g8 = private unnamed_addr constant [46 x i8] c"%x :  Q6_R_mpyiacc_RR(0,INT32_MAX,INT32_MAX)\0A\00", align 1
+@g9 = private unnamed_addr constant [46 x i8] c"%x :  Q6_R_mpyiacc_RR(1,INT32_MAX,INT32_MAX)\0A\00", align 1
+@g10 = private unnamed_addr constant [54 x i8] c"%x :  Q6_R_mpyiacc_RR(INT32_MAX,INT32_MAX,INT32_MAX)\0A\00", align 1
+
+; Function Attrs: nounwind
+declare i32 @f0(i8* nocapture readonly, ...) #0
+
+; Function Attrs: nounwind
+define i32 @f1() #0 {
+b0:
+  %v0 = tail call i32 @llvm.hexagon.M2.maci(i32 2147483647, i32 0, i32 2147483647)
+  %v1 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @g0, i32 0, i32 0), i32 %v0) #2
+  %v2 = tail call i32 @llvm.hexagon.M2.maci(i32 -2147483648, i32 1, i32 2147483647)
+  %v3 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @g1, i32 0, i32 0), i32 %v2) #2
+  %v4 = tail call i32 @llvm.hexagon.M2.maci(i32 -1, i32 1, i32 2147483647)
+  %v5 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([39 x i8], [39 x i8]* @g2, i32 0, i32 0), i32 %v4) #2
+  %v6 = tail call i32 @llvm.hexagon.M2.maci(i32 0, i32 1, i32 2147483647)
+  %v7 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @g3, i32 0, i32 0), i32 %v6) #2
+  %v8 = tail call i32 @llvm.hexagon.M2.maci(i32 1, i32 1, i32 2147483647)
+  %v9 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @g4, i32 0, i32 0), i32 %v8) #2
+  %v10 = tail call i32 @llvm.hexagon.M2.maci(i32 2147483647, i32 1, i32 2147483647)
+  %v11 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @g5, i32 0, i32 0), i32 %v10) #2
+  %v12 = tail call i32 @llvm.hexagon.M2.maci(i32 -2147483648, i32 2147483647, i32 2147483647)
+  %v13 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([54 x i8], [54 x i8]* @g6, i32 0, i32 0), i32 %v12) #2
+  %v14 = tail call i32 @llvm.hexagon.M2.maci(i32 -1, i32 2147483647, i32 2147483647)
+  %v15 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @g7, i32 0, i32 0), i32 %v14) #2
+  %v16 = tail call i32 @llvm.hexagon.M2.maci(i32 0, i32 2147483647, i32 2147483647)
+  %v17 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @g8, i32 0, i32 0), i32 %v16) #2
+  %v18 = tail call i32 @llvm.hexagon.M2.maci(i32 1, i32 2147483647, i32 2147483647)
+  %v19 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @g9, i32 0, i32 0), i32 %v18) #2
+  %v20 = tail call i32 @llvm.hexagon.M2.maci(i32 2147483647, i32 2147483647, i32 2147483647)
+  %v21 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([54 x i8], [54 x i8]* @g10, i32 0, i32 0), i32 %v20) #2
+  ret i32 0
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.M2.maci(i32, i32, i32) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/bug14859-iv-cleanup-lpad.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/bug14859-iv-cleanup-lpad.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/bug14859-iv-cleanup-lpad.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/bug14859-iv-cleanup-lpad.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,231 @@
+; RUN: llc -march=hexagon -O3 < %s
+; REQUIRES: asserts
+
+target triple = "hexagon"
+
+%s.0 = type { i32 (...)**, i32, %s.1 }
+%s.1 = type { %s.2, %s.5*, %s.6*, i32 }
+%s.2 = type { i32 (...)**, i32, i8, i8, i16, i32, i32, %s.3*, %s.4*, i32* }
+%s.3 = type { %s.3*, i32, i32, i8* }
+%s.4 = type { %s.4*, i32, void (i8, %s.2*, i32)* }
+%s.5 = type { i32 (...)**, i8, i32*, i32*, i32**, i32**, i32*, i32*, i32**, i32**, i32*, i32*, i32**, i32**, i32* }
+%s.6 = type { i32 (...)**, %s.1 }
+%s.7 = type { %s.8, i8 }
+%s.8 = type { %s.0* }
+
+define %s.0* @f0(%s.0* %a0, i32* nocapture %a1, i32 %a2, i32 signext %a3) align 2 personality i8* bitcast (i32 (...)* @f11 to i8*) {
+b0:
+  %v0 = alloca %s.7, align 4
+  %v1 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 1
+  store i32 0, i32* %v1, align 4, !tbaa !0
+  call void @f2(%s.7* %v0, %s.0* %a0, i1 zeroext true)
+  %v2 = getelementptr inbounds %s.7, %s.7* %v0, i32 0, i32 1
+  %v3 = load i8, i8* %v2, align 4, !tbaa !4, !range !6
+  %v4 = icmp ne i8 %v3, 0
+  %v5 = icmp sgt i32 %a2, 0
+  %v6 = and i1 %v4, %v5
+  %v7 = bitcast %s.0* %a0 to i8**
+  br i1 %v6, label %b2, label %b1
+
+b1:                                               ; preds = %b0
+  %v8 = bitcast %s.0* %a0 to i8*
+  br label %b16
+
+b2:                                               ; preds = %b0
+  %v9 = load i8*, i8** %v7, align 4, !tbaa !7
+  %v10 = getelementptr i8, i8* %v9, i32 -12
+  %v11 = bitcast i8* %v10 to i32*
+  %v12 = load i32, i32* %v11, align 4
+  %v13 = bitcast %s.0* %a0 to i8*
+  %v14 = add i32 %v12, 32
+  %v15 = getelementptr inbounds i8, i8* %v13, i32 %v14
+  %v16 = bitcast i8* %v15 to %s.5**
+  %v17 = load %s.5*, %s.5** %v16, align 4, !tbaa !9
+  %v18 = invoke signext i32 @f3(%s.5* %v17)
+          to label %b3 unwind label %b7
+
+b3:                                               ; preds = %b2
+  br label %b4
+
+b4:                                               ; preds = %b13, %b3
+  %v19 = phi i32 [ %v68, %b13 ], [ %v18, %b3 ]
+  %v20 = phi i32 [ %v55, %b13 ], [ %a2, %b3 ]
+  %v21 = phi i32* [ %v59, %b13 ], [ %a1, %b3 ]
+  %v22 = icmp eq i32 %v19, -1
+  br i1 %v22, label %b15, label %b10
+
+b5:                                               ; preds = %b16, %b9
+  %v23 = landingpad { i8*, i32 }
+          cleanup
+  %v24 = extractvalue { i8*, i32 } %v23, 0
+  %v25 = extractvalue { i8*, i32 } %v23, 1
+  br label %b18
+
+b6:                                               ; preds = %b13
+  %v26 = landingpad { i8*, i32 }
+          catch i8* null
+  br label %b8
+
+b7:                                               ; preds = %b11, %b2
+  %v27 = phi i32* [ %v21, %b11 ], [ %a1, %b2 ]
+  %v28 = landingpad { i8*, i32 }
+          catch i8* null
+  br label %b8
+
+b8:                                               ; preds = %b7, %b6
+  %v29 = phi i32* [ %v59, %b6 ], [ %v27, %b7 ]
+  %v30 = phi { i8*, i32 } [ %v26, %b6 ], [ %v28, %b7 ]
+  %v31 = extractvalue { i8*, i32 } %v30, 0
+  %v32 = call i8* @f9(i8* %v31) #0
+  %v33 = load i8*, i8** %v7, align 4, !tbaa !7
+  %v34 = getelementptr i8, i8* %v33, i32 -12
+  %v35 = bitcast i8* %v34 to i32*
+  %v36 = load i32, i32* %v35, align 4
+  %v37 = getelementptr inbounds i8, i8* %v13, i32 %v36
+  %v38 = bitcast i8* %v37 to %s.1*
+  %v39 = add i32 %v36, 8
+  %v40 = getelementptr inbounds i8, i8* %v13, i32 %v39
+  %v41 = load i8, i8* %v40, align 1, !tbaa !11
+  %v42 = or i8 %v41, 4
+  invoke void @f6(%s.1* %v38, i8 zeroext %v42, i1 zeroext true)
+          to label %b9 unwind label %b14
+
+b9:                                               ; preds = %b8
+  invoke void @f10()
+          to label %b16 unwind label %b5
+
+b10:                                              ; preds = %b4
+  %v43 = icmp eq i32 %v19, %a3
+  br i1 %v43, label %b11, label %b12
+
+b11:                                              ; preds = %b10
+  %v44 = load i32, i32* %v1, align 4, !tbaa !0
+  %v45 = add nsw i32 %v44, 1
+  store i32 %v45, i32* %v1, align 4, !tbaa !0
+  %v46 = load i8*, i8** %v7, align 4, !tbaa !7
+  %v47 = getelementptr i8, i8* %v46, i32 -12
+  %v48 = bitcast i8* %v47 to i32*
+  %v49 = load i32, i32* %v48, align 4
+  %v50 = add i32 %v49, 32
+  %v51 = getelementptr inbounds i8, i8* %v13, i32 %v50
+  %v52 = bitcast i8* %v51 to %s.5**
+  %v53 = load %s.5*, %s.5** %v52, align 4, !tbaa !9
+  %v54 = invoke signext i32 @f4(%s.5* %v53)
+          to label %b16 unwind label %b7
+
+b12:                                              ; preds = %b10
+  %v55 = add nsw i32 %v20, -1
+  %v56 = icmp slt i32 %v55, 1
+  br i1 %v56, label %b15, label %b13
+
+b13:                                              ; preds = %b12
+  %v57 = load i32, i32* %v1, align 4, !tbaa !0
+  %v58 = add nsw i32 %v57, 1
+  store i32 %v58, i32* %v1, align 4, !tbaa !0
+  %v59 = getelementptr inbounds i32, i32* %v21, i32 1
+  store i32 %v19, i32* %v21, align 4, !tbaa !13
+  %v60 = load i8*, i8** %v7, align 4, !tbaa !7
+  %v61 = getelementptr i8, i8* %v60, i32 -12
+  %v62 = bitcast i8* %v61 to i32*
+  %v63 = load i32, i32* %v62, align 4
+  %v64 = add i32 %v63, 32
+  %v65 = getelementptr inbounds i8, i8* %v13, i32 %v64
+  %v66 = bitcast i8* %v65 to %s.5**
+  %v67 = load %s.5*, %s.5** %v66, align 4, !tbaa !9
+  %v68 = invoke signext i32 @f5(%s.5* %v67)
+          to label %b4 unwind label %b6
+
+b14:                                              ; preds = %b8
+  %v69 = landingpad { i8*, i32 }
+          cleanup
+  %v70 = extractvalue { i8*, i32 } %v69, 0
+  %v71 = extractvalue { i8*, i32 } %v69, 1
+  invoke void @f10()
+          to label %b18 unwind label %b20
+
+b15:                                              ; preds = %b12, %b4
+  %v72 = phi i8 [ 2, %b12 ], [ 1, %b4 ]
+  br label %b16
+
+b16:                                              ; preds = %b15, %b11, %b9, %b1
+  %v73 = phi i8* [ %v8, %b1 ], [ %v13, %b11 ], [ %v13, %b9 ], [ %v13, %b15 ]
+  %v74 = phi i8 [ 0, %b1 ], [ 0, %b11 ], [ 0, %b9 ], [ %v72, %b15 ]
+  %v75 = phi i32* [ %a1, %b1 ], [ %v21, %b11 ], [ %v29, %b9 ], [ %v21, %b15 ]
+  store i32 0, i32* %v75, align 4, !tbaa !13
+  %v76 = load i8*, i8** %v7, align 4, !tbaa !7
+  %v77 = getelementptr i8, i8* %v76, i32 -12
+  %v78 = bitcast i8* %v77 to i32*
+  %v79 = load i32, i32* %v78, align 4
+  %v80 = getelementptr inbounds i8, i8* %v73, i32 %v79
+  %v81 = bitcast i8* %v80 to %s.1*
+  %v82 = load i32, i32* %v1, align 4, !tbaa !0
+  %v83 = icmp eq i32 %v82, 0
+  %v84 = or i8 %v74, 2
+  %v85 = select i1 %v83, i8 %v84, i8 %v74
+  invoke void @f7(%s.1* %v81, i8 zeroext %v85, i1 zeroext false)
+          to label %b17 unwind label %b5
+
+b17:                                              ; preds = %b16
+  %v86 = getelementptr inbounds %s.7, %s.7* %v0, i32 0, i32 0
+  call void @f1(%s.8* %v86)
+  ret %s.0* %a0
+
+b18:                                              ; preds = %b14, %b5
+  %v87 = phi i8* [ %v24, %b5 ], [ %v70, %b14 ]
+  %v88 = phi i32 [ %v25, %b5 ], [ %v71, %b14 ]
+  %v89 = getelementptr inbounds %s.7, %s.7* %v0, i32 0, i32 0
+  invoke void @f1(%s.8* %v89)
+          to label %b19 unwind label %b20
+
+b19:                                              ; preds = %b18
+  %v90 = insertvalue { i8*, i32 } undef, i8* %v87, 0
+  %v91 = insertvalue { i8*, i32 } %v90, i32 %v88, 1
+  resume { i8*, i32 } %v91
+
+b20:                                              ; preds = %b18, %b14
+  %v92 = landingpad { i8*, i32 }
+          catch i8* null
+  call void @f8() #1
+  unreachable
+}
+
+declare void @f1(%s.8* nocapture) unnamed_addr align 2
+
+declare void @f2(%s.7* nocapture, %s.0*, i1 zeroext) unnamed_addr align 2
+
+declare signext i32 @f3(%s.5*) align 2
+
+declare signext i32 @f4(%s.5*) align 2
+
+declare signext i32 @f5(%s.5*) align 2
+
+declare void @f6(%s.1*, i8 zeroext, i1 zeroext) align 2
+
+declare void @f7(%s.1*, i8 zeroext, i1 zeroext) align 2
+
+declare void @f8()
+
+declare i8* @f9(i8*)
+
+declare void @f10()
+
+declare i32 @f11(...)
+
+attributes #0 = { nounwind }
+attributes #1 = { noreturn nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"bool", !2}
+!6 = !{i8 0, i8 2}
+!7 = !{!8, !8, i64 0}
+!8 = !{!"vtable pointer", !3}
+!9 = !{!10, !10, i64 0}
+!10 = !{!"any pointer", !2}
+!11 = !{!12, !12, i64 0}
+!12 = !{!"_ZTSNSt5_IosbIiE8_IostateE", !2}
+!13 = !{!14, !14, i64 0}
+!14 = !{!"wchar_t", !2}

Added: llvm/trunk/test/CodeGen/Hexagon/bug14859-split-const-block-addr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/bug14859-split-const-block-addr.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/bug14859-split-const-block-addr.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/bug14859-split-const-block-addr.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,363 @@
+; RUN: llc -march=hexagon -O3 -hexagon-small-data-threshold=0 < %s
+; REQUIRES: asserts
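+; Compile-only test: it exercises blockaddress/indirectbr with the small-data
+; threshold forced to 0.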
+
+target triple = "hexagon"
+
+%s.0 = type { %s.1, %s.1* }
+%s.1 = type { i8*, i8*, i8*, i32 }
+
+; Function Attrs: nounwind
+declare i32 @f0(%s.0* nocapture) #0 align 2
+
+; Function Attrs: nounwind
+declare void @f1(%s.0* nocapture) unnamed_addr #0 align 2
+
+; Function Attrs: inlinehint
+define void @f2(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, %s.0* %a5, i1 (i8, i8)* %a6) #1 {
+b0:
+  %v0 = alloca %s.0, align 4
+  %v1 = alloca %s.0, align 4
+  %v2 = alloca %s.0, align 4
+  %v3 = alloca %s.0, align 4
+  %v4 = alloca %s.0, align 4
+  %v5 = alloca %s.0, align 4
+  %v6 = inttoptr i32 %a0 to i8*
+  %v7 = inttoptr i32 %a1 to i8*
+  %v8 = add nsw i32 %a4, %a3
+  %v9 = icmp eq i32 %v8, 2
+  br i1 %v9, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  call void @f7(i8* %v7, i8* %v6, i1 (i8, i8)* %a6)
+  br label %b43
+
+b2:                                               ; preds = %b0
+  %v10 = icmp sgt i32 %a3, %a4
+  br i1 %v10, label %b18, label %b3
+
+b3:                                               ; preds = %b2
+  %v11 = call i32 @f0(%s.0* %a5)
+  %v12 = icmp slt i32 %v11, %a3
+  br i1 %v12, label %b18, label %b4
+
+b4:                                               ; preds = %b3
+  %v13 = getelementptr inbounds %s.0, %s.0* %a5, i32 0, i32 1
+  %v14 = load %s.1*, %s.1** %v13, align 4, !tbaa !0
+  %v15 = getelementptr inbounds %s.1, %s.1* %v14, i32 0, i32 0
+  %v16 = load i8*, i8** %v15, align 4, !tbaa !0
+  %v17 = getelementptr inbounds %s.1, %s.1* %v14, i32 0, i32 1
+  store i8* %v16, i8** %v17, align 4, !tbaa !0
+  %v18 = bitcast %s.0* %v3 to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 4 %v18, i8 0, i64 16, i1 false)
+  %v19 = load %s.1*, %s.1** %v13, align 4, !tbaa !0
+  %v20 = getelementptr inbounds %s.0, %s.0* %v3, i32 0, i32 1
+  store %s.1* %v19, %s.1** %v20, align 4, !tbaa !0
+  %v21 = bitcast %s.0* %v1 to i8*
+  call void @llvm.lifetime.start.p0i8(i64 -1, i8* %v21)
+  call void @llvm.memset.p0i8.i64(i8* align 4 %v21, i8 0, i64 16, i1 false)
+  %v22 = getelementptr inbounds %s.0, %s.0* %v1, i32 0, i32 1
+  store %s.1* %v19, %s.1** %v22, align 4, !tbaa !0
+  %v23 = icmp eq i8* %v6, %v7
+  br i1 %v23, label %b6, label %b5
+
+b5:                                               ; preds = %b4
+  call void @f8(i8* %v6, %s.0* %v1, i8* %v7)
+  %v24 = load %s.1*, %s.1** %v22, align 4, !tbaa !0
+  br label %b6
+
+b6:                                               ; preds = %b5, %b4
+  %v25 = phi %s.1* [ %v24, %b5 ], [ %v19, %b4 ]
+  %v26 = bitcast %s.0* %v2 to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 4 %v26, i8 0, i64 16, i1 false)
+  %v27 = getelementptr inbounds %s.0, %s.0* %v2, i32 0, i32 1
+  store %s.1* %v25, %s.1** %v27, align 4, !tbaa !0
+  call void @f1(%s.0* %v1) #0
+  call void @llvm.lifetime.end.p0i8(i64 -1, i8* %v21)
+  call void @f1(%s.0* %v2) #0
+  call void @f1(%s.0* %v3) #0
+  %v28 = load %s.1*, %s.1** %v13, align 4, !tbaa !0
+  %v29 = getelementptr inbounds %s.1, %s.1* %v28, i32 0, i32 0
+  %v30 = load i8*, i8** %v29, align 4, !tbaa !0
+  %v31 = getelementptr inbounds %s.1, %s.1* %v28, i32 0, i32 1
+  %v32 = load i8*, i8** %v31, align 4, !tbaa !0
+  %v33 = inttoptr i32 %a2 to i8*
+  %v34 = icmp eq i8* %v30, %v32
+  br i1 %v34, label %b15, label %b7
+
+b7:                                               ; preds = %b6
+  br label %b8
+
+b8:                                               ; preds = %b12, %b7
+  %v35 = phi i8* [ %v47, %b12 ], [ %v30, %b7 ]
+  %v36 = phi i8* [ %v48, %b12 ], [ %v6, %b7 ]
+  %v37 = phi i8* [ %v46, %b12 ], [ %v7, %b7 ]
+  %v38 = icmp eq i8* %v37, %v33
+  br i1 %v38, label %b13, label %b9
+
+b9:                                               ; preds = %b8
+  %v39 = load i8, i8* %v37, align 1, !tbaa !4
+  %v40 = load i8, i8* %v35, align 1, !tbaa !4
+  %v41 = call zeroext i1 %a6(i8 zeroext %v39, i8 zeroext %v40)
+  br i1 %v41, label %b10, label %b11
+
+b10:                                              ; preds = %b9
+  %v42 = load i8, i8* %v37, align 1, !tbaa !4
+  store i8 %v42, i8* %v36, align 1, !tbaa !4
+  %v43 = getelementptr inbounds i8, i8* %v37, i32 1
+  br label %b12
+
+b11:                                              ; preds = %b9
+  %v44 = load i8, i8* %v35, align 1, !tbaa !4
+  store i8 %v44, i8* %v36, align 1, !tbaa !4
+  %v45 = getelementptr inbounds i8, i8* %v35, i32 1
+  br label %b12
+
+b12:                                              ; preds = %b11, %b10
+  %v46 = phi i8* [ %v43, %b10 ], [ %v37, %b11 ]
+  %v47 = phi i8* [ %v35, %b10 ], [ %v45, %b11 ]
+  %v48 = getelementptr inbounds i8, i8* %v36, i32 1
+  %v49 = icmp eq i8* %v47, %v32
+  br i1 %v49, label %b14, label %b8
+
+b13:                                              ; preds = %b8
+  call void @f9(i8* %v35, i8* %v36, i8* %v32)
+  br label %b43
+
+b14:                                              ; preds = %b12
+  br label %b15
+
+b15:                                              ; preds = %b14, %b6
+  %v50 = phi i8* [ %v7, %b6 ], [ %v46, %b14 ]
+  %v51 = phi i8* [ %v6, %b6 ], [ %v48, %b14 ]
+  %v52 = icmp eq i8* %v50, %v33
+  br i1 %v52, label %b43, label %b16
+
+b16:                                              ; preds = %b15
+  br label %b17
+
+b17:                                              ; preds = %b17, %b16
+  %v53 = phi i8* [ %v56, %b17 ], [ %v51, %b16 ]
+  %v54 = phi i8* [ %v57, %b17 ], [ %v50, %b16 ]
+  %v55 = load i8, i8* %v54, align 1, !tbaa !4
+  store i8 %v55, i8* %v53, align 1, !tbaa !4
+  %v56 = getelementptr inbounds i8, i8* %v53, i32 1
+  %v57 = getelementptr inbounds i8, i8* %v54, i32 1
+  %v58 = icmp eq i8* %v57, %v33
+  br i1 %v58, label %b42, label %b17
+
+b18:                                              ; preds = %b3, %b2
+  %v59 = call i32 @f0(%s.0* %a5)
+  %v60 = icmp slt i32 %v59, %a4
+  br i1 %v60, label %b33, label %b19
+
+b19:                                              ; preds = %b18
+  %v61 = getelementptr inbounds %s.0, %s.0* %a5, i32 0, i32 1
+  %v62 = load %s.1*, %s.1** %v61, align 4, !tbaa !0
+  %v63 = getelementptr inbounds %s.1, %s.1* %v62, i32 0, i32 0
+  %v64 = load i8*, i8** %v63, align 4, !tbaa !0
+  %v65 = getelementptr inbounds %s.1, %s.1* %v62, i32 0, i32 1
+  store i8* %v64, i8** %v65, align 4, !tbaa !0
+  %v66 = bitcast %s.0* %v5 to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 4 %v66, i8 0, i64 16, i1 false)
+  %v67 = load %s.1*, %s.1** %v61, align 4, !tbaa !0
+  %v68 = getelementptr inbounds %s.0, %s.0* %v5, i32 0, i32 1
+  store %s.1* %v67, %s.1** %v68, align 4, !tbaa !0
+  %v69 = bitcast %s.0* %v0 to i8*
+  call void @llvm.lifetime.start.p0i8(i64 -1, i8* %v69)
+  call void @llvm.memset.p0i8.i64(i8* align 4 %v69, i8 0, i64 16, i1 false)
+  %v70 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 1
+  store %s.1* %v67, %s.1** %v70, align 4, !tbaa !0
+  %v71 = inttoptr i32 %a2 to i8*
+  %v72 = icmp eq i8* %v7, %v71
+  br i1 %v72, label %b21, label %b20
+
+b20:                                              ; preds = %b19
+  call void @f8(i8* %v7, %s.0* %v0, i8* %v71)
+  %v73 = load %s.1*, %s.1** %v70, align 4, !tbaa !0
+  br label %b21
+
+b21:                                              ; preds = %b20, %b19
+  %v74 = phi %s.1* [ %v73, %b20 ], [ %v67, %b19 ]
+  %v75 = bitcast %s.0* %v4 to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 4 %v75, i8 0, i64 16, i1 false)
+  %v76 = getelementptr inbounds %s.0, %s.0* %v4, i32 0, i32 1
+  store %s.1* %v74, %s.1** %v76, align 4, !tbaa !0
+  call void @f1(%s.0* %v0) #0
+  call void @llvm.lifetime.end.p0i8(i64 -1, i8* %v69)
+  call void @f1(%s.0* %v4) #0
+  call void @f1(%s.0* %v5) #0
+  %v77 = load %s.1*, %s.1** %v61, align 4, !tbaa !0
+  %v78 = getelementptr inbounds %s.1, %s.1* %v77, i32 0, i32 0
+  %v79 = load i8*, i8** %v78, align 4, !tbaa !0
+  %v80 = getelementptr inbounds %s.1, %s.1* %v77, i32 0, i32 1
+  %v81 = load i8*, i8** %v80, align 4, !tbaa !0
+  %v82 = icmp eq i8* %v6, %v7
+  br i1 %v82, label %b25, label %b22
+
+b22:                                              ; preds = %b21
+  br label %b23
+
+b23:                                              ; preds = %b31, %b22
+  %v83 = phi i8* [ %v100, %b31 ], [ %v81, %b22 ]
+  %v84 = phi i8* [ %v111, %b31 ], [ %v71, %b22 ]
+  %v85 = phi i8* [ %v86, %b31 ], [ %v7, %b22 ]
+  %v86 = getelementptr inbounds i8, i8* %v85, i32 -1
+  %v87 = icmp eq i8* %v83, %v79
+  br i1 %v87, label %b28, label %b24
+
+b24:                                              ; preds = %b23
+  br label %b30
+
+b25:                                              ; preds = %b31, %b21
+  %v88 = phi i8* [ %v81, %b21 ], [ %v100, %b31 ]
+  %v89 = phi i8* [ %v71, %b21 ], [ %v111, %b31 ]
+  %v90 = icmp eq i8* %v88, %v79
+  br i1 %v90, label %b43, label %b26
+
+b26:                                              ; preds = %b25
+  br label %b27
+
+b27:                                              ; preds = %b27, %b26
+  %v91 = phi i8* [ %v93, %b27 ], [ %v88, %b26 ]
+  %v92 = phi i8* [ %v95, %b27 ], [ %v89, %b26 ]
+  %v93 = getelementptr inbounds i8, i8* %v91, i32 -1
+  %v94 = load i8, i8* %v93, align 1, !tbaa !4
+  %v95 = getelementptr inbounds i8, i8* %v92, i32 -1
+  store i8 %v94, i8* %v95, align 1, !tbaa !4
+  %v96 = icmp eq i8* %v93, %v79
+  br i1 %v96, label %b41, label %b27
+
+b28:                                              ; preds = %b31, %b23
+  %v97 = phi i8* [ %v111, %b31 ], [ %v84, %b23 ]
+  %v98 = icmp eq i8* %v6, %v85
+  br i1 %v98, label %b43, label %b29
+
+b29:                                              ; preds = %b28
+  call void @f6(i8* %v97, i8* %v85, i8* %v6)
+  br label %b43
+
+b30:                                              ; preds = %b31, %b24
+  %v99 = phi i8* [ %v111, %b31 ], [ %v84, %b24 ]
+  %v100 = phi i8* [ %v101, %b31 ], [ %v83, %b24 ]
+  %v101 = getelementptr inbounds i8, i8* %v100, i32 -1
+  %v102 = load i8, i8* %v101, align 1, !tbaa !4
+  %v103 = load i8, i8* %v86, align 1, !tbaa !4
+  %v104 = call zeroext i1 %a6(i8 zeroext %v102, i8 zeroext %v103)
+  br i1 %v104, label %b31, label %b32
+
+b31:                                              ; preds = %b32, %b30
+  %v105 = phi i8* [ %v101, %b32 ], [ %v86, %b30 ]
+  %v106 = phi i8* [ %v101, %b32 ], [ %v6, %b30 ]
+  %v107 = phi i8* [ %v79, %b32 ], [ %v86, %b30 ]
+  %v108 = phi i8* [ blockaddress(@f2, %b30), %b32 ], [ blockaddress(@f2, %b23), %b30 ]
+  %v109 = phi i8* [ blockaddress(@f2, %b28), %b32 ], [ blockaddress(@f2, %b25), %b30 ]
+  %v110 = load i8, i8* %v105, align 1, !tbaa !4
+  %v111 = getelementptr inbounds i8, i8* %v99, i32 -1
+  store i8 %v110, i8* %v111, align 1, !tbaa !4
+  %v112 = icmp eq i8* %v106, %v107
+  %v113 = select i1 %v112, i8* %v109, i8* %v108
+  indirectbr i8* %v113, [label %b25, label %b28, label %b23, label %b30]
+
+b32:                                              ; preds = %b30
+  br label %b31
+
+b33:                                              ; preds = %b18
+  br i1 %v10, label %b34, label %b37
+
+b34:                                              ; preds = %b33
+  %v114 = sdiv i32 %a3, 2
+  %v115 = getelementptr inbounds i8, i8* %v6, i32 %v114
+  %v116 = sub i32 %a2, %a1
+  %v117 = icmp sgt i32 %v116, 0
+  br i1 %v117, label %b35, label %b36
+
+b35:                                              ; preds = %b34
+  %v118 = call i8* @f5(i8* %v7, i32 %v116, i8* %v115, i1 (i8, i8)* %a6)
+  br label %b36
+
+b36:                                              ; preds = %b35, %b34
+  %v119 = phi i8* [ %v7, %b34 ], [ %v118, %b35 ]
+  %v120 = ptrtoint i8* %v119 to i32
+  %v121 = sub i32 %v120, %a1
+  br label %b40
+
+b37:                                              ; preds = %b33
+  %v122 = sdiv i32 %a4, 2
+  %v123 = getelementptr inbounds i8, i8* %v7, i32 %v122
+  %v124 = sub i32 %a1, %a0
+  %v125 = icmp sgt i32 %v124, 0
+  br i1 %v125, label %b38, label %b39
+
+b38:                                              ; preds = %b37
+  %v126 = call i8* @f4(i8* %v6, i32 %v124, i8* %v123, i1 (i8, i8)* %a6)
+  br label %b39
+
+b39:                                              ; preds = %b38, %b37
+  %v127 = phi i8* [ %v6, %b37 ], [ %v126, %b38 ]
+  %v128 = ptrtoint i8* %v127 to i32
+  %v129 = sub i32 %v128, %a0
+  br label %b40
+
+b40:                                              ; preds = %b39, %b36
+  %v130 = phi i8* [ %v127, %b39 ], [ %v115, %b36 ]
+  %v131 = phi i8* [ %v123, %b39 ], [ %v119, %b36 ]
+  %v132 = phi i32 [ %v129, %b39 ], [ %v114, %b36 ]
+  %v133 = phi i32 [ %v122, %b39 ], [ %v121, %b36 ]
+  %v134 = sub nsw i32 %a3, %v132
+  %v135 = ptrtoint i8* %v130 to i32
+  %v136 = ptrtoint i8* %v131 to i32
+  %v137 = call i32 @f3(i32 %v135, i32 %a1, i32 %v136, i32 %v134, i32 %v133, %s.0* %a5)
+  call void @f2(i32 %a0, i32 %v135, i32 %v137, i32 %v132, i32 %v133, %s.0* %a5, i1 (i8, i8)* %a6)
+  %v138 = sub nsw i32 %a4, %v133
+  call void @f2(i32 %v137, i32 %v136, i32 %a2, i32 %v134, i32 %v138, %s.0* %a5, i1 (i8, i8)* %a6)
+  br label %b43
+
+b41:                                              ; preds = %b27
+  br label %b43
+
+b42:                                              ; preds = %b17
+  br label %b43
+
+b43:                                              ; preds = %b42, %b41, %b40, %b29, %b28, %b25, %b15, %b13, %b1
+  ret void
+}
+
+; Function Attrs: inlinehint
+declare i32 @f3(i32, i32, i32, i32, i32, %s.0* nocapture) #1
+
+; Function Attrs: inlinehint
+declare i8* @f4(i8*, i32, i8*, i1 (i8, i8)*) #1
+
+; Function Attrs: inlinehint
+declare i8* @f5(i8*, i32, i8*, i1 (i8, i8)*) #1
+
+; Function Attrs: inlinehint
+declare void @f6(i8*, i8*, i8*) #1
+
+; Function Attrs: inlinehint
+declare void @f7(i8*, i8*, i1 (i8, i8)*) #1
+
+; Function Attrs: inlinehint
+declare void @f8(i8*, %s.0*, i8*) #1
+
+; Function Attrs: inlinehint
+declare void @f9(i8*, i8*, i8*) #1
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #2
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #2
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #2
+
+attributes #0 = { nounwind }
+attributes #1 = { inlinehint }
+attributes #2 = { argmemonly nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"any pointer", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!2, !2, i64 0}

Added: llvm/trunk/test/CodeGen/Hexagon/bug15515-shuffle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/bug15515-shuffle.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/bug15515-shuffle.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/bug15515-shuffle.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,45 @@
+; RUN: opt -march=hexagon -O2 -vectorize-loops -S < %s
+; REQUIRES: asserts
+;
+; -fvectorize-loops caused infinite compile time / memory usage on this input;
+; the test checks that the compilation completes successfully.
+
+target triple = "hexagon"
+
+@g0 = global i8 -1, align 1
+@g1 = common global [15 x i8] zeroinitializer, align 8
+@g2 = common global [15 x i8*] zeroinitializer, align 8
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  %v0 = alloca i32, align 4
+  store i32 0, i32* %v0, align 4
+  store i32 0, i32* %v0, align 4
+  br label %b1
+
+b1:                                               ; preds = %b3, %b0
+  %v1 = load i32, i32* %v0, align 4
+  %v2 = icmp slt i32 %v1, 15
+  br i1 %v2, label %b2, label %b4
+
+b2:                                               ; preds = %b1
+  %v3 = load i32, i32* %v0, align 4
+  %v4 = getelementptr inbounds [15 x i8], [15 x i8]* @g1, i32 0, i32 %v3
+  store i8 0, i8* %v4, align 1
+  %v5 = load i32, i32* %v0, align 4
+  %v6 = getelementptr inbounds [15 x i8*], [15 x i8*]* @g2, i32 0, i32 %v5
+  store i8* @g0, i8** %v6, align 4
+  br label %b3
+
+b3:                                               ; preds = %b2
+  %v7 = load i32, i32* %v0, align 4
+  %v8 = add nsw i32 %v7, 1
+  store i32 %v8, i32* %v0, align 4
+  br label %b1
+
+b4:                                               ; preds = %b1
+  ret void
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/bug17276.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/bug17276.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/bug17276.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/bug17276.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,104 @@
+; RUN: llc  -march=hexagon -function-sections < %s | FileCheck %s
+; CHECK: if (!p0)
+; CHECK-NOT: if (p0.new)
+; CHECK: {
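+; The checks above require the plain 'if (!p0)' predicate form, with no
+; 'if (p0.new)' new-value form between it and the next '{'.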
+
+target triple = "hexagon-unknown--elf"
+
+%s.0 = type { i8*, i8* }
+%s.1 = type { i8, [2 x %s.2*] }
+%s.2 = type { i32, i32 }
+
+@g0 = internal constant %s.0 zeroinitializer, align 4
+
+; Function Attrs: minsize nounwind
+define i32 @f0(%s.1* %a0) #0 {
+b0:
+  %v0 = tail call i32 @f1(%s.1* %a0, i32 0)
+  ret i32 %v0
+}
+
+; Function Attrs: minsize nounwind
+define internal i32 @f1(%s.1* %a0, i32 %a1) #0 {
+b0:
+  %v0 = icmp eq %s.1* %a0, null
+  br i1 %v0, label %b4, label %b1
+
+b1:                                               ; preds = %b0
+  %v1 = icmp eq i32 %a1, 1
+  br i1 %v1, label %b3, label %b2
+
+b2:                                               ; preds = %b1
+  tail call void @f2(%s.0* null) #3
+  unreachable
+
+b3:                                               ; preds = %b1
+  tail call void @f2(%s.0* @g0) #3
+  unreachable
+
+b4:                                               ; preds = %b0
+  %v2 = load %s.2*, %s.2** inttoptr (i32 4 to %s.2**), align 4, !tbaa !0
+  %v3 = icmp eq %s.2* %v2, null
+  br i1 %v3, label %b5, label %b6
+
+b5:                                               ; preds = %b4
+  tail call void @f3(i32 0) #4
+  br label %b10
+
+b6:                                               ; preds = %b4
+  %v4 = tail call zeroext i8 @f4(%s.1* null) #4
+  %v5 = icmp eq i8 %v4, 0
+  br i1 %v5, label %b7, label %b8
+
+b7:                                               ; preds = %b6
+  tail call void @f3(i32 0) #4
+  br label %b9
+
+b8:                                               ; preds = %b6
+  %v6 = load %s.2*, %s.2** inttoptr (i32 4 to %s.2**), align 4, !tbaa !0
+  %v7 = icmp eq i32 %a1, 1
+  %v8 = getelementptr inbounds %s.2, %s.2* %v6, i32 0, i32 1
+  %v9 = getelementptr inbounds %s.2, %s.2* %v6, i32 0, i32 0
+  %v10 = select i1 %v7, i32* %v8, i32* %v9
+  %v11 = tail call i32 @f5(i32* %v10) #4
+  br label %b9
+
+b9:                                               ; preds = %b8, %b7
+  %v12 = phi i32 [ 0, %b7 ], [ %v11, %b8 ]
+  tail call void @f3(i32 %v12) #4
+  br label %b10
+
+b10:                                              ; preds = %b9, %b5
+  %v13 = phi i32 [ 0, %b5 ], [ %v12, %b9 ]
+  ret i32 %v13
+}
+
+; Function Attrs: noreturn optsize
+declare void @f2(%s.0*) #1
+
+; Function Attrs: optsize
+declare void @f3(i32) #2
+
+; Function Attrs: optsize
+declare zeroext i8 @f4(%s.1*) #2
+
+; Function Attrs: optsize
+declare i32 @f5(i32*) #2
+
+; Function Attrs: minsize nounwind
+define i32 @f6(%s.1* %a0) #0 {
+b0:
+  %v0 = tail call i32 @f1(%s.1* %a0, i32 1)
+  ret i32 %v0
+}
+
+attributes #0 = { minsize nounwind }
+attributes #1 = { noreturn optsize }
+attributes #2 = { optsize }
+attributes #3 = { noreturn nounwind optsize }
+attributes #4 = { nounwind optsize }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"any pointer", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/bug17386.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/bug17386.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/bug17386.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/bug17386.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,89 @@
+; RUN: llc -march=hexagon -O2 < %s
+; REQUIRES: asserts
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define void @f0(i8* %a0, ...) #0 {
+b0:
+  br i1 undef, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  unreachable
+
+b2:                                               ; preds = %b2, %b0
+  br i1 undef, label %b2, label %b3
+
+b3:                                               ; preds = %b2
+  br i1 undef, label %b4, label %b5
+
+b4:                                               ; preds = %b3
+  br label %b5
+
+b5:                                               ; preds = %b4, %b3
+  br label %b6
+
+b6:                                               ; preds = %b12, %b5
+  br i1 undef, label %b9, label %b7
+
+b7:                                               ; preds = %b6
+  %v0 = load i8, i8* undef, align 1, !tbaa !0
+  %v1 = zext i8 %v0 to i32
+  br i1 undef, label %b9, label %b8
+
+b8:                                               ; preds = %b7
+  br i1 undef, label %b9, label %b10
+
+b9:                                               ; preds = %b8, %b7, %b6
+  br label %b10
+
+b10:                                              ; preds = %b9, %b8
+  %v2 = phi i32 [ undef, %b9 ], [ %v1, %b8 ]
+  %v3 = icmp eq i32 %v2, 37
+  %v4 = sext i1 %v3 to i32
+  %v5 = icmp slt i32 0, 1
+  br i1 %v5, label %b12, label %b11
+
+b11:                                              ; preds = %b10
+  unreachable
+
+b12:                                              ; preds = %b10
+  br i1 undef, label %b13, label %b6
+
+b13:                                              ; preds = %b12
+  br label %b14
+
+b14:                                              ; preds = %b15, %b13
+  br i1 undef, label %b16, label %b15
+
+b15:                                              ; preds = %b14
+  br i1 undef, label %b14, label %b16
+
+b16:                                              ; preds = %b15, %b14
+  br label %b17
+
+b17:                                              ; preds = %b18, %b16
+  %v6 = phi i8* [ undef, %b16 ], [ %v7, %b18 ]
+  %v7 = getelementptr inbounds i8, i8* %v6, i32 1
+  %v8 = load i8, i8* %v7, align 1, !tbaa !0
+  br label %b18
+
+b18:                                              ; preds = %b19, %b17
+  %v9 = phi i32 [ 5, %b17 ], [ %v11, %b19 ]
+  %v10 = icmp eq i8 undef, %v8
+  br i1 %v10, label %b17, label %b19
+
+b19:                                              ; preds = %b18
+  %v11 = add i32 %v9, -1
+  %v12 = icmp eq i32 %v11, 0
+  br i1 %v12, label %b20, label %b18
+
+b20:                                              ; preds = %b19
+  unreachable
+}
+
+attributes #0 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/bug18008.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/bug18008.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/bug18008.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/bug18008.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,94 @@
+;RUN: llc -march=hexagon -filetype=obj < %s -o - | llvm-objdump -mv60 -mhvx -d - | FileCheck %s
+
+; Should not crash, and vd0 should map to vxor
+
+target triple = "hexagon"
+
+@g0 = common global <32 x i32> zeroinitializer, align 128
+
+; Function Attrs: nounwind
+define i32 @f0() #0 {
+b0:
+  %v0 = call <32 x i32> @llvm.hexagon.V6.vd0.128B()
+  store <32 x i32> %v0, <32 x i32>* @g0, align 128
+  ret i32 0
+}
+; CHECK: { v{{[0-9]}} = vxor(v{{[0-9]}},v{{[0-9]}})
+
+; Function Attrs: nounwind
+define i32 @f1(i32 %a0) #0 {
+b0:
+  %v0 = alloca i8, align 1
+  %v1 = alloca i8, align 1
+  %v2 = tail call i64 @llvm.hexagon.S2.asr.i.p.rnd.goodsyntax(i64 5, i32 0)
+  %v3 = trunc i64 %v2 to i8
+  store volatile i8 %v3, i8* %v0, align 1
+  %v4 = tail call i64 @llvm.hexagon.S2.asr.i.p.rnd.goodsyntax(i64 4, i32 4)
+  %v5 = trunc i64 %v4 to i8
+  store volatile i8 %v5, i8* %v1, align 1
+  %v6 = load volatile i8, i8* %v0, align 1
+  %v7 = zext i8 %v6 to i32
+  %v8 = load volatile i8, i8* %v1, align 1
+  %v9 = zext i8 %v8 to i32
+  %v10 = add nuw nsw i32 %v9, %v7
+  ret i32 %v10
+}
+; CHECK: combine(#0,#4)
+; CHECK: r{{[0-9]}}:{{[0-9]}} = asr(r{{[0-9]}}:{{[0-9]}},#3):rnd
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.S2.asr.i.p.rnd.goodsyntax(i64, i32) #1
+
+; Function Attrs: nounwind
+define i32 @f2(i32 %a0) #0 {
+b0:
+  %v0 = alloca i8, align 1
+  %v1 = alloca i8, align 1
+  %v2 = tail call i64 @llvm.hexagon.S5.vasrhrnd.goodsyntax(i64 6, i32 0)
+  %v3 = trunc i64 %v2 to i8
+  store volatile i8 %v3, i8* %v0, align 1
+  %v4 = tail call i64 @llvm.hexagon.S5.vasrhrnd.goodsyntax(i64 4, i32 4)
+  %v5 = trunc i64 %v4 to i8
+  store volatile i8 %v5, i8* %v0, align 1
+  %v6 = load volatile i8, i8* %v0, align 1
+  %v7 = zext i8 %v6 to i32
+  %v8 = load volatile i8, i8* %v1, align 1
+  %v9 = zext i8 %v8 to i32
+  %v10 = add nuw nsw i32 %v9, %v7
+  ret i32 %v10
+}
+; CHECK: combine(#0,#4)
+; CHECK: r{{[0-9]}}:{{[0-9]}} = vasrh(r{{[0-9]}}:{{[0-9]}},#3):raw
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.S5.vasrhrnd.goodsyntax(i64, i32) #1
+
+; Function Attrs: nounwind
+define i32 @f3(i32 %a0) #0 {
+b0:
+  %v0 = alloca i8, align 1
+  %v1 = alloca i8, align 1
+  %v2 = tail call i32 @llvm.hexagon.S5.asrhub.rnd.sat.goodsyntax(i64 0, i32 0)
+  %v3 = trunc i32 %v2 to i8
+  store volatile i8 %v3, i8* %v0, align 1
+  %v4 = tail call i32 @llvm.hexagon.S5.asrhub.rnd.sat.goodsyntax(i64 4, i32 4)
+  %v5 = trunc i32 %v4 to i8
+  store volatile i8 %v5, i8* %v1, align 1
+  %v6 = load volatile i8, i8* %v0, align 1
+  %v7 = zext i8 %v6 to i32
+  %v8 = load volatile i8, i8* %v1, align 1
+  %v9 = zext i8 %v8 to i32
+  %v10 = add nuw nsw i32 %v9, %v7
+  ret i32 %v10
+}
+; CHECK: r{{[0-9]}} = vasrhub(r{{[0-9]}}:{{[0-9]}},#3):raw
+; CHECK: r{{[0-9]}} = vsathub(r{{[0-9]}}:{{[0-9]}})
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S5.asrhub.rnd.sat.goodsyntax(i64, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vd0.128B() #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length128b" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/bug18491-optsize.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/bug18491-optsize.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/bug18491-optsize.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/bug18491-optsize.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,37 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: {{.balign 4|.p2align 2}}
+; CHECK: {{.balign 4|.p2align 2}}
+; CHECK: {{.balign 4|.p2align 2}}
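+; Presumably verifies that each of the three functions below still gets 4-byte
+; alignment even though they carry the optsize attribute.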
+
+target triple = "hexagon"
+
+@g0 = global i32 4, align 4
+@g1 = global i32 4, align 4
+@g2 = global i32 4, align 4
+@g3 = global i32 4, align 4
+
+; Function Attrs: nounwind optsize
+define void @f0(i32 %a0) #0 {
+b0:
+  store i32 1, i32* @g0, align 4
+  ret void
+}
+
+; Function Attrs: nounwind optsize
+define void @f1(i32 %a0) #0 {
+b0:
+  store i32 1, i32* @g0, align 4
+  store i32 2, i32* @g1, align 4
+  store i32 3, i32* @g2, align 4
+  store i32 4, i32* @g3, align 4
+  ret void
+}
+
+; Function Attrs: nounwind optsize readnone
+define i32 @f2(i32 %a0, i8** nocapture readnone %a1) #1 {
+b0:
+  ret i32 %a0
+}
+
+attributes #0 = { nounwind optsize "target-cpu"="hexagonv60" }
+attributes #1 = { nounwind optsize readnone "target-cpu"="hexagonv60" }

Added: llvm/trunk/test/CodeGen/Hexagon/bug19076.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/bug19076.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/bug19076.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/bug19076.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,226 @@
+; REQUIRES: asserts
+; RUN: llc -march=hexagon -stats -o /dev/null < %s
+
+%s.0 = type { %s.1*, %s.2*, %s.17*, i32, i32, i32, i32, i8*, i8*, i8* }
+%s.1 = type opaque
+%s.2 = type { %s.3, %s.4*, i8* }
+%s.3 = type { i32, i32 }
+%s.4 = type { %s.4*, %s.4*, %s.4*, %s.4*, i32, i32, i32, %s.3*, i32, [1 x %s.5]*, [1 x %s.5]*, i8, i8, i8, i8*, i32, %s.4*, %s.8*, i8, i8, i8, i32*, i32, i32*, i32, i8*, i8, %s.9, [32 x i8**], [7 x i8*], i32, i8*, i32, %s.4*, i32, i32, %s.11, %s.13, i8, i8, i8, %s.14*, %s.15*, %s.15*, i32, [12 x i8] }
+%s.5 = type { [1 x %s.6], i32, %s.7, [4 x i8] }
+%s.6 = type { [16 x i32] }
+%s.7 = type { [2 x i32] }
+%s.8 = type { void (i8*)*, i8*, i32, %s.8* }
+%s.9 = type { i8* (i8*)*, i8*, %s.7, i32, %s.10 }
+%s.10 = type { i32 }
+%s.11 = type { %s.12, i8, i8* }
+%s.12 = type { [2 x i32] }
+%s.13 = type { i32, i32 }
+%s.14 = type { i8*, i32 (i8*, %s.4*)* }
+%s.15 = type { %s.15*, %s.16*, i32 }
+%s.16 = type { %s.3, i32, %s.4*, %s.4*, %s.4*, i32, i32 }
+%s.17 = type { i32, void (i8*)* }
+%s.18 = type { %s.0*, i8* }
+
+; Function Attrs: nounwind
+define zeroext i8 @f0(%s.0* %a0, i32 %a1, %s.18* %a2) #0 {
+b0:
+  %v0 = alloca i8, align 1
+  %v1 = alloca %s.0*, align 4
+  %v2 = alloca i32, align 4
+  %v3 = alloca %s.18*, align 4
+  %v4 = alloca i32, align 4
+  %v5 = alloca i32, align 4
+  %v6 = alloca i8*
+  %v7 = alloca i32, align 4
+  %v8 = alloca i32
+  %v9 = alloca %s.4, align 32
+  store %s.0* %a0, %s.0** %v1, align 4
+  store i32 %a1, i32* %v2, align 4
+  store %s.18* %a2, %s.18** %v3, align 4
+  %v10 = load %s.0*, %s.0** %v1, align 4
+  %v11 = getelementptr inbounds %s.0, %s.0* %v10, i32 0, i32 3
+  %v12 = load i32, i32* %v11, align 4
+  store i32 %v12, i32* %v4, align 4
+  %v13 = load %s.0*, %s.0** %v1, align 4
+  %v14 = getelementptr inbounds %s.0, %s.0* %v13, i32 0, i32 6
+  %v15 = load i32, i32* %v14, align 4
+  store i32 %v15, i32* %v5, align 4
+  %v16 = load i32, i32* %v4, align 4
+  %v17 = call i8* @llvm.stacksave()
+  store i8* %v17, i8** %v6
+  %v18 = alloca %s.2, i32 %v16, align 8
+  %v19 = load %s.0*, %s.0** %v1, align 4
+  %v20 = call i32 @f1(%s.0* %v19)
+  %v21 = icmp ne i32 %v20, 0
+  br i1 %v21, label %b2, label %b1
+
+b1:                                               ; preds = %b0
+  store i8 8, i8* %v0
+  store i32 1, i32* %v8
+  br label %b23
+
+b2:                                               ; preds = %b0
+  %v22 = load %s.0*, %s.0** %v1, align 4
+  %v23 = getelementptr inbounds %s.0, %s.0* %v22, i32 0, i32 0
+  %v24 = load %s.1*, %s.1** %v23, align 4
+  %v25 = load %s.0*, %s.0** %v1, align 4
+  %v26 = getelementptr inbounds %s.0, %s.0* %v25, i32 0, i32 1
+  %v27 = load %s.2*, %s.2** %v26, align 4
+  %v28 = bitcast %s.2* %v27 to i8*
+  %v29 = bitcast %s.2* %v18 to i8*
+  %v30 = load i32, i32* %v4, align 4
+  %v31 = mul i32 16, %v30
+  %v32 = call zeroext i8 @f2(%s.1* %v24, i8* %v28, i8* %v29, i32 %v31)
+  %v33 = zext i8 %v32 to i32
+  %v34 = icmp ne i32 %v33, 0
+  br i1 %v34, label %b3, label %b4
+
+b3:                                               ; preds = %b2
+  store i8 1, i8* %v0
+  store i32 1, i32* %v8
+  br label %b23
+
+b4:                                               ; preds = %b2
+  store i32 0, i32* %v7, align 4
+  br label %b5
+
+b5:                                               ; preds = %b21, %b4
+  %v35 = load i32, i32* %v7, align 4
+  %v36 = load i32, i32* %v4, align 4
+  %v37 = icmp ult i32 %v35, %v36
+  br i1 %v37, label %b6, label %b7
+
+b6:                                               ; preds = %b5
+  br label %b7
+
+b7:                                               ; preds = %b6, %b5
+  %v38 = phi i1 [ false, %b5 ], [ true, %b6 ]
+  br i1 %v38, label %b8, label %b22
+
+b8:                                               ; preds = %b7
+  %v39 = load i32, i32* %v7, align 4
+  %v40 = getelementptr inbounds %s.2, %s.2* %v18, i32 %v39
+  %v41 = getelementptr inbounds %s.2, %s.2* %v40, i32 0, i32 1
+  %v42 = load %s.4*, %s.4** %v41, align 4
+  %v43 = icmp ne %s.4* %v42, null
+  br i1 %v43, label %b9, label %b17
+
+b9:                                               ; preds = %b8
+  %v44 = load %s.0*, %s.0** %v1, align 4
+  %v45 = getelementptr inbounds %s.0, %s.0* %v44, i32 0, i32 0
+  %v46 = load %s.1*, %s.1** %v45, align 4
+  %v47 = load i32, i32* %v7, align 4
+  %v48 = getelementptr inbounds %s.2, %s.2* %v18, i32 %v47
+  %v49 = getelementptr inbounds %s.2, %s.2* %v48, i32 0, i32 1
+  %v50 = load %s.4*, %s.4** %v49, align 4
+  %v51 = bitcast %s.4* %v50 to i8*
+  %v52 = bitcast %s.4* %v9 to i8*
+  %v53 = load i32, i32* %v5, align 4
+  %v54 = call zeroext i8 @f2(%s.1* %v46, i8* %v51, i8* %v52, i32 %v53)
+  %v55 = zext i8 %v54 to i32
+  %v56 = icmp ne i32 %v55, 0
+  br i1 %v56, label %b10, label %b11
+
+b10:                                              ; preds = %b9
+  store i8 1, i8* %v0
+  store i32 1, i32* %v8
+  br label %b23
+
+b11:                                              ; preds = %b9
+  %v57 = getelementptr inbounds %s.4, %s.4* %v9, i32 0, i32 5
+  %v58 = load i32, i32* %v57, align 4
+  %v59 = icmp ne i32 %v58, 0
+  br i1 %v59, label %b12, label %b13
+
+b12:                                              ; preds = %b11
+  br label %b14
+
+b13:                                              ; preds = %b11
+  %v60 = load %s.0*, %s.0** %v1, align 4
+  %v61 = getelementptr inbounds %s.0, %s.0* %v60, i32 0, i32 0
+  %v62 = load %s.1*, %s.1** %v61, align 4
+  %v63 = call i32 @f3(%s.1* %v62)
+  br label %b14
+
+b14:                                              ; preds = %b13, %b12
+  %v64 = phi i32 [ %v58, %b12 ], [ %v63, %b13 ]
+  %v65 = load i32, i32* %v2, align 4
+  %v66 = icmp eq i32 %v64, %v65
+  br i1 %v66, label %b15, label %b16
+
+b15:                                              ; preds = %b14
+  %v67 = load %s.0*, %s.0** %v1, align 4
+  %v68 = load %s.18*, %s.18** %v3, align 4
+  %v69 = getelementptr inbounds %s.18, %s.18* %v68, i32 0, i32 0
+  store %s.0* %v67, %s.0** %v69, align 4
+  %v70 = load i32, i32* %v7, align 4
+  %v71 = getelementptr inbounds %s.2, %s.2* %v18, i32 %v70
+  %v72 = getelementptr inbounds %s.2, %s.2* %v71, i32 0, i32 1
+  %v73 = load %s.4*, %s.4** %v72, align 4
+  %v74 = bitcast %s.4* %v73 to i8*
+  %v75 = load %s.18*, %s.18** %v3, align 4
+  %v76 = getelementptr inbounds %s.18, %s.18* %v75, i32 0, i32 1
+  store i8* %v74, i8** %v76, align 4
+  store i8 0, i8* %v0
+  store i32 1, i32* %v8
+  br label %b23
+
+b16:                                              ; preds = %b14
+  br label %b20
+
+b17:                                              ; preds = %b8
+  %v77 = load i32, i32* %v7, align 4
+  %v78 = icmp eq i32 %v77, 0
+  br i1 %v78, label %b18, label %b19
+
+b18:                                              ; preds = %b17
+  %v79 = load %s.0*, %s.0** %v1, align 4
+  %v80 = load %s.18*, %s.18** %v3, align 4
+  %v81 = getelementptr inbounds %s.18, %s.18* %v80, i32 0, i32 0
+  store %s.0* %v79, %s.0** %v81, align 4
+  %v82 = load %s.18*, %s.18** %v3, align 4
+  %v83 = getelementptr inbounds %s.18, %s.18* %v82, i32 0, i32 1
+  store i8* null, i8** %v83, align 4
+  store i8 0, i8* %v0
+  store i32 1, i32* %v8
+  br label %b23
+
+b19:                                              ; preds = %b17
+  br label %b20
+
+b20:                                              ; preds = %b19, %b16
+  br label %b21
+
+b21:                                              ; preds = %b20
+  %v84 = load i32, i32* %v7, align 4
+  %v85 = add i32 %v84, 1
+  store i32 %v85, i32* %v7, align 4
+  br label %b5
+
+b22:                                              ; preds = %b7
+  store i8 4, i8* %v0
+  store i32 1, i32* %v8
+  br label %b23
+
+b23:                                              ; preds = %b22, %b18, %b15, %b10, %b3, %b1
+  %v86 = load i8*, i8** %v6
+  call void @llvm.stackrestore(i8* %v86)
+  %v87 = load i8, i8* %v0
+  ret i8 %v87
+}
+
+; Function Attrs: nounwind
+declare i8* @llvm.stacksave() #0
+
+; Function Attrs: inlinehint nounwind
+declare i32 @f1(%s.0*) #1
+
+declare zeroext i8 @f2(%s.1*, i8*, i8*, i32) #0
+
+declare i32 @f3(%s.1*) #0
+
+; Function Attrs: nounwind
+declare void @llvm.stackrestore(i8*) #0
+
+attributes #0 = { nounwind }
+attributes #1 = { inlinehint nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/bug19119.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/bug19119.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/bug19119.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/bug19119.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,49 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK-NOT: .sdata.4.g0,"aM"
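+; Presumably guards against emitting a mergeable ("aM") small-data section named
+; .sdata.4.g0; the explicit linker_input_section attributes below should be respected.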
+
+target triple = "hexagon-unknown--elf"
+
+%s.0 = type { i32 }
+
+@g0 = global %s.0 { i32 3 }, align 4 #0
+@g1 = global i32 0, align 4 #1
+@g2 = global %s.0* @g0, align 4 #2
+@g3 = global i32 0, align 4 #3
+@g4 = global i32 0, align 4 #4
+
+; Function Attrs: nounwind optsize
+define i32 @f0() #5 section ".text.main" {
+b0:
+  %v0 = load i32, i32* @g3, align 4, !tbaa !4
+  %v1 = add nsw i32 %v0, 1
+  store i32 %v1, i32* @g3, align 4, !tbaa !4
+  %v2 = load i8*, i8** bitcast (%s.0** @g2 to i8**), align 4, !tbaa !8
+  %v3 = load i32, i32* @g1, align 4, !tbaa !10
+  %v4 = getelementptr inbounds i8, i8* %v2, i32 %v3
+  %v5 = bitcast i8* %v4 to i32*
+  %v6 = load i32, i32* %v5, align 4, !tbaa !4
+  store i32 %v6, i32* @g4, align 4, !tbaa !4
+  store i32 1, i32* @g3, align 4, !tbaa !4
+  ret i32 0
+}
+
+attributes #0 = { "linker_input_section"=".sdata.4.cccc" "linker_output_section"=".sdata.4" }
+attributes #1 = { "linker_input_section"=".sbss.4.np" "linker_output_section"=".sbss.4" }
+attributes #2 = { "linker_input_section"=".sdata.4.cp" "linker_output_section"=".sdata.4" }
+attributes #3 = { "linker_input_section"=".sbss.4.counter" "linker_output_section"=".sbss.4" }
+attributes #4 = { "linker_input_section"=".sbss.4.value" "linker_output_section"=".sbss.4" }
+attributes #5 = { nounwind optsize "target-cpu"="hexagonv55" }
+
+!llvm.module.flags = !{!0, !2}
+
+!0 = !{i32 6, !"Target CPU", !1}
+!1 = !{!"hexagonv55"}
+!2 = !{i32 6, !"Target Features", !3}
+!3 = !{!"-hvx"}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"int", !6, i64 0}
+!6 = !{!"omnipotent char", !7, i64 0}
+!7 = !{!"Simple C/C++ TBAA"}
+!8 = !{!9, !9, i64 0}
+!9 = !{!"any pointer", !6, i64 0}
+!10 = !{!6, !6, i64 0}

Added: llvm/trunk/test/CodeGen/Hexagon/bug19254-ifconv-vec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/bug19254-ifconv-vec.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/bug19254-ifconv-vec.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/bug19254-ifconv-vec.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,152 @@
+; RUN: llc -march=hexagon -O3 < %s | FileCheck %s
+; We really just want to be sure this compilation does not abort.
+; CHECK: vadd
+
+target triple = "hexagon"
+
+@g0 = private unnamed_addr constant [39 x i8] c"\0AnumTrainingSet =%d  numFeatures = %d\0A\00", align 1
+
+; Function Attrs: nounwind
+declare i32 @f0(i8* nocapture readonly, ...) #0
+
+; Function Attrs: nounwind
+define void @f1(i16* nocapture readnone %a0, i16 signext %a1, i16 signext %a2, i16* nocapture readnone %a3, i16* nocapture readnone %a4, i16* nocapture %a5, i16 signext %a6, i16 signext %a7) #0 {
+b0:
+  %v0 = sext i16 %a1 to i32
+  %v1 = sext i16 %a2 to i32
+  %v2 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([39 x i8], [39 x i8]* @g0, i32 0, i32 0), i32 %v0, i32 %v1) #2
+  %v3 = tail call <32 x i32> @llvm.hexagon.V6.vd0.128B()
+  br label %b1
+
+b1:                                               ; preds = %b18, %b0
+  %v4 = phi i32 [ 0, %b0 ], [ %v57, %b18 ]
+  %v5 = phi <32 x i32> [ %v3, %b0 ], [ %v56, %b18 ]
+  %v6 = icmp slt i32 %v4, %v1
+  br i1 %v6, label %b2, label %b3
+
+b2:                                               ; preds = %b1
+  %v7 = tail call <32 x i32> @llvm.hexagon.V6.vasrh.128B(<32 x i32> undef, i32 16)
+  %v8 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> %v5, <32 x i32> %v7)
+  br label %b3
+
+b3:                                               ; preds = %b2, %b1
+  %v9 = phi <32 x i32> [ %v8, %b2 ], [ %v5, %b1 ]
+  %v10 = add nuw nsw i32 %v4, 1
+  %v11 = icmp slt i32 %v10, %v1
+  br i1 %v11, label %b5, label %b6
+
+b4:                                               ; preds = %b18
+  %v12 = sext i16 %a6 to i32
+  %v13 = tail call double @f3(double 1.000000e+00, i32 %v12) #2
+  %v14 = fptosi double %v13 to i32
+  %v15 = mul nsw i32 %v0, 2
+  %v16 = sitofp i32 %v15 to double
+  %v17 = tail call double @f3(double 1.000000e+00, i32 %v12) #2
+  %v18 = fmul double %v16, %v17
+  %v19 = fptosi double %v18 to i32
+  %v20 = tail call i32 @f2(i32 %v14, i32 %v19, i16 signext %a6) #2
+  %v21 = extractelement <32 x i32> %v56, i32 0
+  %v22 = mul nsw i32 %v20, %v21
+  %v23 = trunc i32 %v22 to i16
+  store i16 %v23, i16* %a5, align 2, !tbaa !0
+  ret void
+
+b5:                                               ; preds = %b3
+  %v24 = tail call <32 x i32> @llvm.hexagon.V6.vasrh.128B(<32 x i32> undef, i32 16)
+  %v25 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> %v9, <32 x i32> %v24)
+  br label %b6
+
+b6:                                               ; preds = %b5, %b3
+  %v26 = phi <32 x i32> [ %v25, %b5 ], [ %v9, %b3 ]
+  %v27 = add nsw i32 %v4, 2
+  %v28 = icmp slt i32 %v27, %v1
+  br i1 %v28, label %b7, label %b8
+
+b7:                                               ; preds = %b6
+  %v29 = tail call <32 x i32> @llvm.hexagon.V6.vasrh.128B(<32 x i32> undef, i32 16)
+  %v30 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> %v26, <32 x i32> %v29)
+  br label %b8
+
+b8:                                               ; preds = %b7, %b6
+  %v31 = phi <32 x i32> [ %v30, %b7 ], [ %v26, %b6 ]
+  %v32 = add nsw i32 %v4, 3
+  %v33 = icmp slt i32 %v32, %v1
+  br i1 %v33, label %b9, label %b10
+
+b9:                                               ; preds = %b8
+  %v34 = tail call <32 x i32> @llvm.hexagon.V6.vasrh.128B(<32 x i32> undef, i32 16)
+  %v35 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> %v31, <32 x i32> %v34)
+  br label %b10
+
+b10:                                              ; preds = %b9, %b8
+  %v36 = phi <32 x i32> [ %v35, %b9 ], [ %v31, %b8 ]
+  %v37 = add nsw i32 %v4, 4
+  %v38 = icmp slt i32 %v37, %v1
+  br i1 %v38, label %b11, label %b12
+
+b11:                                              ; preds = %b10
+  %v39 = tail call <32 x i32> @llvm.hexagon.V6.vasrh.128B(<32 x i32> undef, i32 16)
+  %v40 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> %v36, <32 x i32> %v39)
+  br label %b12
+
+b12:                                              ; preds = %b11, %b10
+  %v41 = phi <32 x i32> [ %v40, %b11 ], [ %v36, %b10 ]
+  %v42 = add nsw i32 %v4, 5
+  %v43 = icmp slt i32 %v42, %v1
+  br i1 %v43, label %b13, label %b14
+
+b13:                                              ; preds = %b12
+  %v44 = tail call <32 x i32> @llvm.hexagon.V6.vasrh.128B(<32 x i32> undef, i32 16)
+  %v45 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> %v41, <32 x i32> %v44)
+  br label %b14
+
+b14:                                              ; preds = %b13, %b12
+  %v46 = phi <32 x i32> [ %v45, %b13 ], [ %v41, %b12 ]
+  %v47 = add nsw i32 %v4, 6
+  %v48 = icmp slt i32 %v47, %v1
+  br i1 %v48, label %b15, label %b16
+
+b15:                                              ; preds = %b14
+  %v49 = tail call <32 x i32> @llvm.hexagon.V6.vasrh.128B(<32 x i32> undef, i32 16)
+  %v50 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> %v46, <32 x i32> %v49)
+  br label %b16
+
+b16:                                              ; preds = %b15, %b14
+  %v51 = phi <32 x i32> [ %v50, %b15 ], [ %v46, %b14 ]
+  %v52 = add nsw i32 %v4, 7
+  %v53 = icmp slt i32 %v52, %v1
+  br i1 %v53, label %b17, label %b18
+
+b17:                                              ; preds = %b16
+  %v54 = tail call <32 x i32> @llvm.hexagon.V6.vasrh.128B(<32 x i32> undef, i32 16)
+  %v55 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> %v51, <32 x i32> %v54)
+  br label %b18
+
+b18:                                              ; preds = %b17, %b16
+  %v56 = phi <32 x i32> [ %v55, %b17 ], [ %v51, %b16 ]
+  %v57 = add nsw i32 %v4, 8
+  %v58 = icmp eq i32 %v57, 64
+  br i1 %v58, label %b4, label %b1
+}
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vd0.128B() #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vasrh.128B(<32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32>, <32 x i32>) #1
+
+declare i32 @f2(i32, i32, i16 signext) #0
+
+declare double @f3(double, i32)
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length128b" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"short", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/bug27085.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/bug27085.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/bug27085.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/bug27085.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,44 @@
+; RUN: llc -march=hexagon -relocation-model=pic -mattr=+long-calls < %s | FileCheck --check-prefix=CHECK-LONG %s
+; RUN: llc -march=hexagon -relocation-model=pic < %s | FileCheck %s
+
+; CHECK-LONG: call ##g0@GDPLT
+; CHECK-LONG-NOT: call g0@GDPLT
+; CHECK: call g0@GDPLT
+; CHECK-NOT: call ##g0@GDPLT
+
+target triple = "hexagon--linux"
+
+@g0 = internal thread_local global i32 0, align 4
+
+; Function Attrs: norecurse nounwind
+define void @f0(i32 %a0) local_unnamed_addr #0 {
+b0:
+  store volatile i32 1, i32* @g0, align 4, !tbaa !1
+  ret void
+}
+
+; Function Attrs: norecurse nounwind
+define zeroext i1 @f1() local_unnamed_addr #0 {
+b0:
+  %v0 = load volatile i32, i32* @g0, align 4, !tbaa !1
+  %v1 = icmp eq i32 %v0, 0
+  br i1 %v1, label %b2, label %b1
+
+b1:                                               ; preds = %b0
+  store volatile i32 0, i32* @g0, align 4, !tbaa !1
+  br label %b2
+
+b2:                                               ; preds = %b1, %b0
+  %v2 = phi i1 [ true, %b1 ], [ false, %b0 ]
+  ret i1 %v2
+}
+
+attributes #0 = { norecurse nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length64b" }
+
+!llvm.module.flags = !{!0}
+
+!0 = !{i32 7, !"PIC Level", i32 1}
+!1 = !{!2, !2, i64 0}
+!2 = !{!"int", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C++ TBAA"}

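For illustration, a minimal C sketch of source that could lower to the IR in
bug27085.ll above (the names mirror the IR; the exact source is an assumption,
not part of the commit). The test checks that each access to the thread-local
variable becomes a call through the GDPLT under -fPIC, and that the call takes
the ##-extended form only when +long-calls is enabled:

    /* Hypothetical C equivalent of bug27085.ll */
    static __thread volatile int g0;    /* internal thread_local global */

    void f0(int a0) { g0 = 1; }         /* TLS store -> call g0@GDPLT (PIC) */

    _Bool f1(void) {
      if (g0 != 0) {                    /* TLS load */
        g0 = 0;                         /* TLS store */
        return 1;
      }
      return 0;
    }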
Added: llvm/trunk/test/CodeGen/Hexagon/bug31839.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/bug31839.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/bug31839.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/bug31839.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,26 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+; Check for successful compilation.
+
+define i8* @f0(i32 %a0, i32 %a1) {
+b0:
+  %v0 = call noalias i8* @f1(i32 undef, i32 undef)
+  br i1 undef, label %b2, label %b1
+
+b1:                                               ; preds = %b0
+  %v1 = ptrtoint i8* %v0 to i32
+  %v2 = bitcast i8* %v0 to i32*
+  store volatile i32 %v1, i32* %v2, align 4
+  %v3 = getelementptr inbounds i8, i8* %v0, i32 4
+  %v4 = bitcast i8* %v3 to i8**
+  store i8* %v0, i8** %v4, align 4
+  %v5 = getelementptr inbounds i8, i8* %v0, i32 16
+  br label %b2
+
+b2:                                               ; preds = %b1, %b0
+  %v6 = phi i8* [ %v5, %b1 ], [ null, %b0 ]
+  ret i8* %v6
+}
+
+declare noalias i8* @f1(i32, i32) local_unnamed_addr

Added: llvm/trunk/test/CodeGen/Hexagon/bug6757-endloop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/bug6757-endloop.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/bug6757-endloop.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/bug6757-endloop.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,107 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Make sure that we can handle loops with multiple ENDLOOP instructions.
+; This situation can arise due to tail duplication.
+
+; CHECK: loop1([[LP:.LBB0_[0-9]+]]
+; CHECK: [[LP]]:
+; CHECK-NOT: loop1(
+; CHECK: endloop1
+; CHECK: endloop1
+
+%s.0 = type { i32, i8* }
+%s.1 = type { i32, i32, i32, i32 }
+
+define void @f0(%s.0* nocapture readonly %a0, %s.1* nocapture readonly %a1) {
+b0:
+  %v0 = getelementptr inbounds %s.1, %s.1* %a1, i32 0, i32 0
+  %v1 = load i32, i32* %v0, align 4
+  %v2 = getelementptr inbounds %s.1, %s.1* %a1, i32 0, i32 3
+  %v3 = load i32, i32* %v2, align 4
+  %v4 = getelementptr inbounds %s.1, %s.1* %a1, i32 0, i32 2
+  %v5 = load i32, i32* %v4, align 4
+  %v6 = getelementptr inbounds %s.1, %s.1* %a1, i32 0, i32 1
+  %v7 = load i32, i32* %v6, align 4
+  %v8 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 1
+  %v9 = load i8*, i8** %v8, align 4
+  %v10 = bitcast i8* %v9 to i32*
+  %v11 = mul i32 %v1, 10
+  %v12 = icmp eq i32 %v1, %v3
+  %v13 = icmp eq i32 %v5, 0
+  br i1 %v12, label %b3, label %b1
+
+b1:                                               ; preds = %b0
+  br i1 %v13, label %b14, label %b2
+
+b2:                                               ; preds = %b1
+  %v14 = lshr i32 %v11, 5
+  %v15 = getelementptr inbounds i32, i32* %v10, i32 %v14
+  %v16 = and i32 %v11, 30
+  %v17 = icmp eq i32 %v16, 0
+  br label %b11
+
+b3:                                               ; preds = %b0
+  br i1 %v13, label %b14, label %b4
+
+b4:                                               ; preds = %b3
+  %v18 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 0
+  br label %b5
+
+b5:                                               ; preds = %b6, %b4
+  %v19 = phi i32 [ %v11, %b4 ], [ %v22, %b6 ]
+  %v20 = phi i32 [ %v5, %b4 ], [ %v21, %b6 ]
+  %v21 = add i32 %v20, -1
+  %v22 = add i32 %v19, -10
+  %v23 = lshr i32 %v22, 5
+  %v24 = getelementptr inbounds i32, i32* %v10, i32 %v23
+  %v25 = and i32 %v22, 31
+  %v26 = load i32, i32* %v18, align 4
+  %v27 = mul i32 %v26, %v7
+  %v28 = icmp eq i32 %v25, 0
+  br i1 %v28, label %b7, label %b6
+
+b6:                                               ; preds = %b10, %b9, %b8, %b5
+  %v29 = icmp eq i32 %v21, 0
+  br i1 %v29, label %b14, label %b5
+
+b7:                                               ; preds = %b5
+  %v30 = icmp ugt i32 %v27, 1
+  br i1 %v30, label %b8, label %b9
+
+b8:                                               ; preds = %b7
+  %v31 = icmp ugt i32 %v27, 3
+  br i1 %v31, label %b10, label %b6
+
+b9:                                               ; preds = %b7
+  %v32 = load volatile i32, i32* %v24, align 4
+  store volatile i32 %v32, i32* %v24, align 4
+  br label %b6
+
+b10:                                              ; preds = %b10, %b8
+  %v33 = phi i32 [ %v37, %b10 ], [ %v27, %b8 ]
+  %v34 = phi i32* [ %v35, %b10 ], [ %v24, %b8 ]
+  %v35 = getelementptr inbounds i32, i32* %v34, i32 -1
+  %v36 = load volatile i32, i32* %v34, align 4
+  %v37 = add i32 %v33, -4
+  %v38 = icmp ugt i32 %v37, 3
+  br i1 %v38, label %b10, label %b6
+
+b11:                                              ; preds = %b12, %b2
+  %v39 = phi i32 [ %v5, %b2 ], [ %v40, %b12 ]
+  %v40 = add i32 %v39, -1
+  br i1 %v17, label %b13, label %b12
+
+b12:                                              ; preds = %b13, %b11
+  %v41 = icmp eq i32 %v40, 0
+  br i1 %v41, label %b14, label %b11
+
+b13:                                              ; preds = %b11
+  %v42 = load volatile i32, i32* %v15, align 4
+  %v43 = load volatile i32, i32* %v15, align 4
+  %v44 = and i32 %v43, %v42
+  store volatile i32 %v44, i32* %v15, align 4
+  br label %b12
+
+b14:                                              ; preds = %b12, %b6, %b3, %b1
+  ret void
+}

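The comment in bug6757-endloop.ll refers to tail duplication producing more
than one copy of the loop latch. As a rough illustration only (this is not the
source of the test), a loop of the following shape reaches its latch along two
paths, and the tail duplicator may clone the latch into both predecessors,
leaving two endloop1 instructions for a single loop1 setup:

    void f(volatile unsigned *p, unsigned n, unsigned mask) {
      for (unsigned i = n; i != 0; --i) {
        if (mask & 1)
          continue;          /* one path back to the header */
        *p = *p & *p;        /* the other path does the volatile work */
      }
    }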
Added: llvm/trunk/test/CodeGen/Hexagon/bug9049.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/bug9049.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/bug9049.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/bug9049.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,20 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+
+target triple = "hexagon-unknown-linux-gnu"
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = bitcast i32* %v0 to i64*
+  %v2 = load i64, i64* %v1, align 8
+; CHECK: 	call f1
+  %v3 = call i32 @f1(i64 %v2)
+  unreachable
+}
+
+; Function Attrs: inlinehint nounwind
+declare i32 @f1(i64) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { inlinehint nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/bug9963.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/bug9963.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/bug9963.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/bug9963.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,15 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+; CHECK-NOT: memd
+; CHECK: call f1
+; CHECK: r{{[0-9]}}:{{[0-9]}} = combine(#0,#10)
+target triple = "hexagon"
+
+define i64 @f0(i32 %a0) {
+b0:
+  %v0 = add nsw i32 %a0, 5
+  %v1 = tail call i64 @f1(i32 %v0)
+  %v2 = add nsw i64 %v1, 10
+  ret i64 %v2
+}
+
+declare i64 @f1(i32)

Added: llvm/trunk/test/CodeGen/Hexagon/call-long1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/call-long1.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/call-long1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/call-long1.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,24 @@
+; RUN: llc -march=hexagon -spill-func-threshold-Os=0 -spill-func-threshold=0  < %s | FileCheck %s
+
+; Check that the long-calls feature handles save and restore.
+; CHECK: call ##__save
+; CHECK: jump ##__restore
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define i32 @f0(i32 %a0, i32 %a1, i32 %a2) #0 {
+b0:
+  %v0 = tail call i32 bitcast (i32 (...)* @f1 to i32 (i32, i32, i32)*)(i32 %a0, i32 %a1, i32 %a2) #1
+  %v1 = tail call i32 bitcast (i32 (...)* @f2 to i32 (i32, i32, i32)*)(i32 %a0, i32 %a1, i32 %a2) #1
+  ret i32 0
+}
+
+; Function Attrs: nounwind
+declare i32 @f1(...) #1
+
+; Function Attrs: nounwind
+declare i32 @f2(...) #1
+
+attributes #0 = { nounwind "target-features"="+long-calls" }
+attributes #1 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/call-v4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/call-v4.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/call-v4.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/call-v4.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,34 @@
+; RUN: llc -march=hexagon -print-machineinstrs=expand-isel-pseudos -o /dev/null 2>&1 < %s | FileCheck %s
+; REQUIRES: asserts
+
+; CHECK: J2_call @f1
+; CHECK: PS_call_nr @f2
+
+target triple = "hexagon"
+
+@g0 = external global i32
+
+; Function Attrs: nounwind
+define i32 @f0() #0 {
+b0:
+  %v0 = load i32, i32* @g0, align 4
+  %v1 = tail call i32 @f1(i32 %v0) #0
+  %v2 = icmp eq i32 %v1, 0
+  br i1 %v2, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  tail call void @f2() #2
+  unreachable
+
+b2:                                               ; preds = %b0
+  ret i32 0
+}
+
+declare i32 @f1(i32)
+
+; Function Attrs: noreturn
+declare void @f2() #1
+
+attributes #0 = { nounwind "disable-tail-calls"="true" }
+attributes #1 = { noreturn }
+attributes #2 = { noreturn nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/callR_noreturn.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/callR_noreturn.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/callR_noreturn.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/callR_noreturn.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,22 @@
+; RUN: llc -march=hexagon  < %s | FileCheck %s
+; CHECK: callr {{r[0-9]+}}
+
+%s.0 = type { [1 x %s.1], [4 x i8*] }
+%s.1 = type { [1 x %s.2], i32, [4 x i8] }
+%s.2 = type { [16 x i32] }
+
+; Function Attrs: noreturn nounwind
+define hidden void @f0() #0 section ".text.compat" {
+b0:
+  br i1 undef, label %b2, label %b1
+
+b1:                                               ; preds = %b0
+  unreachable
+
+b2:                                               ; preds = %b0
+  call void undef(%s.0* undef) #1
+  unreachable
+}
+
+attributes #0 = { noreturn nounwind }
+attributes #1 = { noreturn }

Added: llvm/trunk/test/CodeGen/Hexagon/calling-conv.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/calling-conv.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/calling-conv.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/calling-conv.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,37 @@
+; RUN: llc -march=hexagon -mno-pairing -mno-compound <%s | FileCheck %s --check-prefix=CHECK-ONE
+; RUN: llc -march=hexagon -mno-pairing -mno-compound <%s | FileCheck %s --check-prefix=CHECK-TWO
+; RUN: llc -march=hexagon -mno-pairing -mno-compound <%s | FileCheck %s --check-prefix=CHECK-THREE
+
+%s.0 = type { i32, i8, i64 }
+%s.1 = type { i8, i64 }
+
+@g0 = external global %s.0*
+
+; CHECK-ONE:    memw(r29+#48) = r2
+; CHECK-TWO:    memw(r29+#52) = r2
+; CHECK-THREE:  memw(r29+#56) = r2
+
+define void @f0(%s.0* noalias nocapture sret %a0, i32 %a1, i8 zeroext %a2, %s.0* byval nocapture readnone align 8 %a3, %s.1* byval nocapture readnone align 8 %a4) #0 {
+b0:
+  %v0 = alloca %s.0, align 8
+  %v1 = load %s.0*, %s.0** @g0, align 4
+  %v2 = sext i32 %a1 to i64
+  %v3 = add nsw i64 %v2, 1
+  %v4 = add nsw i32 %a1, 2
+  %v5 = add nsw i64 %v2, 3
+  call void @f1(%s.0* sret %v0, i32 45, %s.0* byval align 8 %v1, %s.0* byval align 8 %v1, i8 zeroext %a2, i64 %v3, i32 %v4, i64 %v5, i8 zeroext %a2, i8 zeroext %a2, i8 zeroext %a2, i32 45)
+  %v6 = bitcast %s.0* %v0 to i32*
+  store i32 20, i32* %v6, align 8
+  %v7 = bitcast %s.0* %a0 to i8*
+  %v8 = bitcast %s.0* %v0 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v7, i8* align 8 %v8, i32 16, i1 false)
+  ret void
+}
+
+declare void @f1(%s.0* sret, i32, %s.0* byval align 8, %s.0* byval align 8, i8 zeroext, i64, i32, i64, i8 zeroext, i8 zeroext, i8 zeroext, i32)
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { argmemonly nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/cext-ice.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/cext-ice.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/cext-ice.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/cext-ice.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,300 @@
+; RUN: llc -march=hexagon -O3 < %s
+; REQUIRES: asserts
+
+target triple = "hexagon-unknown--elf"
+
+; Function Attrs: nounwind
+define void @f0(i32 %a0, i32 %a1) #0 {
+b0:
+  %v0 = alloca [8 x i32], align 8
+  %v1 = bitcast [8 x i32]* %v0 to i8*
+  call void @llvm.memset.p0i8.i32(i8* align 8 %v1, i8 0, i32 32, i1 false)
+  %v2 = icmp sgt i32 %a0, 0
+  br i1 %v2, label %b1, label %b18
+
+b1:                                               ; preds = %b0
+  %v3 = getelementptr inbounds [8 x i32], [8 x i32]* %v0, i32 0, i32 6
+  %v4 = inttoptr i32 %a1 to i32*
+  %v5 = add i32 %a0, -1
+  %v6 = icmp sgt i32 %v5, 0
+  br i1 %v6, label %b2, label %b13
+
+b2:                                               ; preds = %b1
+  %v7 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 0
+  %v8 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 1
+  %v9 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 2
+  %v10 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 3
+  %v11 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 4
+  %v12 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 5
+  %v13 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 6
+  %v14 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 7
+  %v15 = add i32 %a0, -2
+  %v16 = lshr i32 %v15, 1
+  %v17 = add i32 %v16, 1
+  %v18 = urem i32 %v17, 2
+  %v19 = icmp ne i32 %v18, 0
+  %v20 = add i32 %v5, -2
+  %v21 = icmp ugt i32 %v17, 1
+  br i1 %v21, label %b3, label %b7
+
+b3:                                               ; preds = %b2
+  br label %b4
+
+b4:                                               ; preds = %b22, %b3
+  %v22 = phi i32 [ 0, %b3 ], [ %v124, %b22 ]
+  %v23 = phi i32 [ 0, %b3 ], [ %v136, %b22 ]
+  %v24 = mul nsw i32 %v22, 4
+  %v25 = add nsw i32 %v24, 268435456
+  %v26 = inttoptr i32 %v25 to i32*
+  store volatile i32 %a1, i32* %v26, align 4, !tbaa !0
+  %v27 = load i32, i32* %v7, align 8, !tbaa !0
+  store volatile i32 %v27, i32* %v4, align 4, !tbaa !0
+  %v28 = load i32, i32* %v8, align 4, !tbaa !0
+  store volatile i32 %v28, i32* %v4, align 4, !tbaa !0
+  %v29 = load i32, i32* %v9, align 8, !tbaa !0
+  store volatile i32 %v29, i32* %v4, align 4, !tbaa !0
+  %v30 = load i32, i32* %v10, align 4, !tbaa !0
+  store volatile i32 %v30, i32* %v4, align 4, !tbaa !0
+  %v31 = load i32, i32* %v11, align 8, !tbaa !0
+  store volatile i32 %v31, i32* %v4, align 4, !tbaa !0
+  %v32 = load i32, i32* %v12, align 4, !tbaa !0
+  store volatile i32 %v32, i32* %v4, align 4, !tbaa !0
+  %v33 = load i32, i32* %v13, align 8, !tbaa !0
+  store volatile i32 %v33, i32* %v4, align 4, !tbaa !0
+  %v34 = load i32, i32* %v14, align 4, !tbaa !0
+  store volatile i32 %v34, i32* %v4, align 4, !tbaa !0
+  %v35 = icmp eq i32 %v23, 0
+  br i1 %v35, label %b19, label %b20
+
+b5:                                               ; preds = %b22
+  %v36 = phi i32 [ %v136, %b22 ]
+  %v37 = phi i32 [ %v124, %b22 ]
+  br i1 %v19, label %b6, label %b12
+
+b6:                                               ; preds = %b5
+  br label %b7
+
+b7:                                               ; preds = %b6, %b2
+  %v38 = phi i32 [ 0, %b2 ], [ %v36, %b6 ]
+  %v39 = phi i32 [ 0, %b2 ], [ %v37, %b6 ]
+  br label %b8
+
+b8:                                               ; preds = %b10, %b7
+  %v40 = phi i32 [ %v39, %b7 ], [ %v54, %b10 ]
+  %v41 = phi i32 [ %v38, %b7 ], [ %v66, %b10 ]
+  %v42 = mul nsw i32 %v40, 4
+  %v43 = add nsw i32 %v42, 268435456
+  %v44 = inttoptr i32 %v43 to i32*
+  store volatile i32 %a1, i32* %v44, align 4, !tbaa !0
+  %v45 = load i32, i32* %v7, align 8, !tbaa !0
+  store volatile i32 %v45, i32* %v4, align 4, !tbaa !0
+  %v46 = load i32, i32* %v8, align 4, !tbaa !0
+  store volatile i32 %v46, i32* %v4, align 4, !tbaa !0
+  %v47 = load i32, i32* %v9, align 8, !tbaa !0
+  store volatile i32 %v47, i32* %v4, align 4, !tbaa !0
+  %v48 = load i32, i32* %v10, align 4, !tbaa !0
+  store volatile i32 %v48, i32* %v4, align 4, !tbaa !0
+  %v49 = load i32, i32* %v11, align 8, !tbaa !0
+  store volatile i32 %v49, i32* %v4, align 4, !tbaa !0
+  %v50 = load i32, i32* %v12, align 4, !tbaa !0
+  store volatile i32 %v50, i32* %v4, align 4, !tbaa !0
+  %v51 = load i32, i32* %v13, align 8, !tbaa !0
+  store volatile i32 %v51, i32* %v4, align 4, !tbaa !0
+  %v52 = load i32, i32* %v14, align 4, !tbaa !0
+  store volatile i32 %v52, i32* %v4, align 4, !tbaa !0
+  %v53 = icmp eq i32 %v41, 0
+  br i1 %v53, label %b9, label %b10
+
+b9:                                               ; preds = %b8
+  store i32 0, i32* %v3, align 8, !tbaa !0
+  br label %b10
+
+b10:                                              ; preds = %b9, %b8
+  %v54 = phi i32 [ 3, %b9 ], [ %v40, %b8 ]
+  %v55 = mul nsw i32 %v54, 4
+  %v56 = add nsw i32 %v55, 268435456
+  %v57 = inttoptr i32 %v56 to i32*
+  store volatile i32 %a1, i32* %v57, align 4, !tbaa !0
+  %v58 = load i32, i32* %v7, align 8, !tbaa !0
+  store volatile i32 %v58, i32* %v4, align 4, !tbaa !0
+  %v59 = load i32, i32* %v8, align 4, !tbaa !0
+  store volatile i32 %v59, i32* %v4, align 4, !tbaa !0
+  %v60 = load i32, i32* %v9, align 8, !tbaa !0
+  store volatile i32 %v60, i32* %v4, align 4, !tbaa !0
+  %v61 = load i32, i32* %v10, align 4, !tbaa !0
+  store volatile i32 %v61, i32* %v4, align 4, !tbaa !0
+  %v62 = load i32, i32* %v11, align 8, !tbaa !0
+  store volatile i32 %v62, i32* %v4, align 4, !tbaa !0
+  %v63 = load i32, i32* %v12, align 4, !tbaa !0
+  store volatile i32 %v63, i32* %v4, align 4, !tbaa !0
+  %v64 = load i32, i32* %v13, align 8, !tbaa !0
+  store volatile i32 %v64, i32* %v4, align 4, !tbaa !0
+  %v65 = load i32, i32* %v14, align 4, !tbaa !0
+  store volatile i32 %v65, i32* %v4, align 4, !tbaa !0
+  %v66 = add nsw i32 %v41, 2
+  %v67 = icmp slt i32 %v66, %v5
+  br i1 %v67, label %b8, label %b11
+
+b11:                                              ; preds = %b10
+  %v68 = phi i32 [ %v66, %b10 ]
+  %v69 = phi i32 [ %v54, %b10 ]
+  br label %b12
+
+b12:                                              ; preds = %b11, %b5
+  %v70 = phi i32 [ %v36, %b5 ], [ %v68, %b11 ]
+  %v71 = phi i32 [ %v37, %b5 ], [ %v69, %b11 ]
+  %v72 = icmp eq i32 %v70, %a0
+  br i1 %v72, label %b18, label %b13
+
+b13:                                              ; preds = %b12, %b1
+  %v73 = phi i32 [ 0, %b1 ], [ %v70, %b12 ]
+  %v74 = phi i32 [ 0, %b1 ], [ %v71, %b12 ]
+  %v75 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 0
+  %v76 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 1
+  %v77 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 2
+  %v78 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 3
+  %v79 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 4
+  %v80 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 5
+  %v81 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 6
+  %v82 = getelementptr [8 x i32], [8 x i32]* %v0, i32 0, i32 7
+  br label %b14
+
+b14:                                              ; preds = %b16, %b13
+  %v83 = phi i32 [ %v74, %b13 ], [ %v86, %b16 ]
+  %v84 = phi i32 [ %v73, %b13 ], [ %v98, %b16 ]
+  %v85 = icmp eq i32 %v84, 1
+  br i1 %v85, label %b15, label %b16
+
+b15:                                              ; preds = %b14
+  store i32 0, i32* %v3, align 8, !tbaa !0
+  br label %b16
+
+b16:                                              ; preds = %b15, %b14
+  %v86 = phi i32 [ 3, %b15 ], [ %v83, %b14 ]
+  %v87 = mul nsw i32 %v86, 4
+  %v88 = add nsw i32 %v87, 268435456
+  %v89 = inttoptr i32 %v88 to i32*
+  store volatile i32 %a1, i32* %v89, align 4, !tbaa !0
+  %v90 = load i32, i32* %v75, align 8, !tbaa !0
+  store volatile i32 %v90, i32* %v4, align 4, !tbaa !0
+  %v91 = load i32, i32* %v76, align 4, !tbaa !0
+  store volatile i32 %v91, i32* %v4, align 4, !tbaa !0
+  %v92 = load i32, i32* %v77, align 8, !tbaa !0
+  store volatile i32 %v92, i32* %v4, align 4, !tbaa !0
+  %v93 = load i32, i32* %v78, align 4, !tbaa !0
+  store volatile i32 %v93, i32* %v4, align 4, !tbaa !0
+  %v94 = load i32, i32* %v79, align 8, !tbaa !0
+  store volatile i32 %v94, i32* %v4, align 4, !tbaa !0
+  %v95 = load i32, i32* %v80, align 4, !tbaa !0
+  store volatile i32 %v95, i32* %v4, align 4, !tbaa !0
+  %v96 = load i32, i32* %v81, align 8, !tbaa !0
+  store volatile i32 %v96, i32* %v4, align 4, !tbaa !0
+  %v97 = load i32, i32* %v82, align 4, !tbaa !0
+  store volatile i32 %v97, i32* %v4, align 4, !tbaa !0
+  %v98 = add nsw i32 %v84, 1
+  %v99 = icmp eq i32 %v98, %a0
+  br i1 %v99, label %b17, label %b14
+
+b17:                                              ; preds = %b16
+  br label %b18
+
+b18:                                              ; preds = %b17, %b12, %b0
+  ret void
+
+b19:                                              ; preds = %b4
+  store i32 0, i32* %v3, align 8, !tbaa !0
+  br label %b20
+
+b20:                                              ; preds = %b19, %b4
+  %v100 = phi i32 [ 3, %b19 ], [ %v22, %b4 ]
+  %v101 = mul nsw i32 %v100, 4
+  %v102 = add nsw i32 %v101, 268435456
+  %v103 = inttoptr i32 %v102 to i32*
+  store volatile i32 %a1, i32* %v103, align 4, !tbaa !0
+  %v104 = load i32, i32* %v7, align 8, !tbaa !0
+  store volatile i32 %v104, i32* %v4, align 4, !tbaa !0
+  %v105 = load i32, i32* %v8, align 4, !tbaa !0
+  store volatile i32 %v105, i32* %v4, align 4, !tbaa !0
+  %v106 = load i32, i32* %v9, align 8, !tbaa !0
+  store volatile i32 %v106, i32* %v4, align 4, !tbaa !0
+  %v107 = load i32, i32* %v10, align 4, !tbaa !0
+  store volatile i32 %v107, i32* %v4, align 4, !tbaa !0
+  %v108 = load i32, i32* %v11, align 8, !tbaa !0
+  store volatile i32 %v108, i32* %v4, align 4, !tbaa !0
+  %v109 = load i32, i32* %v12, align 4, !tbaa !0
+  store volatile i32 %v109, i32* %v4, align 4, !tbaa !0
+  %v110 = load i32, i32* %v13, align 8, !tbaa !0
+  store volatile i32 %v110, i32* %v4, align 4, !tbaa !0
+  %v111 = load i32, i32* %v14, align 4, !tbaa !0
+  store volatile i32 %v111, i32* %v4, align 4, !tbaa !0
+  %v112 = add nsw i32 %v23, 2
+  %v113 = mul nsw i32 %v100, 4
+  %v114 = add nsw i32 %v113, 268435456
+  %v115 = inttoptr i32 %v114 to i32*
+  store volatile i32 %a1, i32* %v115, align 4, !tbaa !0
+  %v116 = load i32, i32* %v7, align 8, !tbaa !0
+  store volatile i32 %v116, i32* %v4, align 4, !tbaa !0
+  %v117 = load i32, i32* %v8, align 4, !tbaa !0
+  store volatile i32 %v117, i32* %v4, align 4, !tbaa !0
+  %v118 = load i32, i32* %v9, align 8, !tbaa !0
+  store volatile i32 %v118, i32* %v4, align 4, !tbaa !0
+  %v119 = load i32, i32* %v10, align 4, !tbaa !0
+  store volatile i32 %v119, i32* %v4, align 4, !tbaa !0
+  %v120 = load i32, i32* %v11, align 8, !tbaa !0
+  store volatile i32 %v120, i32* %v4, align 4, !tbaa !0
+  %v121 = load i32, i32* %v12, align 4, !tbaa !0
+  store volatile i32 %v121, i32* %v4, align 4, !tbaa !0
+  %v122 = load i32, i32* %v13, align 8, !tbaa !0
+  store volatile i32 %v122, i32* %v4, align 4, !tbaa !0
+  %v123 = load i32, i32* %v14, align 4, !tbaa !0
+  store volatile i32 %v123, i32* %v4, align 4, !tbaa !0
+  br i1 false, label %b21, label %b22
+
+b21:                                              ; preds = %b20
+  store i32 0, i32* %v3, align 8, !tbaa !0
+  br label %b22
+
+b22:                                              ; preds = %b21, %b20
+  %v124 = phi i32 [ 3, %b21 ], [ %v100, %b20 ]
+  %v125 = mul nsw i32 %v124, 4
+  %v126 = add nsw i32 %v125, 268435456
+  %v127 = inttoptr i32 %v126 to i32*
+  store volatile i32 %a1, i32* %v127, align 4, !tbaa !0
+  %v128 = load i32, i32* %v7, align 8, !tbaa !0
+  store volatile i32 %v128, i32* %v4, align 4, !tbaa !0
+  %v129 = load i32, i32* %v8, align 4, !tbaa !0
+  store volatile i32 %v129, i32* %v4, align 4, !tbaa !0
+  %v130 = load i32, i32* %v9, align 8, !tbaa !0
+  store volatile i32 %v130, i32* %v4, align 4, !tbaa !0
+  %v131 = load i32, i32* %v10, align 4, !tbaa !0
+  store volatile i32 %v131, i32* %v4, align 4, !tbaa !0
+  %v132 = load i32, i32* %v11, align 8, !tbaa !0
+  store volatile i32 %v132, i32* %v4, align 4, !tbaa !0
+  %v133 = load i32, i32* %v12, align 4, !tbaa !0
+  store volatile i32 %v133, i32* %v4, align 4, !tbaa !0
+  %v134 = load i32, i32* %v13, align 8, !tbaa !0
+  store volatile i32 %v134, i32* %v4, align 4, !tbaa !0
+  %v135 = load i32, i32* %v14, align 4, !tbaa !0
+  store volatile i32 %v135, i32* %v4, align 4, !tbaa !0
+  %v136 = add nsw i32 %v112, 2
+  %v137 = icmp slt i32 %v136, %v20
+  br i1 %v137, label %b4, label %b5
+}
+
+; Function Attrs: nounwind
+define void @f1(i32 %a0, i32 %a1) #0 {
+b0:
+  tail call void @f0(i32 %a0, i32 %a1)
+  ret void
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { argmemonly nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"long", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/cfi-late-and-regpressure-init.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/cfi-late-and-regpressure-init.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/cfi-late-and-regpressure-init.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/cfi-late-and-regpressure-init.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,62 @@
+; RUN: llc -march=hexagon -enable-misched=true < %s | FileCheck %s
+; This test checks the delayed emission of CFI instructions
+; This test also checks the proper initialization of RegisterPressureTracker.
+; The RegisterPressureTracker must skip debug instructions upon entry to a BB.
+
+target triple = "hexagon-unknown--elf"
+
+; Check that allocframe was packetized with the two adds.
+; CHECK: f0:
+; CHECK: {
+; CHECK-DAG: allocframe
+; CHECK-DAG: add
+; CHECK-DAG: add
+; CHECK: }
+; CHECK: dealloc_return
+; CHECK: }
+
+
+; Function Attrs: nounwind
+define i32 @f0(i32 %a0, i32 %a1) #0 !dbg !5 {
+b0:
+  call void @llvm.dbg.value(metadata i32 %a0, metadata !10, metadata !DIExpression()), !dbg !12
+  call void @llvm.dbg.value(metadata i32 %a1, metadata !11, metadata !DIExpression()), !dbg !13
+  %v0 = add nsw i32 %a0, 1, !dbg !14
+  %v1 = add nsw i32 %a1, 1, !dbg !15
+  %v2 = tail call i32 @f1(i32 %v0, i32 %v1) #3, !dbg !16
+  %v3 = add nsw i32 %v2, 1, !dbg !17
+  ret i32 %v3, !dbg !18
+}
+
+declare i32 @f1(i32, i32) #1
+
+; Function Attrs: nounwind readnone speculatable
+declare void @llvm.dbg.value(metadata, metadata, metadata) #2
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { "target-cpu"="hexagonv55" }
+attributes #2 = { nounwind readnone speculatable }
+attributes #3 = { nounwind }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "test.c", directory: "/test")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 3, type: !6, isLocal: false, isDefinition: true, scopeLine: 3, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !9)
+!6 = !DISubroutineType(types: !7)
+!7 = !{!8, !8, !8}
+!8 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
+!9 = !{!10, !11}
+!10 = !DILocalVariable(name: "x", arg: 1, scope: !5, file: !1, line: 3, type: !8)
+!11 = !DILocalVariable(name: "y", arg: 2, scope: !5, file: !1, line: 3, type: !8)
+!12 = !DILocation(line: 3, column: 13, scope: !5)
+!13 = !DILocation(line: 3, column: 20, scope: !5)
+!14 = !DILocation(line: 4, column: 15, scope: !5)
+!15 = !DILocation(line: 4, column: 20, scope: !5)
+!16 = !DILocation(line: 4, column: 10, scope: !5)
+!17 = !DILocation(line: 4, column: 24, scope: !5)
+!18 = !DILocation(line: 4, column: 3, scope: !5)

Added: llvm/trunk/test/CodeGen/Hexagon/cfi_offset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/cfi_offset.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/cfi_offset.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/cfi_offset.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,78 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: .cfi_def_cfa r30
+; CHECK: .cfi_offset r31
+; CHECK: .cfi_offset r30
+
+@g0 = global i32 0, align 4
+@g1 = external constant i8*
+
+define i32 @f0() personality i8* bitcast (i32 (...)* @f3 to i8*) {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = alloca i8*
+  %v2 = alloca i32
+  %v3 = alloca i32, align 4
+  store i32 0, i32* %v0
+  %v4 = call i8* @f1(i32 4) #1
+  %v5 = bitcast i8* %v4 to i32*
+  store i32 20, i32* %v5
+  invoke void @f2(i8* %v4, i8* bitcast (i8** @g1 to i8*), i8* null) #2
+          to label %b6 unwind label %b1
+
+b1:                                               ; preds = %b0
+  %v6 = landingpad { i8*, i32 }
+          catch i8* bitcast (i8** @g1 to i8*)
+  %v7 = extractvalue { i8*, i32 } %v6, 0
+  store i8* %v7, i8** %v1
+  %v8 = extractvalue { i8*, i32 } %v6, 1
+  store i32 %v8, i32* %v2
+  br label %b2
+
+b2:                                               ; preds = %b1
+  %v9 = load i32, i32* %v2
+  %v10 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @g1 to i8*)) #1
+  %v11 = icmp eq i32 %v9, %v10
+  br i1 %v11, label %b3, label %b5
+
+b3:                                               ; preds = %b2
+  %v12 = load i8*, i8** %v1
+  %v13 = call i8* @f4(i8* %v12) #1
+  %v14 = bitcast i8* %v13 to i32*
+  %v15 = load i32, i32* %v14, align 4
+  store i32 %v15, i32* %v3, align 4
+  %v16 = load i32, i32* %v3, align 4
+  store i32 %v16, i32* @g0, align 4
+  call void @f5() #1
+  br label %b4
+
+b4:                                               ; preds = %b3
+  %v17 = load i32, i32* @g0, align 4
+  ret i32 %v17
+
+b5:                                               ; preds = %b2
+  %v18 = load i8*, i8** %v1
+  %v19 = load i32, i32* %v2
+  %v20 = insertvalue { i8*, i32 } undef, i8* %v18, 0
+  %v21 = insertvalue { i8*, i32 } %v20, i32 %v19, 1
+  resume { i8*, i32 } %v21
+
+b6:                                               ; preds = %b0
+  unreachable
+}
+
+declare i8* @f1(i32)
+
+declare void @f2(i8*, i8*, i8*)
+
+declare i32 @f3(...)
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.eh.typeid.for(i8*) #0
+
+declare i8* @f4(i8*)
+
+declare void @f5()
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }
+attributes #2 = { noreturn }

Added: llvm/trunk/test/CodeGen/Hexagon/cfi_offset2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/cfi_offset2.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/cfi_offset2.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/cfi_offset2.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,117 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: .cfi_offset r31, -4
+; CHECK: .cfi_offset r30, -8
+; CHECK: .cfi_offset r17, -12
+; CHECK: .cfi_offset r16, -16
+
+@g0 = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1
+@g1 = external constant i8*
+@g2 = private unnamed_addr constant [15 x i8] c"blah blah blah\00", align 1
+@g3 = external constant i8*
+@g4 = private unnamed_addr constant [2 x i8] c"{\00"
+@g5 = private unnamed_addr constant [2 x i8] c"}\00"
+@g6 = private unnamed_addr constant [27 x i8] c"FAIL:Unexpected exception.\00"
+
+; Function Attrs: nounwind
+declare i32 @f0(i8* nocapture readonly, ...) #0
+
+; Function Attrs: nounwind
+define void @f1(i32 %a0) #0 {
+b0:
+  %v0 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i32 %a0)
+  ret void
+}
+
+define i32 @f2(i32 %a0, i8** nocapture readnone %a1) personality i8* bitcast (i32 (...)* @f5 to i8*) {
+b0:
+  %v0 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i32 %a0) #0
+  %v1 = tail call i32 @f8(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @g4, i32 0, i32 0)) #0
+  %v2 = tail call i8* @f3(i32 4) #0
+  %v3 = bitcast i8* %v2 to i8**
+  store i8* getelementptr inbounds ([15 x i8], [15 x i8]* @g2, i32 0, i32 0), i8** %v3, align 4, !tbaa !0
+  invoke void @f4(i8* %v2, i8* bitcast (i8** @g1 to i8*), i8* null) #2
+          to label %b9 unwind label %b1
+
+b1:                                               ; preds = %b0
+  %v4 = landingpad { i8*, i32 }
+          catch i8* bitcast (i8** @g1 to i8*)
+          catch i8* null
+  %v5 = extractvalue { i8*, i32 } %v4, 0
+  %v6 = extractvalue { i8*, i32 } %v4, 1
+  %v7 = tail call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @g1 to i8*)) #0
+  %v8 = icmp eq i32 %v6, %v7
+  %v9 = tail call i8* @f6(i8* %v5) #0
+  br i1 %v8, label %b2, label %b3
+
+b2:                                               ; preds = %b1
+  tail call void @f7() #0
+  br label %b4
+
+b3:                                               ; preds = %b1
+  %v10 = tail call i32 @f8(i8* getelementptr inbounds ([27 x i8], [27 x i8]* @g6, i32 0, i32 0))
+  tail call void @f7()
+  br label %b4
+
+b4:                                               ; preds = %b3, %b2
+  %v11 = tail call i32 @f8(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @g5, i32 0, i32 0)) #0
+  %v12 = tail call i32 @f8(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @g4, i32 0, i32 0)) #0
+  %v13 = tail call i8* @f3(i32 4) #0
+  %v14 = bitcast i8* %v13 to i32*
+  store i32 777, i32* %v14, align 4, !tbaa !4
+  invoke void @f4(i8* %v13, i8* bitcast (i8** @g3 to i8*), i8* null) #2
+          to label %b9 unwind label %b5
+
+b5:                                               ; preds = %b4
+  %v15 = landingpad { i8*, i32 }
+          catch i8* bitcast (i8** @g3 to i8*)
+          catch i8* null
+  %v16 = extractvalue { i8*, i32 } %v15, 0
+  %v17 = extractvalue { i8*, i32 } %v15, 1
+  %v18 = tail call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @g3 to i8*)) #0
+  %v19 = icmp eq i32 %v17, %v18
+  %v20 = tail call i8* @f6(i8* %v16) #0
+  br i1 %v19, label %b6, label %b7
+
+b6:                                               ; preds = %b5
+  tail call void @f7() #0
+  br label %b8
+
+b7:                                               ; preds = %b5
+  %v21 = tail call i32 @f8(i8* getelementptr inbounds ([27 x i8], [27 x i8]* @g6, i32 0, i32 0))
+  tail call void @f7()
+  br label %b8
+
+b8:                                               ; preds = %b7, %b6
+  %v22 = tail call i32 @f8(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @g5, i32 0, i32 0)) #0
+  ret i32 0
+
+b9:                                               ; preds = %b4, %b0
+  unreachable
+}
+
+declare i8* @f3(i32)
+
+declare void @f4(i8*, i8*, i8*)
+
+declare i32 @f5(...)
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.eh.typeid.for(i8*) #1
+
+declare i8* @f6(i8*)
+
+declare void @f7()
+
+; Function Attrs: nounwind
+declare i32 @f8(i8* nocapture readonly) #0
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
+attributes #2 = { noreturn }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"any pointer", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"int", !2, i64 0}

Added: llvm/trunk/test/CodeGen/Hexagon/check-dot-new.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/check-dot-new.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/check-dot-new.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/check-dot-new.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,16 @@
+; RUN: llc  -march=hexagon  -O3 -hexagon-small-data-threshold=0 -disable-hexagon-misched < %s | FileCheck %s
+; CHECK-LABEL: f0
+; CHECK-DAG: [[REG:r[0-9]+]] = add
+; CHECK-DAG: memw(##g0) = [[REG]].new
+
+@g0 = external global i32, align 8
+
+; Function Attrs: nounwind
+define void @f0(i32 %a0) #0 {
+b0:
+  %v0 = add i32 %a0, 1
+  store i32 %v0, i32* @g0, align 4
+  ret void
+}
+
+attributes #0 = { nounwind }

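For reference, a C equivalent of the IR in check-dot-new.ll above (the extern
declaration is assumed). The test expects the add and the store to land in one
packet, with the store consuming the value produced in that packet via the
".new" form:

    extern int g0;

    void f0(int a0) {
      /* Expected lowering, per the CHECK lines:
       *   { rX = add(r0,#1)
       *     memw(##g0) = rX.new }
       */
      g0 = a0 + 1;
    }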
Added: llvm/trunk/test/CodeGen/Hexagon/circ_pcr_assert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/circ_pcr_assert.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/circ_pcr_assert.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/circ_pcr_assert.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,35 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+; This test validates that the compiler does not assert when the modifier
+; register value "-268430336" is passed as a target constant.
+; It also validates that the VLIW packetizer does not abort compilation with
+; "Unknown .new type" when checking whether the circular store can be
+; converted to a new-value store.
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define zeroext i8 @f0(i8* %a0) local_unnamed_addr #0 {
+b0:
+  %v0 = tail call { i32, i8* } @llvm.hexagon.L2.loadrub.pcr(i8* %a0, i32 -268430336, i8* %a0)
+  %v1 = extractvalue { i32, i8* } %v0, 0
+  %v2 = trunc i32 %v1 to i8
+  ret i8 %v2
+}
+
+; Function Attrs: argmemonly nounwind
+declare { i32, i8* } @llvm.hexagon.L2.loadrub.pcr(i8*, i32, i8* nocapture) #1
+
+; Function Attrs: nounwind
+define void @f1(i8* %a0, i8 zeroext %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = zext i8 %a1 to i32
+  %v1 = tail call i8* @llvm.hexagon.S2.storerb.pcr(i8* %a0, i32 -268430336, i32 %v0, i8* %a0)
+  ret void
+}
+
+; Function Attrs: argmemonly nounwind
+declare i8* @llvm.hexagon.S2.storerb.pcr(i8*, i32, i32, i8* nocapture) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" }
+attributes #1 = { argmemonly nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/cmpb_gtu.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/cmpb_gtu.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/cmpb_gtu.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/cmpb_gtu.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,123 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: cmpb.gtu
+
+target triple = "hexagon"
+
+%s.0 = type { void (i8)*, void (i8)*, void (i8)*, void (i8)* }
+%s.1 = type { i8 (i8)*, void (i8)* }
+%s.2 = type { i8 (i8, %s.3*)*, i8 (i8)*, i8 (i8)*, i8 (i8)*, i8 (i8)*, i8 (i8)*, i8 (i16)*, i8 (i8)*, i8 (i16)*, i8 (i8)* }
+%s.3 = type { %s.4, [2 x %s.5], i8, %s.7, %s.19, i8, %s.8, i16, [6 x %s.14], %s.17, %s.18, %s.19 }
+%s.4 = type { i16, i8, i8* }
+%s.5 = type { i16, i8, i8, i8, i8, i8, i8, i8, i16, i8, i8, i8, i16, %s.6 }
+%s.6 = type { i8, i16, i16, i8, i8 }
+%s.7 = type { i8, i8, i8, i8, i64, i64 }
+%s.8 = type { i16, %s.9, i32, %s.10*, i8, i8 }
+%s.9 = type { i8, i8, i8, i8, i8, i8, i8, i8, i8, i8 }
+%s.10 = type { i8, [14 x i8], [14 x %s.11*] }
+%s.11 = type { i8, i8, i8, i8, i8, i16, i8, i8, i8, i8, i8, i8, i8, i8, %s.12, %s.13 }
+%s.12 = type { i16, i8, i8, i8, i16, i8, i8 }
+%s.13 = type { i8, i8, i8, i8, i8, i8 }
+%s.14 = type { i16, %s.15 }
+%s.15 = type { i16, i8, i16, i16, i16, i16, [1 x %s.16], i8, i8, i8, i32 }
+%s.16 = type { i8, i16, i16, i8 }
+%s.17 = type { i16, i16, i16, i8, i8 }
+%s.18 = type { i8, i8, i32 }
+%s.19 = type { i8, i8, i8, i8 }
+%s.22 = type { %s.23, %s.24 }
+%s.23 = type { i8, i8, i8, i8, i8, i8, i8, %s.0*, %s.1*, %s.2*, i8 }
+%s.24 = type { i16, i16, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i16, i8, i8 }
+%s.25 = type { %s.26 }
+%s.26 = type { i8, i8, i32 }
+
+@g0 = external global %s.0
+@g1 = external global %s.1
+@g2 = external global %s.2
+@g3 = external global %s.0
+@g4 = external global %s.0
+@g5 = external global %s.0
+@g6 = external global %s.0
+@g7 = external global %s.0
+@g8 = external global %s.1
+@g9 = external global %s.2
+@g10 = external global %s.0
+@g11 = external global %s.0
+@g12 = external global %s.1
+@g13 = external global %s.2
+@g14 = external global %s.0
+@g15 = external global %s.1
+@g16 = external global %s.2
+@g17 = external global %s.0
+@g18 = external global %s.2
+@g19 = common global [6 x %s.22] zeroinitializer, align 8
+@g20 = common global %s.25 zeroinitializer, align 4
+
+declare void @f0()
+
+declare void @f1()
+
+declare void @f2()
+
+declare void @f3()
+
+declare void @f4()
+
+declare void @f5(i8 zeroext)
+
+declare void @f6()
+
+; Function Attrs: nounwind
+define void @f7() #0 {
+b0:
+  %v0 = load i8, i8* getelementptr inbounds (%s.25, %s.25* @g20, i32 0, i32 0, i32 1), align 1, !tbaa !0
+  %v1 = icmp eq i8 %v0, 1
+  br label %b1
+
+b1:                                               ; preds = %b5, %b0
+  %v2 = phi i32 [ 0, %b0 ], [ %v14, %b5 ]
+  %v3 = getelementptr inbounds [6 x %s.22], [6 x %s.22]* @g19, i32 0, i32 %v2, i32 1, i32 4
+  %v4 = load i8, i8* %v3, align 2, !tbaa !0
+  %v5 = icmp eq i8 %v4, 1
+  br i1 %v5, label %b2, label %b5
+
+b2:                                               ; preds = %b1
+  br i1 %v1, label %b3, label %b4
+
+b3:                                               ; preds = %b2
+  %v6 = getelementptr inbounds [6 x %s.22], [6 x %s.22]* @g19, i32 0, i32 %v2, i32 1, i32 6
+  %v7 = load i8, i8* %v6, align 4, !tbaa !0
+  %v8 = add i8 %v7, -2
+  %v9 = icmp ult i8 %v8, 44
+  br i1 %v9, label %b5, label %b4
+
+b4:                                               ; preds = %b3, %b2
+  %v10 = shl i32 1, %v2
+  %v11 = load i32, i32* getelementptr inbounds (%s.25, %s.25* @g20, i32 0, i32 0, i32 2), align 4, !tbaa !3
+  %v12 = or i32 %v11, %v10
+  store i32 %v12, i32* getelementptr inbounds (%s.25, %s.25* @g20, i32 0, i32 0, i32 2), align 4, !tbaa !3
+  %v13 = getelementptr inbounds [6 x %s.22], [6 x %s.22]* @g19, i32 0, i32 %v2, i32 1, i32 13
+  store i8 1, i8* %v13, align 4, !tbaa !0
+  br label %b5
+
+b5:                                               ; preds = %b4, %b3, %b1
+  %v14 = add i32 %v2, 1
+  %v15 = trunc i32 %v14 to i8
+  %v16 = icmp eq i8 %v15, 6
+  br i1 %v16, label %b6, label %b1
+
+b6:                                               ; preds = %b5
+  ret void
+}
+
+declare void @f8(i8 zeroext)
+
+declare void @f9()
+
+declare void @f10()
+
+attributes #0 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2}
+!2 = !{!"Simple C/C++ TBAA"}
+!3 = !{!4, !4, i64 0}
+!4 = !{!"long", !1}

Added: llvm/trunk/test/CodeGen/Hexagon/cmpbeq.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/cmpbeq.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/cmpbeq.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/cmpbeq.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,27 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; Check that we generate a 'cmpb.eq' instruction for a byte comparison.
+
+@g0 = common global i8 0, align 1
+
+; Function Attrs: nounwind
+define i32 @f0(i32 %a0, i32 %a1) #0 {
+b0:
+  %v0 = xor i32 %a1, %a0
+  %v1 = and i32 %v0, 255
+  %v2 = icmp eq i32 %v1, 0
+  br i1 %v2, label %b1, label %b2
+; CHECK-NOT: xor(r{{[0-9]+}},r{{[0-9]+}})
+; CHECK-NOT: zxtb(r{{[0-9]+}})
+; CHECK: cmpb.eq(r{{[0-9]+}},r{{[0-9]+}})
+
+b1:                                               ; preds = %b0
+  %v3 = trunc i32 %a0 to i8
+  store i8 %v3, i8* @g0, align 1
+  br label %b2
+
+b2:                                               ; preds = %b1, %b0
+  %v4 = phi i32 [ 1, %b1 ], [ 0, %b0 ]
+  ret i32 %v4
+}
+
+attributes #0 = { nounwind }

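A C equivalent of the IR in cmpbeq.ll above (an illustrative sketch, not the
original source). The low-byte comparison is written as xor, mask with 255,
and compare against zero, and the test expects that pattern to be selected as
a single cmpb.eq rather than xor/zxtb/cmp:

    char g0;

    int f0(int a0, int a1) {
      if (((a0 ^ a1) & 255) == 0) {   /* low bytes are equal */
        g0 = (char)a0;
        return 1;
      }
      return 0;
    }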
Added: llvm/trunk/test/CodeGen/Hexagon/cmpy-round.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/cmpy-round.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/cmpy-round.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/cmpy-round.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,47 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+; r2 = round(r1:0):sat
+; r3 = cmpyiwh(r1:0, r2):<<1:rnd:sat
+; r0 = cmpyiwh(r1:0, r2*):<<1:rnd:sat
+
+; CHECK: round(r{{[0-9]*}}:{{[0-9]*}}):sat
+; CHECK: cmpyiwh(r{{[0-9]*}}:{{[0-9]*}},r{{[0-9]*}}):<<1:rnd:sat
+; CHECK: cmpyrwh(r{{[0-9]*}}:{{[0-9]*}},r{{[0-9]*}}*):<<1:rnd:sat
+; CHECK: cmpyiwh(r{{[0-9]*}}:{{[0-9]*}},r{{[0-9]*}}*):<<1:rnd:sat
+
+target triple = "hexagon"
+
+@g0 = global i32 0, align 4
+@g1 = global i32 0, align 4
+@g2 = global i32 0, align 4
+
+; Function Attrs: nounwind
+define i32 @f0() #0 {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = alloca i32, align 4
+  store i32 0, i32* %v0
+  store i32 0, i32* %v1, align 4
+  %v2 = call i32 @llvm.hexagon.A2.roundsat(i64 1)
+  store i32 %v2, i32* @g1, align 4
+  %v3 = call i32 @llvm.hexagon.M4.cmpyi.wh(i64 -2147483648, i32 -2147483648)
+  store i32 %v3, i32* @g0, align 4
+  %v4 = call i32 @llvm.hexagon.M4.cmpyr.whc(i64 2147483647, i32 2147483647)
+  store i32 %v4, i32* @g2, align 4
+  %v5 = call i32 @llvm.hexagon.M4.cmpyi.whc(i64 -2147483648, i32 -2147483648)
+  ret i32 %v5
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.roundsat(i64) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.M4.cmpyi.wh(i64, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.M4.cmpyr.whc(i64, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.M4.cmpyi.whc(i64, i32) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/coalesce_tfri.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/coalesce_tfri.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/coalesce_tfri.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/coalesce_tfri.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,124 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+target triple = "hexagon"
+
+@g0 = external global i32
+@g1 = external global i32, align 4
+@g2 = external hidden unnamed_addr constant [49 x i8], align 8
+@g3 = external hidden unnamed_addr constant [76 x i8], align 8
+@g4 = external unnamed_addr constant { i8*, i8* }
+@g5 = external hidden unnamed_addr constant [36 x i8], align 8
+
+declare void @f0()
+
+declare i32 @f1()
+
+declare i32 @f2(i32)
+
+declare void @f3()
+
+; Function Attrs: nounwind
+declare void ()* @f4(void ()*) #0
+
+; Function Attrs: nounwind
+declare void ()* @f5(void ()*) #0
+
+; CHECK: f6:
+; CHECK-DAG: call f4
+; CHECK-DAG: r0 = ##f3
+; CHECK-DAG: call f5
+; CHECK-DAG: r0 = ##f0
+; CHECK-DAG: call f8
+; CHECK-DAG: r0 = ##g2
+; CHECK-DAG: call f9
+; CHECK-DAG: call f8
+; CHECK-DAG: r0 = ##g3
+; CHECK-DAG: call f10
+; CHECK-DAG: r0 = #4
+; CHECK-DAG: r{{[0-9]+}} = ##g1
+define i32 @f6() personality i8* bitcast (i32 (...)* @f11 to i8*) {
+b0:
+  tail call void @f7()
+  %v0 = tail call void ()* @f4(void ()* @f3) #0
+  %v1 = tail call void ()* @f5(void ()* @f0) #0
+  tail call void (i8*, ...) @f8(i8* getelementptr inbounds ([49 x i8], [49 x i8]* @g2, i32 0, i32 0))
+  tail call void @f9()
+  tail call void (i8*, ...) @f8(i8* getelementptr inbounds ([76 x i8], [76 x i8]* @g3, i32 0, i32 0))
+  %v2 = tail call i8* @f10(i32 4) #0
+  %v3 = load i32, i32* @g1, align 4, !tbaa !0
+  %v4 = add nsw i32 %v3, 1
+  store i32 %v4, i32* @g1, align 4, !tbaa !0
+  invoke void @f12(i8* %v2, i8* bitcast ({ i8*, i8* }* @g4 to i8*), i8* null) #1
+          to label %b7 unwind label %b1
+
+b1:                                               ; preds = %b0
+  %v5 = landingpad { i8*, i32 }
+          catch i8* null
+  %v6 = extractvalue { i8*, i32 } %v5, 0
+  %v7 = tail call i8* @f13(i8* %v6) #0
+  store i32 0, i32* @g1, align 4, !tbaa !0
+  invoke void @f14() #1
+          to label %b7 unwind label %b2
+
+b2:                                               ; preds = %b1
+  %v8 = landingpad { i8*, i32 }
+          catch i8* null
+  invoke void @f15()
+          to label %b3 unwind label %b6
+
+b3:                                               ; preds = %b2
+  %v9 = extractvalue { i8*, i32 } %v8, 0
+  %v10 = tail call i8* @f13(i8* %v9) #0
+  tail call void @f15()
+  %v11 = load i32, i32* @g1, align 4, !tbaa !0
+  %v12 = icmp eq i32 %v11, 0
+  br i1 %v12, label %b5, label %b4
+
+b4:                                               ; preds = %b3
+  tail call void (i8*, ...) @f8(i8* getelementptr inbounds ([36 x i8], [36 x i8]* @g5, i32 0, i32 0))
+  store i32 1, i32* @g0, align 4, !tbaa !0
+  br label %b5
+
+b5:                                               ; preds = %b4, %b3
+  %v13 = tail call i32 @f1()
+  %v14 = tail call i32 @f2(i32 1)
+  ret i32 %v14
+
+b6:                                               ; preds = %b2
+  %v15 = landingpad { i8*, i32 }
+          catch i8* null
+  tail call void @f16() #2
+  unreachable
+
+b7:                                               ; preds = %b1, %b0
+  unreachable
+}
+
+declare void @f7()
+
+declare void @f8(i8*, ...)
+
+declare void @f9()
+
+declare i8* @f10(i32)
+
+declare i32 @f11(...)
+
+declare void @f12(i8*, i8*, i8*)
+
+declare i8* @f13(i8*)
+
+declare void @f14()
+
+declare void @f15()
+
+declare void @f16()
+
+attributes #0 = { nounwind }
+attributes #1 = { noreturn }
+attributes #2 = { noreturn nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/combine-imm-ext.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/combine-imm-ext.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/combine-imm-ext.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/combine-imm-ext.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,16 @@
+; RUN: llc -O2 -march=hexagon < %s | FileCheck %s
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define i32 @f0(i32* %a0, i32* %a1) #0 {
+b0:
+; We want to see a #-22 in combine, not ##-22.
+; CHECK: combine(#5,#-22)
+  %v0 = tail call i32 bitcast (i32 (...)* @f1 to i32 (i32*, i32*, i32, i32)*)(i32* %a0, i32* %a1, i32 -22, i32 5) #0
+  ret i32 %v0
+}
+
+declare i32 @f1(...)
+
+attributes #0 = { nounwind }
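
(For reference: in Hexagon assembly a single "#" marks an immediate that fits
the instruction's own immediate field, while "##" marks a constant-extended
immediate that takes an extra extender word in the packet.  A sketch of the
output this test expects, written in IR-comment form as the tests themselves
do; the destination register pair is illustrative, since the test only matches
the combine operands:

    ;   r1:0 = combine(#5,#-22)

Since -22 fits the small signed immediate field of combine, no extender should
be emitted for it.)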

Added: llvm/trunk/test/CodeGen/Hexagon/combine-imm-ext2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/combine-imm-ext2.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/combine-imm-ext2.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/combine-imm-ext2.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,16 @@
+; RUN: llc -O2 -march=hexagon < %s | FileCheck %s
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define i32 @f0(i32* %a0, i32* %a1) #0 {
+b0:
+; We want to see a ##24576 in combine, not #24576.
+; CHECK: combine(#5,##24576)
+  %v0 = tail call i32 bitcast (i32 (...)* @f1 to i32 (i32*, i32*, i16, i16)*)(i32* %a0, i32* %a1, i16 24576, i16 5) #0
+  ret i32 %v0
+}
+
+declare i32 @f1(...)
+
+attributes #0 = { nounwind }
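
(The companion case to the previous test: 24576 (0x6000) does not fit
combine's immediate field, so a constant extender must be emitted for it,
while 5 still fits and keeps the single "#".  Sketch of the expected output,
with an illustrative destination pair:

    ;   r1:0 = combine(#5,##24576)

The "##" marks the operand that carries the extender.)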

Added: llvm/trunk/test/CodeGen/Hexagon/combine_lh.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/combine_lh.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/combine_lh.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/combine_lh.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,14 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: combine(r{{[0-9]+}}.l,r{{[0-9]+}}.h)
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind readnone
+define i32 @f0(i64 %a0) #0 {
+b0:
+  %v0 = lshr i64 %a0, 16
+  %v1 = trunc i64 %v0 to i32
+  ret i32 %v1
+}
+
+attributes #0 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/combiner-lts.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/combiner-lts.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/combiner-lts.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/combiner-lts.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,66 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+
+; In the DAG combiner, eliminate a store that is fed by a load from the same
+; location.  This is already done in cases where the store's chain reaches
+; the "output chain" of the load; this tests the cases where the load's
+; "input chain" is reached via an intervening node (e.g. a TokenFactor) that
+; ensures ordering.
+
+target triple = "hexagon"
+
+%s.0 = type { [3 x i32] }
+
+; Function Attrs: nounwind
+define void @f0(i32 %a0, i32 %a1, %s.0* nocapture %a2, %s.0* nocapture %a3) #0 {
+b0:
+; Pick one specific store that results from this.  This isn't ideal, but a
+; regular expression for a register name would also match some unrelated load.
+; CHECK: %bb.
+; CHECK: = memw(r3+#8)
+; CHECK-NOT: memw(r3+#8) =
+; CHECK: %bb.
+  %v0 = bitcast %s.0* %a2 to i8*
+  %v1 = bitcast %s.0* %a3 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %v0, i8* align 4 %v1, i32 12, i1 false)
+  %v2 = bitcast %s.0* %a2 to i96*
+  %v3 = zext i32 %a0 to i96
+  %v4 = load i96, i96* %v2, align 4
+  %v5 = shl nuw nsw i96 %v3, 48
+  %v6 = and i96 %v5, 281474976710656
+  %v7 = and i96 %v4, -281474976710657
+  %v8 = or i96 %v7, %v6
+  store i96 %v8, i96* %v2, align 4
+  %v9 = icmp eq i32 %a1, 2147483647
+  br i1 %v9, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  %v10 = and i96 %v8, -12582913
+  br label %b3
+
+b2:                                               ; preds = %b0
+  %v11 = bitcast %s.0* %a3 to i96*
+  %v12 = load i96, i96* %v11, align 4
+  %v13 = trunc i96 %v12 to i32
+  %v14 = add i32 %v13, %a1
+  %v15 = zext i32 %v14 to i96
+  %v16 = and i96 %v15, 4194303
+  %v17 = and i96 %v8, -4194304
+  %v18 = or i96 %v16, %v17
+  store i96 %v18, i96* %v2, align 4
+  %v19 = load i96, i96* %v11, align 4
+  %v20 = and i96 %v19, 12582912
+  %v21 = and i96 %v18, -12582913
+  %v22 = or i96 %v21, %v20
+  br label %b3
+
+b3:                                               ; preds = %b2, %b1
+  %v23 = phi i96 [ %v22, %b2 ], [ %v10, %b1 ]
+  store i96 %v23, i96* %v2, align 4
+  ret void
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { argmemonly nounwind }
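
(A minimal standalone sketch of the redundancy described in the comment at the
top of this test; the function and value names below are made up for
illustration and are not taken from the test:

    target triple = "hexagon"

    define void @f(i32* %p) {
    b0:
      %v0 = load i32, i32* %p
      ; Stores back the value just loaded from the same address: removable.
      store i32 %v0, i32* %p
      ret void
    }

In this plain form the store's chain already reaches the load's output chain
directly, which is the case that was already handled; the memcpy and the wide
i96 accesses in the test above are what exercise the intervening-TokenFactor
case the comment refers to.)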

Added: llvm/trunk/test/CodeGen/Hexagon/common-global-addr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/common-global-addr.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/common-global-addr.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/common-global-addr.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,17 @@
+; RUN: llc -march=hexagon -hexagon-small-data-threshold=0 -disable-hexagon-amodeopt -hexagon-cext-threshold=1 < %s | FileCheck %s
+; Check commoning of global addresses.
+
+@g0 = external global i32
+
+; Function Attrs: nounwind
+define zeroext i32 @f0() #0 {
+b0:
+; CHECK: ##g0
+; CHECK-NOT: ##g0
+  %v0 = load i32, i32* @g0, align 1
+  %v1 = mul nsw i32 100, %v0
+  store i32 %v1, i32* @g0, align 1
+  ret i32 %v1
+}
+
+attributes #0 = { nounwind }
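
("Commoning" here means that the constant-extended address ##g0 should be
materialized into a register once and then reused for both the load and the
store, rather than being re-extended at each access; that is what the single
CHECK for ##g0 followed by the CHECK-NOT enforces.  Roughly the expected
shape, in IR-comment form, with illustrative register numbers and the
multiply elided:

    ;   r0 = ##g0
    ;   r1 = memw(r0+#0)
    ;   ...
    ;   memw(r0+#0) = r1

Only the first line needs the extender; both memory accesses go through r0.)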

Added: llvm/trunk/test/CodeGen/Hexagon/concat-vectors-legalize.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/concat-vectors-legalize.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/concat-vectors-legalize.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/concat-vectors-legalize.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,828 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK-NOT: setbit(r{{[0-9]+}},#1)
+
+target triple = "hexagon-unknown--elf"
+
+%s.8 = type { i8*, i32, i32, i32, i32, %s.9*, %s.9*, %s.9* }
+%s.9 = type { %s.10 }
+%s.10 = type { i64 }
+%s.4 = type { i64, i8*, [4 x i32], [4 x i32], [4 x i32], i32, i8, i8, [6 x i8] }
+
+@g0 = private constant [6 x i8] c"input\00", align 32
+@g1 = private constant [11 x i8] c"gaussian11\00", align 32
+@g2 = private constant [2 x %s.8] [%s.8 { i8* getelementptr inbounds ([6 x i8], [6 x i8]* @g0, i32 0, i32 0), i32 1, i32 2, i32 1, i32 8, %s.9* null, %s.9* null, %s.9* null }, %s.8 { i8* getelementptr inbounds ([11 x i8], [11 x i8]* @g1, i32 0, i32 0), i32 2, i32 2, i32 1, i32 8, %s.9* null, %s.9* null, %s.9* null }]
+@g3 = private constant [53 x i8] c"hexagon-32-os_unknown-no_asserts-no_bounds_query-hvx\00", align 32
+
+; Function Attrs: nounwind
+declare i8* @f0(i8*, i32) #0
+
+; Function Attrs: nounwind
+declare void @f1(i8*, i8*) #0
+
+; Function Attrs: nounwind
+declare noalias i8* @f2(i8*, i32) #0
+
+; Function Attrs: nounwind
+declare void @f3(i8*, i8*) #0
+
+; Function Attrs: nounwind
+declare void @f4() #0
+
+; Function Attrs: nounwind
+declare void @f5() #0
+
+; Function Attrs: nounwind
+define i32 @f6(%s.4* noalias nocapture readonly %a0, %s.4* noalias nocapture readonly %a1) #0 {
+b0:
+  %v0 = getelementptr inbounds %s.4, %s.4* %a0, i32 0, i32 1
+  %v1 = load i8*, i8** %v0
+  %v2 = getelementptr inbounds %s.4, %s.4* %a0, i32 0, i32 2, i32 0
+  %v3 = load i32, i32* %v2
+  %v4 = getelementptr inbounds %s.4, %s.4* %a0, i32 0, i32 2, i32 1
+  %v5 = load i32, i32* %v4
+  %v6 = getelementptr inbounds %s.4, %s.4* %a0, i32 0, i32 3, i32 1
+  %v7 = load i32, i32* %v6
+  %v8 = getelementptr inbounds %s.4, %s.4* %a0, i32 0, i32 4, i32 0
+  %v9 = load i32, i32* %v8
+  %v10 = getelementptr inbounds %s.4, %s.4* %a0, i32 0, i32 4, i32 1
+  %v11 = load i32, i32* %v10
+  %v12 = getelementptr inbounds %s.4, %s.4* %a1, i32 0, i32 1
+  %v13 = load i8*, i8** %v12
+  %v14 = getelementptr inbounds %s.4, %s.4* %a1, i32 0, i32 2, i32 0
+  %v15 = load i32, i32* %v14
+  %v16 = getelementptr inbounds %s.4, %s.4* %a1, i32 0, i32 2, i32 1
+  %v17 = load i32, i32* %v16
+  %v18 = getelementptr inbounds %s.4, %s.4* %a1, i32 0, i32 3, i32 1
+  %v19 = load i32, i32* %v18
+  %v20 = getelementptr inbounds %s.4, %s.4* %a1, i32 0, i32 4, i32 0
+  %v21 = load i32, i32* %v20
+  %v22 = getelementptr inbounds %s.4, %s.4* %a1, i32 0, i32 4, i32 1
+  %v23 = load i32, i32* %v22
+  %v24 = add nsw i32 %v21, %v15
+  %v25 = add nsw i32 %v24, -64
+  %v26 = icmp slt i32 %v21, %v25
+  %v27 = select i1 %v26, i32 %v21, i32 %v25
+  %v28 = add nsw i32 %v15, -1
+  %v29 = and i32 %v28, -64
+  %v30 = add i32 %v21, 63
+  %v31 = add i32 %v30, %v29
+  %v32 = add nsw i32 %v24, -1
+  %v33 = icmp slt i32 %v31, %v32
+  %v34 = select i1 %v33, i32 %v31, i32 %v32
+  %v35 = sub nsw i32 %v34, %v27
+  %v36 = icmp slt i32 %v24, %v34
+  %v37 = select i1 %v36, i32 %v34, i32 %v24
+  %v38 = add nsw i32 %v37, -1
+  %v39 = icmp slt i32 %v38, %v34
+  %v40 = select i1 %v39, i32 %v34, i32 %v38
+  %v41 = add nsw i32 %v17, 1
+  %v42 = sext i32 %v41 to i64
+  %v43 = sub nsw i32 %v40, %v27
+  %v44 = add nsw i32 %v43, 2
+  %v45 = sext i32 %v44 to i64
+  %v46 = mul nsw i64 %v45, %v42
+  %v47 = trunc i64 %v46 to i32
+  %v48 = tail call i8* @f2(i8* null, i32 %v47)
+  %v49 = add nsw i32 %v23, -1
+  %v50 = add i32 %v23, %v17
+  %v51 = icmp sgt i32 %v23, %v50
+  br i1 %v51, label %b12, label %b1, !prof !3
+
+b1:                                               ; preds = %b11, %b0
+  %v52 = phi i32 [ %v220, %b11 ], [ %v49, %b0 ]
+  %v53 = icmp slt i32 %v9, %v24
+  %v54 = select i1 %v53, i32 %v9, i32 %v24
+  %v55 = add nsw i32 %v21, -1
+  %v56 = icmp slt i32 %v54, %v55
+  %v57 = select i1 %v56, i32 %v55, i32 %v54
+  %v58 = add nsw i32 %v9, %v3
+  %v59 = icmp slt i32 %v58, %v24
+  %v60 = select i1 %v59, i32 %v58, i32 %v24
+  %v61 = icmp slt i32 %v60, %v57
+  %v62 = select i1 %v61, i32 %v57, i32 %v60
+  %v63 = icmp slt i32 %v57, %v21
+  br i1 %v63, label %b7, label %b2, !prof !3
+
+b2:                                               ; preds = %b1
+  %v64 = add nsw i32 %v11, %v5
+  %v65 = add nsw i32 %v64, -1
+  %v66 = icmp slt i32 %v52, %v65
+  br i1 %v66, label %b3, label %b4
+
+b3:                                               ; preds = %b3, %b2
+  %v67 = phi i32 [ %v96, %b3 ], [ %v55, %b2 ]
+  %v68 = mul nsw i32 %v11, %v7
+  %v69 = icmp slt i32 %v52, %v11
+  %v70 = select i1 %v69, i32 %v11, i32 %v52
+  %v71 = mul nsw i32 %v70, %v7
+  %v72 = add nsw i32 %v58, -1
+  %v73 = icmp slt i32 %v67, %v72
+  %v74 = select i1 %v73, i32 %v67, i32 %v72
+  %v75 = icmp slt i32 %v74, %v9
+  %v76 = select i1 %v75, i32 %v9, i32 %v74
+  %v77 = add i32 %v68, %v9
+  %v78 = sub i32 %v71, %v77
+  %v79 = add i32 %v78, %v76
+  %v80 = getelementptr inbounds i8, i8* %v1, i32 %v79
+  %v81 = load i8, i8* %v80, align 1, !tbaa !4
+  %v82 = icmp sle i32 %v64, %v52
+  %v83 = icmp sle i32 %v58, %v67
+  %v84 = icmp slt i32 %v67, %v9
+  %v85 = or i1 %v84, %v83
+  %v86 = or i1 %v69, %v85
+  %v87 = or i1 %v82, %v86
+  %v88 = select i1 %v87, i8 0, i8 %v81
+  %v89 = sub i32 1, %v23
+  %v90 = add i32 %v89, %v52
+  %v91 = mul nsw i32 %v90, %v44
+  %v92 = sub i32 1, %v27
+  %v93 = add i32 %v92, %v91
+  %v94 = add i32 %v93, %v67
+  %v95 = getelementptr inbounds i8, i8* %v48, i32 %v94
+  store i8 %v88, i8* %v95, align 1, !tbaa !7
+  %v96 = add nsw i32 %v67, 1
+  %v97 = icmp eq i32 %v96, %v57
+  br i1 %v97, label %b7, label %b3
+
+b4:                                               ; preds = %b2
+  %v98 = icmp slt i32 %v5, 1
+  br i1 %v98, label %b5, label %b6
+
+b5:                                               ; preds = %b5, %b4
+  %v99 = phi i32 [ %v123, %b5 ], [ %v55, %b4 ]
+  %v100 = add nsw i32 %v58, -1
+  %v101 = icmp slt i32 %v99, %v100
+  %v102 = select i1 %v101, i32 %v99, i32 %v100
+  %v103 = icmp slt i32 %v102, %v9
+  %v104 = select i1 %v103, i32 %v9, i32 %v102
+  %v105 = sub i32 %v104, %v9
+  %v106 = getelementptr inbounds i8, i8* %v1, i32 %v105
+  %v107 = load i8, i8* %v106, align 1, !tbaa !4
+  %v108 = icmp sle i32 %v64, %v52
+  %v109 = icmp slt i32 %v52, %v11
+  %v110 = icmp sle i32 %v58, %v99
+  %v111 = icmp slt i32 %v99, %v9
+  %v112 = or i1 %v111, %v110
+  %v113 = or i1 %v109, %v112
+  %v114 = or i1 %v108, %v113
+  %v115 = select i1 %v114, i8 0, i8 %v107
+  %v116 = sub i32 1, %v23
+  %v117 = add i32 %v116, %v52
+  %v118 = mul nsw i32 %v117, %v44
+  %v119 = sub i32 1, %v27
+  %v120 = add i32 %v119, %v118
+  %v121 = add i32 %v120, %v99
+  %v122 = getelementptr inbounds i8, i8* %v48, i32 %v121
+  store i8 %v115, i8* %v122, align 1, !tbaa !7
+  %v123 = add nsw i32 %v99, 1
+  %v124 = icmp eq i32 %v123, %v57
+  br i1 %v124, label %b7, label %b5
+
+b6:                                               ; preds = %b6, %b4
+  %v125 = phi i32 [ %v153, %b6 ], [ %v55, %b4 ]
+  %v126 = mul nsw i32 %v11, %v7
+  %v127 = mul nsw i32 %v65, %v7
+  %v128 = add nsw i32 %v58, -1
+  %v129 = icmp slt i32 %v125, %v128
+  %v130 = select i1 %v129, i32 %v125, i32 %v128
+  %v131 = icmp slt i32 %v130, %v9
+  %v132 = select i1 %v131, i32 %v9, i32 %v130
+  %v133 = add i32 %v126, %v9
+  %v134 = sub i32 %v127, %v133
+  %v135 = add i32 %v134, %v132
+  %v136 = getelementptr inbounds i8, i8* %v1, i32 %v135
+  %v137 = load i8, i8* %v136, align 1, !tbaa !4
+  %v138 = icmp sle i32 %v64, %v52
+  %v139 = icmp slt i32 %v52, %v11
+  %v140 = icmp sle i32 %v58, %v125
+  %v141 = icmp slt i32 %v125, %v9
+  %v142 = or i1 %v141, %v140
+  %v143 = or i1 %v139, %v142
+  %v144 = or i1 %v138, %v143
+  %v145 = select i1 %v144, i8 0, i8 %v137
+  %v146 = sub i32 1, %v23
+  %v147 = add i32 %v146, %v52
+  %v148 = mul nsw i32 %v147, %v44
+  %v149 = sub i32 1, %v27
+  %v150 = add i32 %v149, %v148
+  %v151 = add i32 %v150, %v125
+  %v152 = getelementptr inbounds i8, i8* %v48, i32 %v151
+  store i8 %v145, i8* %v152, align 1, !tbaa !7
+  %v153 = add nsw i32 %v125, 1
+  %v154 = icmp eq i32 %v153, %v57
+  br i1 %v154, label %b7, label %b6
+
+b7:                                               ; preds = %b6, %b5, %b3, %b1
+  %v155 = icmp slt i32 %v57, %v62
+  br i1 %v155, label %b8, label %b9, !prof !9
+
+b8:                                               ; preds = %b8, %b7
+  %v156 = phi i32 [ %v181, %b8 ], [ %v57, %b7 ]
+  %v157 = mul nsw i32 %v11, %v7
+  %v158 = add nsw i32 %v11, %v5
+  %v159 = add nsw i32 %v158, -1
+  %v160 = icmp slt i32 %v52, %v159
+  %v161 = select i1 %v160, i32 %v52, i32 %v159
+  %v162 = icmp slt i32 %v161, %v11
+  %v163 = select i1 %v162, i32 %v11, i32 %v161
+  %v164 = mul nsw i32 %v163, %v7
+  %v165 = add i32 %v157, %v9
+  %v166 = sub i32 %v164, %v165
+  %v167 = add i32 %v166, %v156
+  %v168 = getelementptr inbounds i8, i8* %v1, i32 %v167
+  %v169 = load i8, i8* %v168, align 1, !tbaa !4
+  %v170 = icmp sle i32 %v158, %v52
+  %v171 = icmp slt i32 %v52, %v11
+  %v172 = or i1 %v171, %v170
+  %v173 = select i1 %v172, i8 0, i8 %v169
+  %v174 = sub i32 1, %v23
+  %v175 = add i32 %v174, %v52
+  %v176 = mul nsw i32 %v175, %v44
+  %v177 = sub i32 1, %v27
+  %v178 = add i32 %v177, %v176
+  %v179 = add i32 %v178, %v156
+  %v180 = getelementptr inbounds i8, i8* %v48, i32 %v179
+  store i8 %v173, i8* %v180, align 1, !tbaa !7
+  %v181 = add nsw i32 %v156, 1
+  %v182 = icmp eq i32 %v181, %v62
+  br i1 %v182, label %b9, label %b8
+
+b9:                                               ; preds = %b8, %b7
+  %v183 = icmp slt i32 %v62, %v24
+  br i1 %v183, label %b10, label %b11, !prof !9
+
+b10:                                              ; preds = %b10, %b9
+  %v184 = phi i32 [ %v218, %b10 ], [ %v62, %b9 ]
+  %v185 = mul nsw i32 %v11, %v7
+  %v186 = add nsw i32 %v11, %v5
+  %v187 = add nsw i32 %v186, -1
+  %v188 = icmp slt i32 %v52, %v187
+  %v189 = select i1 %v188, i32 %v52, i32 %v187
+  %v190 = icmp slt i32 %v189, %v11
+  %v191 = select i1 %v190, i32 %v11, i32 %v189
+  %v192 = mul nsw i32 %v191, %v7
+  %v193 = add nsw i32 %v58, -1
+  %v194 = icmp slt i32 %v184, %v193
+  %v195 = select i1 %v194, i32 %v184, i32 %v193
+  %v196 = icmp slt i32 %v195, %v9
+  %v197 = select i1 %v196, i32 %v9, i32 %v195
+  %v198 = add i32 %v185, %v9
+  %v199 = sub i32 %v192, %v198
+  %v200 = add i32 %v199, %v197
+  %v201 = getelementptr inbounds i8, i8* %v1, i32 %v200
+  %v202 = load i8, i8* %v201, align 1, !tbaa !4
+  %v203 = icmp sle i32 %v186, %v52
+  %v204 = icmp slt i32 %v52, %v11
+  %v205 = icmp sle i32 %v58, %v184
+  %v206 = icmp slt i32 %v184, %v9
+  %v207 = or i1 %v206, %v205
+  %v208 = or i1 %v204, %v207
+  %v209 = or i1 %v203, %v208
+  %v210 = select i1 %v209, i8 0, i8 %v202
+  %v211 = sub i32 1, %v23
+  %v212 = add i32 %v211, %v52
+  %v213 = mul nsw i32 %v212, %v44
+  %v214 = sub i32 1, %v27
+  %v215 = add i32 %v214, %v213
+  %v216 = add i32 %v215, %v184
+  %v217 = getelementptr inbounds i8, i8* %v48, i32 %v216
+  store i8 %v210, i8* %v217, align 1, !tbaa !7
+  %v218 = add nsw i32 %v184, 1
+  %v219 = icmp eq i32 %v218, %v24
+  br i1 %v219, label %b11, label %b10
+
+b11:                                              ; preds = %b10, %b9
+  %v220 = add nsw i32 %v52, 1
+  %v221 = icmp eq i32 %v220, %v50
+  br i1 %v221, label %b12, label %b1
+
+b12:                                              ; preds = %b11, %b0
+  %v222 = add nsw i32 %v35, 1
+  %v223 = sext i32 %v222 to i64
+  %v224 = shl nsw i64 %v42, 2
+  %v225 = mul i64 %v224, %v223
+  %v226 = trunc i64 %v225 to i32
+  %v227 = tail call i8* @f2(i8* null, i32 %v226)
+  br i1 %v51, label %b14, label %b13, !prof !3
+
+b13:                                              ; preds = %b19, %b12
+  %v228 = phi i32 [ %v351, %b19 ], [ %v49, %b12 ]
+  %v229 = ashr i32 %v15, 6
+  %v230 = icmp slt i32 %v229, 0
+  %v231 = select i1 %v230, i32 0, i32 %v229
+  %v232 = icmp sgt i32 %v231, 0
+  br i1 %v232, label %b16, label %b17, !prof !9
+
+b14:                                              ; preds = %b19, %b12
+  %v233 = icmp eq i8* %v48, null
+  br i1 %v233, label %b20, label %b15
+
+b15:                                              ; preds = %b14
+  tail call void @f3(i8* null, i8* %v48) #2
+  br label %b20
+
+b16:                                              ; preds = %b16, %b13
+  %v234 = phi i32 [ %v289, %b16 ], [ 0, %b13 ]
+  %v235 = sub nsw i32 %v228, %v23
+  %v236 = add nsw i32 %v235, 1
+  %v237 = mul nsw i32 %v236, %v44
+  %v238 = shl i32 %v234, 6
+  %v239 = sub i32 %v21, %v27
+  %v240 = add i32 %v239, %v238
+  %v241 = add nsw i32 %v240, %v237
+  %v242 = getelementptr inbounds i8, i8* %v48, i32 %v241
+  %v243 = bitcast i8* %v242 to <16 x i32>*
+  %v244 = load <16 x i32>, <16 x i32>* %v243, align 1, !tbaa !7
+  %v245 = tail call <32 x i32> @llvm.hexagon.V6.vzb(<16 x i32> %v244)
+  %v246 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v245)
+  %v247 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v245)
+  %v248 = tail call <32 x i32> @llvm.hexagon.V6.vzh(<16 x i32> %v247)
+  %v249 = tail call <32 x i32> @llvm.hexagon.V6.vzh(<16 x i32> %v246)
+  %v250 = add nsw i32 %v241, 1
+  %v251 = getelementptr inbounds i8, i8* %v48, i32 %v250
+  %v252 = bitcast i8* %v251 to <16 x i32>*
+  %v253 = load <16 x i32>, <16 x i32>* %v252, align 1, !tbaa !7
+  %v254 = tail call <32 x i32> @llvm.hexagon.V6.vzb(<16 x i32> %v253)
+  %v255 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v254)
+  %v256 = tail call <32 x i32> @llvm.hexagon.V6.vzh(<16 x i32> %v255)
+  %v257 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v256)
+  %v258 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v256)
+  %v259 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32> %v257, i32 168430090)
+  %v260 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32> %v258, i32 168430090)
+  %v261 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v259, <16 x i32> %v260)
+  %v262 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v254)
+  %v263 = tail call <32 x i32> @llvm.hexagon.V6.vzh(<16 x i32> %v262)
+  %v264 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v263)
+  %v265 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v263)
+  %v266 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32> %v264, i32 168430090)
+  %v267 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32> %v265, i32 168430090)
+  %v268 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v266, <16 x i32> %v267)
+  %v269 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32> %v248, <32 x i32> %v261)
+  %v270 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32> %v249, <32 x i32> %v268)
+  %v271 = shufflevector <32 x i32> %v269, <32 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %v272 = mul nsw i32 %v236, %v222
+  %v273 = add nsw i32 %v240, %v272
+  %v274 = bitcast i8* %v227 to i32*
+  %v275 = getelementptr inbounds i32, i32* %v274, i32 %v273
+  %v276 = bitcast i32* %v275 to <16 x i32>*
+  store <16 x i32> %v271, <16 x i32>* %v276, align 4, !tbaa !10
+  %v277 = shufflevector <32 x i32> %v269, <32 x i32> undef, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %v278 = add nsw i32 %v273, 16
+  %v279 = getelementptr inbounds i32, i32* %v274, i32 %v278
+  %v280 = bitcast i32* %v279 to <16 x i32>*
+  store <16 x i32> %v277, <16 x i32>* %v280, align 4, !tbaa !10
+  %v281 = shufflevector <32 x i32> %v270, <32 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %v282 = add nsw i32 %v273, 32
+  %v283 = getelementptr inbounds i32, i32* %v274, i32 %v282
+  %v284 = bitcast i32* %v283 to <16 x i32>*
+  store <16 x i32> %v281, <16 x i32>* %v284, align 4, !tbaa !10
+  %v285 = shufflevector <32 x i32> %v270, <32 x i32> undef, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %v286 = add nsw i32 %v273, 48
+  %v287 = getelementptr inbounds i32, i32* %v274, i32 %v286
+  %v288 = bitcast i32* %v287 to <16 x i32>*
+  store <16 x i32> %v285, <16 x i32>* %v288, align 4, !tbaa !10
+  %v289 = add nuw nsw i32 %v234, 1
+  %v290 = icmp eq i32 %v289, %v231
+  br i1 %v290, label %b17, label %b16
+
+b17:                                              ; preds = %b16, %b13
+  %v291 = add nsw i32 %v15, 63
+  %v292 = ashr i32 %v291, 6
+  %v293 = icmp slt i32 %v231, %v292
+  br i1 %v293, label %b18, label %b19, !prof !9
+
+b18:                                              ; preds = %b18, %b17
+  %v294 = phi i32 [ %v349, %b18 ], [ %v231, %b17 ]
+  %v295 = sub nsw i32 %v228, %v23
+  %v296 = add nsw i32 %v295, 1
+  %v297 = mul nsw i32 %v296, %v44
+  %v298 = sub nsw i32 %v24, %v27
+  %v299 = add nsw i32 %v297, %v298
+  %v300 = add nsw i32 %v299, -64
+  %v301 = getelementptr inbounds i8, i8* %v48, i32 %v300
+  %v302 = bitcast i8* %v301 to <16 x i32>*
+  %v303 = load <16 x i32>, <16 x i32>* %v302, align 1, !tbaa !7
+  %v304 = tail call <32 x i32> @llvm.hexagon.V6.vzb(<16 x i32> %v303)
+  %v305 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v304)
+  %v306 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v304)
+  %v307 = tail call <32 x i32> @llvm.hexagon.V6.vzh(<16 x i32> %v306)
+  %v308 = tail call <32 x i32> @llvm.hexagon.V6.vzh(<16 x i32> %v305)
+  %v309 = add nsw i32 %v299, -63
+  %v310 = getelementptr inbounds i8, i8* %v48, i32 %v309
+  %v311 = bitcast i8* %v310 to <16 x i32>*
+  %v312 = load <16 x i32>, <16 x i32>* %v311, align 1, !tbaa !7
+  %v313 = tail call <32 x i32> @llvm.hexagon.V6.vzb(<16 x i32> %v312)
+  %v314 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v313)
+  %v315 = tail call <32 x i32> @llvm.hexagon.V6.vzh(<16 x i32> %v314)
+  %v316 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v315)
+  %v317 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v315)
+  %v318 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32> %v316, i32 168430090)
+  %v319 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32> %v317, i32 168430090)
+  %v320 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v318, <16 x i32> %v319)
+  %v321 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v313)
+  %v322 = tail call <32 x i32> @llvm.hexagon.V6.vzh(<16 x i32> %v321)
+  %v323 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v322)
+  %v324 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v322)
+  %v325 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32> %v323, i32 168430090)
+  %v326 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32> %v324, i32 168430090)
+  %v327 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v325, <16 x i32> %v326)
+  %v328 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32> %v307, <32 x i32> %v320)
+  %v329 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32> %v308, <32 x i32> %v327)
+  %v330 = shufflevector <32 x i32> %v328, <32 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %v331 = mul nsw i32 %v296, %v222
+  %v332 = add nsw i32 %v331, %v298
+  %v333 = add nsw i32 %v332, -64
+  %v334 = bitcast i8* %v227 to i32*
+  %v335 = getelementptr inbounds i32, i32* %v334, i32 %v333
+  %v336 = bitcast i32* %v335 to <16 x i32>*
+  store <16 x i32> %v330, <16 x i32>* %v336, align 4, !tbaa !10
+  %v337 = shufflevector <32 x i32> %v328, <32 x i32> undef, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %v338 = add nsw i32 %v332, -48
+  %v339 = getelementptr inbounds i32, i32* %v334, i32 %v338
+  %v340 = bitcast i32* %v339 to <16 x i32>*
+  store <16 x i32> %v337, <16 x i32>* %v340, align 4, !tbaa !10
+  %v341 = shufflevector <32 x i32> %v329, <32 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %v342 = add nsw i32 %v332, -32
+  %v343 = getelementptr inbounds i32, i32* %v334, i32 %v342
+  %v344 = bitcast i32* %v343 to <16 x i32>*
+  store <16 x i32> %v341, <16 x i32>* %v344, align 4, !tbaa !10
+  %v345 = shufflevector <32 x i32> %v329, <32 x i32> undef, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %v346 = add nsw i32 %v332, -16
+  %v347 = getelementptr inbounds i32, i32* %v334, i32 %v346
+  %v348 = bitcast i32* %v347 to <16 x i32>*
+  store <16 x i32> %v345, <16 x i32>* %v348, align 4, !tbaa !10
+  %v349 = add nuw nsw i32 %v294, 1
+  %v350 = icmp eq i32 %v349, %v292
+  br i1 %v350, label %b19, label %b18
+
+b19:                                              ; preds = %b18, %b17
+  %v351 = add nsw i32 %v228, 1
+  %v352 = icmp eq i32 %v351, %v50
+  br i1 %v352, label %b14, label %b13
+
+b20:                                              ; preds = %b15, %b14
+  %v353 = icmp sgt i32 %v17, 0
+  br i1 %v353, label %b21, label %b31, !prof !9
+
+b21:                                              ; preds = %b20
+  %v354 = ashr i32 %v15, 6
+  %v355 = icmp slt i32 %v354, 0
+  %v356 = select i1 %v355, i32 0, i32 %v354
+  %v357 = icmp sgt i32 %v356, 0
+  br i1 %v357, label %b25, label %b27
+
+b22:                                              ; preds = %b25, %b22
+  %v358 = phi i32 [ %v442, %b22 ], [ 0, %b25 ]
+  %v359 = sub nsw i32 %v525, %v23
+  %v360 = mul nsw i32 %v359, %v222
+  %v361 = shl nsw i32 %v358, 6
+  %v362 = add nsw i32 %v361, %v21
+  %v363 = sub nsw i32 %v362, %v27
+  %v364 = add nsw i32 %v363, %v360
+  %v365 = bitcast i8* %v227 to i32*
+  %v366 = getelementptr inbounds i32, i32* %v365, i32 %v364
+  %v367 = bitcast i32* %v366 to <16 x i32>*
+  %v368 = load <16 x i32>, <16 x i32>* %v367, align 4, !tbaa !10
+  %v369 = add nsw i32 %v364, 16
+  %v370 = getelementptr inbounds i32, i32* %v365, i32 %v369
+  %v371 = bitcast i32* %v370 to <16 x i32>*
+  %v372 = load <16 x i32>, <16 x i32>* %v371, align 4, !tbaa !10
+  %v373 = shufflevector <16 x i32> %v368, <16 x i32> %v372, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %v374 = add nsw i32 %v359, 1
+  %v375 = mul nsw i32 %v374, %v222
+  %v376 = add nsw i32 %v363, %v375
+  %v377 = getelementptr inbounds i32, i32* %v365, i32 %v376
+  %v378 = bitcast i32* %v377 to <16 x i32>*
+  %v379 = load <16 x i32>, <16 x i32>* %v378, align 4, !tbaa !10
+  %v380 = add nsw i32 %v376, 16
+  %v381 = getelementptr inbounds i32, i32* %v365, i32 %v380
+  %v382 = bitcast i32* %v381 to <16 x i32>*
+  %v383 = load <16 x i32>, <16 x i32>* %v382, align 4, !tbaa !10
+  %v384 = shufflevector <16 x i32> %v379, <16 x i32> %v383, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %v385 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v384)
+  %v386 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v384)
+  %v387 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32> %v385, i32 168430090)
+  %v388 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32> %v386, i32 168430090)
+  %v389 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v387, <16 x i32> %v388)
+  %v390 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32> %v373, <32 x i32> %v389)
+  %v391 = shufflevector <32 x i32> %v390, <32 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %v392 = tail call <16 x i32> @llvm.hexagon.V6.vlsrw(<16 x i32> %v391, i32 20)
+  %v393 = shufflevector <32 x i32> %v390, <32 x i32> undef, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %v394 = tail call <16 x i32> @llvm.hexagon.V6.vlsrw(<16 x i32> %v393, i32 20)
+  %v395 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v394, <16 x i32> %v392)
+  %v396 = add nsw i32 %v364, 32
+  %v397 = getelementptr inbounds i32, i32* %v365, i32 %v396
+  %v398 = bitcast i32* %v397 to <16 x i32>*
+  %v399 = load <16 x i32>, <16 x i32>* %v398, align 4, !tbaa !10
+  %v400 = add nsw i32 %v364, 48
+  %v401 = getelementptr inbounds i32, i32* %v365, i32 %v400
+  %v402 = bitcast i32* %v401 to <16 x i32>*
+  %v403 = load <16 x i32>, <16 x i32>* %v402, align 4, !tbaa !10
+  %v404 = shufflevector <16 x i32> %v399, <16 x i32> %v403, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %v405 = add nsw i32 %v376, 32
+  %v406 = getelementptr inbounds i32, i32* %v365, i32 %v405
+  %v407 = bitcast i32* %v406 to <16 x i32>*
+  %v408 = load <16 x i32>, <16 x i32>* %v407, align 4, !tbaa !10
+  %v409 = add nsw i32 %v376, 48
+  %v410 = getelementptr inbounds i32, i32* %v365, i32 %v409
+  %v411 = bitcast i32* %v410 to <16 x i32>*
+  %v412 = load <16 x i32>, <16 x i32>* %v411, align 4, !tbaa !10
+  %v413 = shufflevector <16 x i32> %v408, <16 x i32> %v412, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %v414 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v413)
+  %v415 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v413)
+  %v416 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32> %v414, i32 168430090)
+  %v417 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32> %v415, i32 168430090)
+  %v418 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v416, <16 x i32> %v417)
+  %v419 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32> %v404, <32 x i32> %v418)
+  %v420 = shufflevector <32 x i32> %v419, <32 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %v421 = tail call <16 x i32> @llvm.hexagon.V6.vlsrw(<16 x i32> %v420, i32 20)
+  %v422 = shufflevector <32 x i32> %v419, <32 x i32> undef, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %v423 = tail call <16 x i32> @llvm.hexagon.V6.vlsrw(<16 x i32> %v422, i32 20)
+  %v424 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v423, <16 x i32> %v421)
+  %v425 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v395)
+  %v426 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v395)
+  %v427 = tail call <16 x i32> @llvm.hexagon.V6.vsatwh(<16 x i32> %v425, <16 x i32> %v426)
+  %v428 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v424)
+  %v429 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v424)
+  %v430 = tail call <16 x i32> @llvm.hexagon.V6.vsatwh(<16 x i32> %v428, <16 x i32> %v429)
+  %v431 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v430, <16 x i32> %v427)
+  %v432 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v431)
+  %v433 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v431)
+  %v434 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %v432, <16 x i32> %v433)
+  %v435 = mul nsw i32 %v23, %v19
+  %v436 = mul nsw i32 %v525, %v19
+  %v437 = add i32 %v435, %v21
+  %v438 = sub i32 %v436, %v437
+  %v439 = add i32 %v438, %v362
+  %v440 = getelementptr inbounds i8, i8* %v13, i32 %v439
+  %v441 = bitcast i8* %v440 to <16 x i32>*
+  store <16 x i32> %v434, <16 x i32>* %v441, align 1, !tbaa !12
+  %v442 = add nuw nsw i32 %v358, 1
+  %v443 = icmp eq i32 %v442, %v356
+  br i1 %v443, label %b26, label %b22
+
+b23:                                              ; preds = %b26, %b23
+  %v444 = phi i32 [ %v521, %b23 ], [ %v356, %b26 ]
+  %v445 = sub nsw i32 %v24, %v27
+  %v446 = add nsw i32 %v360, %v445
+  %v447 = add nsw i32 %v446, -64
+  %v448 = getelementptr inbounds i32, i32* %v365, i32 %v447
+  %v449 = bitcast i32* %v448 to <16 x i32>*
+  %v450 = load <16 x i32>, <16 x i32>* %v449, align 4, !tbaa !10
+  %v451 = add nsw i32 %v446, -48
+  %v452 = getelementptr inbounds i32, i32* %v365, i32 %v451
+  %v453 = bitcast i32* %v452 to <16 x i32>*
+  %v454 = load <16 x i32>, <16 x i32>* %v453, align 4, !tbaa !10
+  %v455 = shufflevector <16 x i32> %v450, <16 x i32> %v454, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %v456 = add nsw i32 %v375, %v445
+  %v457 = add nsw i32 %v456, -64
+  %v458 = getelementptr inbounds i32, i32* %v365, i32 %v457
+  %v459 = bitcast i32* %v458 to <16 x i32>*
+  %v460 = load <16 x i32>, <16 x i32>* %v459, align 4, !tbaa !10
+  %v461 = add nsw i32 %v456, -48
+  %v462 = getelementptr inbounds i32, i32* %v365, i32 %v461
+  %v463 = bitcast i32* %v462 to <16 x i32>*
+  %v464 = load <16 x i32>, <16 x i32>* %v463, align 4, !tbaa !10
+  %v465 = shufflevector <16 x i32> %v460, <16 x i32> %v464, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %v466 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v465)
+  %v467 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v465)
+  %v468 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32> %v466, i32 168430090)
+  %v469 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32> %v467, i32 168430090)
+  %v470 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v468, <16 x i32> %v469)
+  %v471 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32> %v455, <32 x i32> %v470)
+  %v472 = shufflevector <32 x i32> %v471, <32 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %v473 = tail call <16 x i32> @llvm.hexagon.V6.vlsrw(<16 x i32> %v472, i32 20)
+  %v474 = shufflevector <32 x i32> %v471, <32 x i32> undef, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %v475 = tail call <16 x i32> @llvm.hexagon.V6.vlsrw(<16 x i32> %v474, i32 20)
+  %v476 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v475, <16 x i32> %v473)
+  %v477 = add nsw i32 %v446, -32
+  %v478 = getelementptr inbounds i32, i32* %v365, i32 %v477
+  %v479 = bitcast i32* %v478 to <16 x i32>*
+  %v480 = load <16 x i32>, <16 x i32>* %v479, align 4, !tbaa !10
+  %v481 = add nsw i32 %v446, -16
+  %v482 = getelementptr inbounds i32, i32* %v365, i32 %v481
+  %v483 = bitcast i32* %v482 to <16 x i32>*
+  %v484 = load <16 x i32>, <16 x i32>* %v483, align 4, !tbaa !10
+  %v485 = shufflevector <16 x i32> %v480, <16 x i32> %v484, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %v486 = add nsw i32 %v456, -32
+  %v487 = getelementptr inbounds i32, i32* %v365, i32 %v486
+  %v488 = bitcast i32* %v487 to <16 x i32>*
+  %v489 = load <16 x i32>, <16 x i32>* %v488, align 4, !tbaa !10
+  %v490 = add nsw i32 %v456, -16
+  %v491 = getelementptr inbounds i32, i32* %v365, i32 %v490
+  %v492 = bitcast i32* %v491 to <16 x i32>*
+  %v493 = load <16 x i32>, <16 x i32>* %v492, align 4, !tbaa !10
+  %v494 = shufflevector <16 x i32> %v489, <16 x i32> %v493, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %v495 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v494)
+  %v496 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v494)
+  %v497 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32> %v495, i32 168430090)
+  %v498 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32> %v496, i32 168430090)
+  %v499 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v497, <16 x i32> %v498)
+  %v500 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32> %v485, <32 x i32> %v499)
+  %v501 = shufflevector <32 x i32> %v500, <32 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %v502 = tail call <16 x i32> @llvm.hexagon.V6.vlsrw(<16 x i32> %v501, i32 20)
+  %v503 = shufflevector <32 x i32> %v500, <32 x i32> undef, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %v504 = tail call <16 x i32> @llvm.hexagon.V6.vlsrw(<16 x i32> %v503, i32 20)
+  %v505 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v504, <16 x i32> %v502)
+  %v506 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v476)
+  %v507 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v476)
+  %v508 = tail call <16 x i32> @llvm.hexagon.V6.vsatwh(<16 x i32> %v506, <16 x i32> %v507)
+  %v509 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v505)
+  %v510 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v505)
+  %v511 = tail call <16 x i32> @llvm.hexagon.V6.vsatwh(<16 x i32> %v509, <16 x i32> %v510)
+  %v512 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v511, <16 x i32> %v508)
+  %v513 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v512)
+  %v514 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v512)
+  %v515 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %v513, <16 x i32> %v514)
+  %v516 = add i32 %v15, -64
+  %v517 = sub i32 %v516, %v435
+  %v518 = add i32 %v517, %v436
+  %v519 = getelementptr inbounds i8, i8* %v13, i32 %v518
+  %v520 = bitcast i8* %v519 to <16 x i32>*
+  store <16 x i32> %v515, <16 x i32>* %v520, align 1, !tbaa !12
+  %v521 = add nuw nsw i32 %v444, 1
+  %v522 = icmp eq i32 %v521, %v527
+  br i1 %v522, label %b24, label %b23
+
+b24:                                              ; preds = %b26, %b23
+  %v523 = add nsw i32 %v525, 1
+  %v524 = icmp eq i32 %v523, %v50
+  br i1 %v524, label %b32, label %b25
+
+b25:                                              ; preds = %b24, %b21
+  %v525 = phi i32 [ %v523, %b24 ], [ %v23, %b21 ]
+  br label %b22
+
+b26:                                              ; preds = %b22
+  %v526 = add nsw i32 %v15, 63
+  %v527 = ashr i32 %v526, 6
+  %v528 = icmp slt i32 %v356, %v527
+  br i1 %v528, label %b23, label %b24, !prof !9
+
+b27:                                              ; preds = %b21
+  %v529 = add nsw i32 %v15, 63
+  %v530 = ashr i32 %v529, 6
+  %v531 = icmp slt i32 %v356, %v530
+  br i1 %v531, label %b29, label %b31
+
+b28:                                              ; preds = %b29, %b28
+  %v532 = phi i32 [ %v616, %b28 ], [ %v356, %b29 ]
+  %v533 = sub nsw i32 %v618, %v23
+  %v534 = mul nsw i32 %v533, %v222
+  %v535 = sub nsw i32 %v24, %v27
+  %v536 = add nsw i32 %v534, %v535
+  %v537 = add nsw i32 %v536, -64
+  %v538 = bitcast i8* %v227 to i32*
+  %v539 = getelementptr inbounds i32, i32* %v538, i32 %v537
+  %v540 = bitcast i32* %v539 to <16 x i32>*
+  %v541 = load <16 x i32>, <16 x i32>* %v540, align 4, !tbaa !10
+  %v542 = add nsw i32 %v536, -48
+  %v543 = getelementptr inbounds i32, i32* %v538, i32 %v542
+  %v544 = bitcast i32* %v543 to <16 x i32>*
+  %v545 = load <16 x i32>, <16 x i32>* %v544, align 4, !tbaa !10
+  %v546 = shufflevector <16 x i32> %v541, <16 x i32> %v545, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %v547 = add nsw i32 %v533, 1
+  %v548 = mul nsw i32 %v547, %v222
+  %v549 = add nsw i32 %v548, %v535
+  %v550 = add nsw i32 %v549, -64
+  %v551 = getelementptr inbounds i32, i32* %v538, i32 %v550
+  %v552 = bitcast i32* %v551 to <16 x i32>*
+  %v553 = load <16 x i32>, <16 x i32>* %v552, align 4, !tbaa !10
+  %v554 = add nsw i32 %v549, -48
+  %v555 = getelementptr inbounds i32, i32* %v538, i32 %v554
+  %v556 = bitcast i32* %v555 to <16 x i32>*
+  %v557 = load <16 x i32>, <16 x i32>* %v556, align 4, !tbaa !10
+  %v558 = shufflevector <16 x i32> %v553, <16 x i32> %v557, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %v559 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v558)
+  %v560 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v558)
+  %v561 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32> %v559, i32 168430090)
+  %v562 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32> %v560, i32 168430090)
+  %v563 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v561, <16 x i32> %v562)
+  %v564 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32> %v546, <32 x i32> %v563)
+  %v565 = shufflevector <32 x i32> %v564, <32 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %v566 = tail call <16 x i32> @llvm.hexagon.V6.vlsrw(<16 x i32> %v565, i32 20)
+  %v567 = shufflevector <32 x i32> %v564, <32 x i32> undef, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %v568 = tail call <16 x i32> @llvm.hexagon.V6.vlsrw(<16 x i32> %v567, i32 20)
+  %v569 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v568, <16 x i32> %v566)
+  %v570 = add nsw i32 %v536, -32
+  %v571 = getelementptr inbounds i32, i32* %v538, i32 %v570
+  %v572 = bitcast i32* %v571 to <16 x i32>*
+  %v573 = load <16 x i32>, <16 x i32>* %v572, align 4, !tbaa !10
+  %v574 = add nsw i32 %v536, -16
+  %v575 = getelementptr inbounds i32, i32* %v538, i32 %v574
+  %v576 = bitcast i32* %v575 to <16 x i32>*
+  %v577 = load <16 x i32>, <16 x i32>* %v576, align 4, !tbaa !10
+  %v578 = shufflevector <16 x i32> %v573, <16 x i32> %v577, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %v579 = add nsw i32 %v549, -32
+  %v580 = getelementptr inbounds i32, i32* %v538, i32 %v579
+  %v581 = bitcast i32* %v580 to <16 x i32>*
+  %v582 = load <16 x i32>, <16 x i32>* %v581, align 4, !tbaa !10
+  %v583 = add nsw i32 %v549, -16
+  %v584 = getelementptr inbounds i32, i32* %v538, i32 %v583
+  %v585 = bitcast i32* %v584 to <16 x i32>*
+  %v586 = load <16 x i32>, <16 x i32>* %v585, align 4, !tbaa !10
+  %v587 = shufflevector <16 x i32> %v582, <16 x i32> %v586, <32 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %v588 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v587)
+  %v589 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v587)
+  %v590 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32> %v588, i32 168430090)
+  %v591 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32> %v589, i32 168430090)
+  %v592 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v590, <16 x i32> %v591)
+  %v593 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32> %v578, <32 x i32> %v592)
+  %v594 = shufflevector <32 x i32> %v593, <32 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  %v595 = tail call <16 x i32> @llvm.hexagon.V6.vlsrw(<16 x i32> %v594, i32 20)
+  %v596 = shufflevector <32 x i32> %v593, <32 x i32> undef, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  %v597 = tail call <16 x i32> @llvm.hexagon.V6.vlsrw(<16 x i32> %v596, i32 20)
+  %v598 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v597, <16 x i32> %v595)
+  %v599 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v569)
+  %v600 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v569)
+  %v601 = tail call <16 x i32> @llvm.hexagon.V6.vsatwh(<16 x i32> %v599, <16 x i32> %v600)
+  %v602 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v598)
+  %v603 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v598)
+  %v604 = tail call <16 x i32> @llvm.hexagon.V6.vsatwh(<16 x i32> %v602, <16 x i32> %v603)
+  %v605 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v604, <16 x i32> %v601)
+  %v606 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v605)
+  %v607 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v605)
+  %v608 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %v606, <16 x i32> %v607)
+  %v609 = mul nsw i32 %v23, %v19
+  %v610 = mul nsw i32 %v618, %v19
+  %v611 = add i32 %v15, -64
+  %v612 = sub i32 %v611, %v609
+  %v613 = add i32 %v612, %v610
+  %v614 = getelementptr inbounds i8, i8* %v13, i32 %v613
+  %v615 = bitcast i8* %v614 to <16 x i32>*
+  store <16 x i32> %v608, <16 x i32>* %v615, align 1, !tbaa !12
+  %v616 = add nuw nsw i32 %v532, 1
+  %v617 = icmp eq i32 %v616, %v530
+  br i1 %v617, label %b30, label %b28
+
+b29:                                              ; preds = %b30, %b27
+  %v618 = phi i32 [ %v619, %b30 ], [ %v23, %b27 ]
+  br label %b28
+
+b30:                                              ; preds = %b28
+  %v619 = add nsw i32 %v618, 1
+  %v620 = icmp eq i32 %v619, %v50
+  br i1 %v620, label %b32, label %b29
+
+b31:                                              ; preds = %b27, %b20
+  %v621 = icmp eq i8* %v227, null
+  br i1 %v621, label %b33, label %b32
+
+b32:                                              ; preds = %b31, %b30, %b24
+  tail call void @f3(i8* null, i8* %v227) #2
+  br label %b33
+
+b33:                                              ; preds = %b32, %b31
+  ret i32 0
+}
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vzb(<16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vzh(<16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32>, <32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vlsrw(<16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vsatwh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32>, <16 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nobuiltin nounwind }
+
+!llvm.module.flags = !{!0, !1, !2}
+
+!0 = !{i32 2, !"halide_use_soft_float_abi", i32 0}
+!1 = !{i32 2, !"halide_mcpu", !"hexagonv60"}
+!2 = !{i32 2, !"halide_mattrs", !"+hvx"}
+!3 = !{!"branch_weights", i32 0, i32 1073741824}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"input", !6}
+!6 = !{!"Halide buffer"}
+!7 = !{!8, !8, i64 0}
+!8 = !{!"constant_exterior", !6}
+!9 = !{!"branch_weights", i32 1073741824, i32 0}
+!10 = !{!11, !11, i64 0}
+!11 = !{!"rows", !6}
+!12 = !{!13, !13, i64 0}
+!13 = !{!"gaussian11", !6}

Added: llvm/trunk/test/CodeGen/Hexagon/const-combine.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/const-combine.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/const-combine.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/const-combine.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,48 @@
+; RUN: llc -march=hexagon -disable-const64=1 < %s | FileCheck %s
+; CHECK: combine(##4917,#88)
+
+target triple = "hexagon"
+
+%s.1 = type { %s.2 }
+%s.2 = type { i32, i8* }
+
+@g0 = internal constant [61 x i8] c"............................................................\00", align 4
+@g1 = internal constant %s.1 { %s.2 { i32 8, i8* getelementptr inbounds ([61 x i8], [61 x i8]* @g0, i32 0, i32 0) } }, align 4
+
+define void @f0(i32 %a0) local_unnamed_addr #0 {
+b0:
+  %v0 = alloca i8*, align 4
+  store i8* null, i8** %v0, align 4, !tbaa !0
+  call void @f1(i32 88, i16 zeroext 4917, i8** nonnull %v0) #0
+  %v1 = load i8*, i8** %v0, align 4, !tbaa !0
+  %v2 = icmp eq i8* %v1, null
+  br i1 %v2, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  call void @f2(%s.1* nonnull @g1) #0
+  br label %b3
+
+b2:                                               ; preds = %b0
+  %v3 = call i32 @f3(i8 zeroext 22, i8* null, i8* nonnull %v1, i16 zeroext 88) #0
+  %v4 = load i8*, i8** %v0, align 4, !tbaa !0
+  call void @f4(i8* %v4, i32 88) #0
+  br label %b3
+
+b3:                                               ; preds = %b2, %b1
+  ret void
+}
+
+declare void @f1(i32, i16 zeroext, i8**) local_unnamed_addr
+
+declare void @f2(%s.1*) local_unnamed_addr
+
+declare i32 @f3(i8 zeroext, i8*, i8*, i16 zeroext) local_unnamed_addr
+
+declare void @f4(i8*, i32) local_unnamed_addr
+
+attributes #0 = { nounwind optsize }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"any pointer", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/constext-call.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/constext-call.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/constext-call.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/constext-call.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,59 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Test that the first packet contains 4 instructions, including a call.
+; The compiler incorrectly marked the call as constant extended, which
+; meant it couldn't fit in the first packet; however, calls are never
+; constant extended by the compiler.
+
+; CHECK: {
+; CHECK-NEXT: call f1
+; CHECK-NEXT: combine
+; CHECK-NEXT: memd
+; CHECK-NEXT: allocframe
+; CHECK-NEXT: }
+
+
+@g0 = external global i32
+
+; Function Attrs: noinline nounwind
+define i32 @f0(i32 %a0, i32* nocapture %a1) #0 {
+b0:
+  %v0 = tail call i32 @f1(i32 %a0)
+  %v1 = icmp eq i32 %v0, 0
+  %v2 = select i1 %v1, i32 3, i32 %a0
+  store i32 %v2, i32* %a1, align 4
+  switch i32 %a0, label %b5 [
+    i32 0, label %b1
+    i32 1, label %b2
+    i32 2, label %b3
+    i32 4, label %b4
+  ]
+
+b1:                                               ; preds = %b0
+  store i32 0, i32* %a1, align 4
+  br label %b5
+
+b2:                                               ; preds = %b0
+  %v3 = load i32, i32* @g0, align 4
+  %v4 = icmp sgt i32 %v3, 100
+  %v5 = select i1 %v4, i32 0, i32 3
+  store i32 %v5, i32* %a1, align 4
+  br label %b5
+
+b3:                                               ; preds = %b0
+  store i32 1, i32* %a1, align 4
+  br label %b5
+
+b4:                                               ; preds = %b0
+  store i32 2, i32* %a1, align 4
+  br label %b5
+
+b5:                                               ; preds = %b4, %b3, %b2, %b1, %b0
+  ret i32 undef
+}
+
+; Function Attrs: noinline nounwind readnone
+declare i32 @f1(i32) #1
+
+attributes #0 = { noinline nounwind "target-cpu"="hexagonv60" }
+attributes #1 = { noinline nounwind readnone "target-cpu"="hexagonv60" }
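
(Hexagon groups up to four instructions into a VLIW packet, printed between
braces in the assembly.  The CHECK-NEXT lines above pin down the contents of
the first packet of f0; only the mnemonics are matched, so the sketch below
elides the operands:

    ; {
    ;   call f1
    ;   combine(...)
    ;   memd(...)
    ;   allocframe(...)
    ; }

Because calls are never constant extended, the call needs no extender slot and
all four instructions fit in a single packet.)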

Added: llvm/trunk/test/CodeGen/Hexagon/constext-immstore.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/constext-immstore.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/constext-immstore.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/constext-immstore.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,162 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+%s.0 = type { i8, i8, i8*, i8, i32, %s.0*, %s.0* }
+%s.1 = type { %s.1*, %s.2, %s.0*, %s.2 }
+%s.2 = type { i8, %s.3, i8 }
+%s.3 = type { %s.4* }
+%s.4 = type { [65 x i8], i16, %s.4*, %s.4* }
+
+ at g0 = private unnamed_addr constant [4 x i8] c"and\00", align 1
+ at g1 = private unnamed_addr constant [3 x i8] c"or\00", align 1
+ at g2 = private unnamed_addr constant [8 x i8] c"implies\00", align 1
+ at g3 = private unnamed_addr constant [3 x i8] c"if\00", align 1
+ at g4 = global [4 x %s.0] [%s.0 { i8 1, i8 38, i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i8 1, i32 8, %s.0* null, %s.0* null }, %s.0 { i8 2, i8 124, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @g1, i32 0, i32 0), i8 1, i32 7, %s.0* null, %s.0* null }, %s.0 { i8 3, i8 62, i8* getelementptr inbounds ([8 x i8], [8 x i8]* @g2, i32 0, i32 0), i8 1, i32 1, %s.0* null, %s.0* null }, %s.0 { i8 4, i8 60, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @g3, i32 0, i32 0), i8 1, i32 1, %s.0* null, %s.0* null }], align 8
+ at g5 = internal global [64 x i8] zeroinitializer, align 8
+ at g6 = internal unnamed_addr global %s.0* null, align 4
+
+; Function Attrs: nounwind
+define %s.1* @f0() #0 {
+b0:
+  %v0 = tail call %s.1* @f1(%s.1* null) #0
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v1 = tail call zeroext i8 @f2(i8* getelementptr inbounds ([64 x i8], [64 x i8]* @g5, i32 0, i32 0)) #0
+  switch i8 %v1, label %b1 [
+    i8 8, label %b2
+    i8 6, label %b2
+  ]
+
+b2:                                               ; preds = %b1, %b1
+  ret %s.1* %v0
+}
+
+declare %s.1* @f1(%s.1*) #0
+
+declare zeroext i8 @f2(i8*) #0
+
+; Function Attrs: nounwind
+define void @f3() #0 {
+b0:
+  store %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 0), %s.0** @g6, align 4
+  store %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 0), %s.0** getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 0, i32 5), align 8
+  store %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 0), %s.0** getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 0, i32 6), align 4
+  %v0 = load i8*, i8** getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 1, i32 2), align 4
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v1 = phi %s.0* [ getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 0), %b0 ], [ %v9, %b1 ]
+  %v2 = getelementptr inbounds %s.0, %s.0* %v1, i32 0, i32 2
+  %v3 = load i8*, i8** %v2, align 4
+  %v4 = tail call i32 @f4(i8* %v0, i8* %v3) #0
+  %v5 = icmp sgt i32 %v4, 0
+  %v6 = getelementptr inbounds %s.0, %s.0* %v1, i32 0, i32 5
+  %v7 = getelementptr inbounds %s.0, %s.0* %v1, i32 0, i32 6
+  %v8 = select i1 %v5, %s.0** %v6, %s.0** %v7
+  %v9 = load %s.0*, %s.0** %v8, align 4
+  %v10 = icmp eq %s.0* %v9, null
+  br i1 %v10, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  %v11 = phi i32 [ %v4, %b1 ]
+  %v12 = phi %s.0* [ %v1, %b1 ]
+  %v13 = icmp sgt i32 %v11, 0
+  br i1 %v13, label %b3, label %b4
+
+b3:                                               ; preds = %b2
+  %v14 = getelementptr inbounds %s.0, %s.0* %v12, i32 0, i32 5
+  store %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 1), %s.0** %v14, align 4
+  br label %b4
+
+b4:                                               ; preds = %b3, %b2
+  %v15 = getelementptr inbounds %s.0, %s.0* %v12, i32 0, i32 6
+  store %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 1), %s.0** %v15, align 4
+  %v16 = load %s.0*, %s.0** @g6, align 4
+  %v17 = icmp eq %s.0* %v16, null
+  br i1 %v17, label %b8, label %b5
+
+b5:                                               ; preds = %b4
+  %v18 = load i8*, i8** getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 2, i32 2), align 4
+  br label %b6
+
+b6:                                               ; preds = %b6, %b5
+  %v19 = phi %s.0* [ %v16, %b5 ], [ %v27, %b6 ]
+  %v20 = getelementptr inbounds %s.0, %s.0* %v19, i32 0, i32 2
+  %v21 = load i8*, i8** %v20, align 4
+  %v22 = tail call i32 @f4(i8* %v18, i8* %v21) #0
+  %v23 = icmp sgt i32 %v22, 0
+  %v24 = getelementptr inbounds %s.0, %s.0* %v19, i32 0, i32 5
+  %v25 = getelementptr inbounds %s.0, %s.0* %v19, i32 0, i32 6
+  %v26 = select i1 %v23, %s.0** %v24, %s.0** %v25
+  %v27 = load %s.0*, %s.0** %v26, align 4
+  %v28 = icmp eq %s.0* %v27, null
+  br i1 %v28, label %b7, label %b6
+
+b7:                                               ; preds = %b6
+  %v29 = phi i32 [ %v22, %b6 ]
+  %v30 = phi %s.0* [ %v19, %b6 ]
+  br label %b8
+
+b8:                                               ; preds = %b7, %b4
+  %v31 = phi i32 [ %v11, %b4 ], [ %v29, %b7 ]
+  %v32 = phi %s.0* [ null, %b4 ], [ %v30, %b7 ]
+  %v33 = icmp sgt i32 %v31, 0
+  br i1 %v33, label %b9, label %b10
+
+b9:                                               ; preds = %b8
+  %v34 = getelementptr inbounds %s.0, %s.0* %v32, i32 0, i32 5
+  store %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 2), %s.0** %v34, align 4
+  br label %b10
+
+b10:                                              ; preds = %b9, %b8
+  %v35 = getelementptr inbounds %s.0, %s.0* %v32, i32 0, i32 6
+  store %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 2), %s.0** %v35, align 4
+  %v36 = load %s.0*, %s.0** @g6, align 4
+  %v37 = icmp eq %s.0* %v36, null
+  br i1 %v37, label %b14, label %b11
+
+b11:                                              ; preds = %b10
+  %v38 = load i8*, i8** getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 3, i32 2), align 4
+  br label %b12
+
+b12:                                              ; preds = %b12, %b11
+  %v39 = phi %s.0* [ %v36, %b11 ], [ %v47, %b12 ]
+  %v40 = getelementptr inbounds %s.0, %s.0* %v39, i32 0, i32 2
+  %v41 = load i8*, i8** %v40, align 4
+  %v42 = tail call i32 @f4(i8* %v38, i8* %v41) #0
+  %v43 = icmp sgt i32 %v42, 0
+  %v44 = getelementptr inbounds %s.0, %s.0* %v39, i32 0, i32 5
+  %v45 = getelementptr inbounds %s.0, %s.0* %v39, i32 0, i32 6
+  %v46 = select i1 %v43, %s.0** %v44, %s.0** %v45
+  %v47 = load %s.0*, %s.0** %v46, align 4
+  %v48 = icmp eq %s.0* %v47, null
+  br i1 %v48, label %b13, label %b12
+
+b13:                                              ; preds = %b12
+  %v49 = phi i32 [ %v42, %b12 ]
+  %v50 = phi %s.0* [ %v39, %b12 ]
+  br label %b14
+
+b14:                                              ; preds = %b13, %b10
+  %v51 = phi i32 [ %v31, %b10 ], [ %v49, %b13 ]
+  %v52 = phi %s.0* [ null, %b10 ], [ %v50, %b13 ]
+  %v53 = icmp sgt i32 %v51, 0
+  br i1 %v53, label %b15, label %b16
+
+b15:                                              ; preds = %b14
+  %v54 = getelementptr inbounds %s.0, %s.0* %v52, i32 0, i32 5
+  store %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 3), %s.0** %v54, align 4
+  br label %b16
+
+b16:                                              ; preds = %b15, %b14
+  %v55 = getelementptr inbounds %s.0, %s.0* %v52, i32 0, i32 6
+  store %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g4, i32 0, i32 3), %s.0** %v55, align 4
+  ret void
+}
+
+; Function Attrs: nounwind readonly
+declare i32 @f4(i8* nocapture, i8* nocapture) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readonly }

Added: llvm/trunk/test/CodeGen/Hexagon/constext-replace.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/constext-replace.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/constext-replace.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/constext-replace.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,151 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Check that when three or more addressing modes use the same constant
+; extender, they are transformed to use a register instead.
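+; That is, the expected code loads ##g1 into a register once and then uses
+; register-indexed accesses of the form "memw(rX+rY<<#2)" instead of repeating
+; the constant-extended form "memw(rY<<#2+##g1)" at every access, as the
+; CHECK and CHECK-NOT patterns below show.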
+; CHECK: r{{[0-9]+}} = ##g1
+; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}+r{{[0-9]+}}<<#2)
+; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}+r{{[0-9]+}}<<#2)
+; CHECK: r{{[0-9]+}} = memw(r{{[0-9]+}}+r{{[0-9]+}}<<#2)
+; CHECK-NOT: r{{[0-9]+}} = memw(r{{[0-9]+}}<<#2+##g1)
+; CHECK-NOT: r{{[0-9]+}} = memw(r{{[0-9]+}}<<#2+##g1)
+; CHECK-NOT: r{{[0-9]+}} = memw(r{{[0-9]+}}<<#2+##g1)
+; CHECK:  memw(r{{[0-9]+}}+r{{[0-9]+}}<<#2) = r{{[0-9]+}}
+; CHECK:  memw(r{{[0-9]+}}+r{{[0-9]+}}<<#2) = r{{[0-9]+}}
+; CHECK:  memw(r{{[0-9]+}}+r{{[0-9]+}}<<#2) = r{{[0-9]+}}
+; CHECK-NOT:  memw(r{{[0-9]+}}<<#2+##g1) = r{{[0-9]+}}
+; CHECK-NOT:  memw(r{{[0-9]+}}<<#2+##g1) = r{{[0-9]+}}
+; CHECK-NOT:  memw(r{{[0-9]+}}<<#2+##g1) = r{{[0-9]+}}
+
+target triple = "hexagon-unknown-linux-gnu"
+
+ at g0 = external global i32
+ at g1 = external global [13595 x i32], align 8
+ at g2 = external global [13595 x i32], align 8
+
+define i32 @f0(i32 %a0, i32* nocapture %a1) {
+b0:
+  %v0 = load i32, i32* %a1, align 4
+  %v1 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g1, i32 0, i32 %v0
+  %v2 = load i32, i32* %v1, align 4
+  %v3 = icmp sgt i32 %v2, %a0
+  br i1 %v3, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  %v4 = load i32, i32* @g0, align 4
+  store i32 %v4, i32* %a1, align 4
+  %v5 = load i32, i32* @g0, align 4
+  %v6 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g2, i32 0, i32 %v5
+  %v7 = load i32, i32* %v6, align 4
+  store i32 %v7, i32* @g0, align 4
+  %v8 = load i32, i32* %a1, align 4
+  %v9 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g2, i32 0, i32 %v8
+  store i32 %v0, i32* %v9, align 4
+  %v10 = load i32, i32* %a1, align 4
+  %v11 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g1, i32 0, i32 %v10
+  store i32 %a0, i32* %v11, align 4
+  br label %b16
+
+b2:                                               ; preds = %b0
+  %v12 = icmp eq i32 %v2, %a0
+  br i1 %v12, label %b16, label %b3
+
+b3:                                               ; preds = %b2
+  br label %b4
+
+b4:                                               ; preds = %b13, %b3
+  %v13 = phi i32 [ %v45, %b13 ], [ %v0, %b3 ]
+  %v14 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g2, i32 0, i32 %v13
+  %v15 = load i32, i32* %v14, align 4
+  %v16 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g1, i32 0, i32 %v15
+  %v17 = load i32, i32* %v16, align 4
+  %v18 = icmp slt i32 %v17, %a0
+  br i1 %v18, label %b7, label %b5
+
+b5:                                               ; preds = %b4
+  %v19 = icmp eq i32 %v17, %a0
+  br i1 %v19, label %b16, label %b6
+
+b6:                                               ; preds = %b5
+  %v20 = load i32, i32* @g0, align 4
+  store i32 %v20, i32* %v14, align 4
+  %v21 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g1, i32 0, i32 %v20
+  store i32 %a0, i32* %v21, align 4
+  %v22 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g2, i32 0, i32 %v20
+  %v23 = load i32, i32* %v22, align 4
+  store i32 %v23, i32* @g0, align 4
+  store i32 %v15, i32* %v22, align 4
+  br label %b16
+
+b7:                                               ; preds = %b4
+  %v24 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g2, i32 0, i32 %v15
+  %v25 = load i32, i32* %v24, align 4
+  %v26 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g1, i32 0, i32 %v25
+  %v27 = load i32, i32* %v26, align 4
+  %v28 = icmp slt i32 %v27, %a0
+  br i1 %v28, label %b10, label %b8
+
+b8:                                               ; preds = %b7
+  %v29 = icmp eq i32 %v27, %a0
+  br i1 %v29, label %b16, label %b9
+
+b9:                                               ; preds = %b8
+  %v30 = load i32, i32* @g0, align 4
+  store i32 %v30, i32* %v24, align 4
+  %v31 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g1, i32 0, i32 %v30
+  store i32 %a0, i32* %v31, align 4
+  %v32 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g2, i32 0, i32 %v30
+  %v33 = load i32, i32* %v32, align 4
+  store i32 %v33, i32* @g0, align 4
+  store i32 %v25, i32* %v32, align 4
+  br label %b16
+
+b10:                                              ; preds = %b7
+  %v34 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g2, i32 0, i32 %v25
+  %v35 = load i32, i32* %v34, align 4
+  %v36 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g1, i32 0, i32 %v35
+  %v37 = load i32, i32* %v36, align 4
+  %v38 = icmp slt i32 %v37, %a0
+  br i1 %v38, label %b13, label %b11
+
+b11:                                              ; preds = %b10
+  %v39 = icmp eq i32 %v37, %a0
+  br i1 %v39, label %b16, label %b12
+
+b12:                                              ; preds = %b11
+  %v40 = load i32, i32* @g0, align 4
+  store i32 %v40, i32* %v34, align 4
+  %v41 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g1, i32 0, i32 %v40
+  store i32 %a0, i32* %v41, align 4
+  %v42 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g2, i32 0, i32 %v40
+  %v43 = load i32, i32* %v42, align 4
+  store i32 %v43, i32* @g0, align 4
+  store i32 %v35, i32* %v42, align 4
+  br label %b16
+
+b13:                                              ; preds = %b10
+  %v44 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g2, i32 0, i32 %v35
+  %v45 = load i32, i32* %v44, align 4
+  %v46 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g1, i32 0, i32 %v45
+  %v47 = load i32, i32* %v46, align 4
+  %v48 = icmp slt i32 %v47, %a0
+  br i1 %v48, label %b4, label %b14
+
+b14:                                              ; preds = %b13
+  %v49 = icmp eq i32 %v47, %a0
+  br i1 %v49, label %b16, label %b15
+
+b15:                                              ; preds = %b14
+  %v50 = load i32, i32* @g0, align 4
+  store i32 %v50, i32* %v44, align 4
+  %v51 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g1, i32 0, i32 %v50
+  store i32 %a0, i32* %v51, align 4
+  %v52 = getelementptr inbounds [13595 x i32], [13595 x i32]* @g2, i32 0, i32 %v50
+  %v53 = load i32, i32* %v52, align 4
+  store i32 %v53, i32* @g0, align 4
+  store i32 %v45, i32* %v52, align 4
+  br label %b16
+
+b16:                                              ; preds = %b15, %b14, %b12, %b11, %b9, %b8, %b6, %b5, %b2, %b1
+  %v54 = phi i32 [ 1, %b1 ], [ 1, %b6 ], [ 1, %b9 ], [ 1, %b12 ], [ 1, %b15 ], [ 0, %b2 ], [ 0, %b5 ], [ 0, %b8 ], [ 0, %b11 ], [ 0, %b14 ]
+  ret i32 %v54
+}

Added: llvm/trunk/test/CodeGen/Hexagon/count_0s.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/count_0s.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/count_0s.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/count_0s.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,52 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind readnone
+define i32 @f0(i32 %a0) #0 {
+b0:
+; CHECK: cl0
+  %v0 = tail call i32 @llvm.ctlz.i32(i32 %a0, i1 true)
+  ret i32 %v0
+}
+
+; Function Attrs: nounwind readnone speculatable
+declare i32 @llvm.ctlz.i32(i32, i1) #1
+
+; Function Attrs: nounwind readnone speculatable
+declare i64 @llvm.ctlz.i64(i64, i1) #1
+
+; Function Attrs: nounwind readnone
+define i32 @f1(i32 %a0) #0 {
+b0:
+; CHECK: ct0
+  %v0 = tail call i32 @llvm.cttz.i32(i32 %a0, i1 true)
+  ret i32 %v0
+}
+
+; Function Attrs: nounwind readnone speculatable
+declare i32 @llvm.cttz.i32(i32, i1) #1
+
+; Function Attrs: nounwind readnone speculatable
+declare i64 @llvm.cttz.i64(i64, i1) #1
+
+; Function Attrs: nounwind readnone
+define i32 @f2(i64 %a0) #0 {
+b0:
+; CHECK: cl0
+  %v0 = tail call i64 @llvm.ctlz.i64(i64 %a0, i1 true)
+  %v1 = trunc i64 %v0 to i32
+  ret i32 %v1
+}
+
+; Function Attrs: nounwind readnone
+define i32 @f3(i64 %a0) #0 {
+b0:
+; CHECK: ct0
+  %v0 = tail call i64 @llvm.cttz.i64(i64 %a0, i1 true)
+  %v1 = trunc i64 %v0 to i32
+  ret i32 %v1
+}
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind readnone speculatable }

Added: llvm/trunk/test/CodeGen/Hexagon/csr-stubs-spill-threshold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/csr-stubs-spill-threshold.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/csr-stubs-spill-threshold.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/csr-stubs-spill-threshold.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,51 @@
+; RUN: llc -march=hexagon  -O2 -spill-func-threshold=2 < %s | FileCheck %s
+
+declare i32 @f0(i32, i32, i32, i32, i32, i32)
+
+; CHECK-LABEL: f1:
+; CHECK: save_r16_through_r23
+define i32 @f1(i32 %a0, i32 %a11, i32 %a22, i32 %a33, i32 %a4, i32 %a5) #0 {
+b0:
+  %v0 = call i32 @f0(i32 0, i32 1, i32 2, i32 3, i32 4, i32 5)
+  %v1 = call i32 @f0(i32 %a0, i32 %a11, i32 %a22, i32 %a33, i32 %a4, i32 %a5)
+  %v2 = add i32 %v0, %v1
+  ret i32 %v2
+}
+
+declare i32 @f2(i32, i32, i32, i32)
+
+; CHECK-LABEL: f3:
+; CHECK: save_r16_through_r21
+define i32 @f3(i32 %a0, i32 %a11, i32 %a22, i32 %a33, i32 %a44, i32 %a5) #0 {
+b0:
+  %v0 = call i32 @f2(i32 0, i32 1, i32 2, i32 3)
+  %v1 = call i32 @f2(i32 %a0, i32 %a11, i32 %a22, i32 %a33)
+  %v2 = add i32 %v0, %v1
+  %v3 = add i32 %v2, %a44
+  ret i32 %v3
+}
+
+declare i32 @f4(i32, i32)
+
+; CHECK-LABEL: f5:
+; CHECK-NOT: save_r16_through_r19
+define i32 @f5(i32 %a0, i32 %a11, i32 %a22, i32 %a33, i32 %a4, i32 %a5) #0 {
+b0:
+  %v0 = call i32 @f4(i32 0, i32 1)
+  %v1 = call i32 @f4(i32 %a0, i32 %a11)
+  %v2 = add i32 %v0, %v1
+  ret i32 %v2
+}
+
+declare i32 @f6(i32)
+
+; CHECK-LABEL: f7:
+; CHECK-NOT: save_r16_through_r17
+define i32 @f7(i32 %a0, i32 %a11, i32 %a22, i32 %a3, i32 %a4, i32 %a5) #0 {
+b0:
+  %v0 = call i32 @f6(i32 0)
+  %v1 = call i32 @f6(i32 %a0)
+  ret i32 %v0
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/csr_stub_calls_dwarf_frame_info.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/csr_stub_calls_dwarf_frame_info.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/csr_stub_calls_dwarf_frame_info.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/csr_stub_calls_dwarf_frame_info.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,20 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+
+target triple = "hexagon-unknown-linux-gnu"
+
+declare i32 @f0(i32, i32)
+
+; CHECK: __save_r16_through_r21
+; CHECK: __restore_r16_through_r21_and_deallocframe
+
+; Function Attrs: optsize
+define i32 @f1(i32 %a0, i32 %a11, i32 %a22, i32 %a33, i32 %a44) #0 {
+b0:
+  %v0 = call i32 @f0(i32 1, i32 1)
+  %v1 = call i32 @f0(i32 %a0, i32 %a11)
+  %v2 = call i32 @f0(i32 %a22, i32 %a33)
+  %v3 = call i32 @f0(i32 %a0, i32 %a44)
+  ret i32 %v3
+}
+
+attributes #0 = { optsize }

Added: llvm/trunk/test/CodeGen/Hexagon/dag-combine-select-or0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/dag-combine-select-or0.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/dag-combine-select-or0.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/dag-combine-select-or0.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,29 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+; Make sure no mux with 0 is generated.
+; CHECK-NOT: mux{{.*}}#0
+; CHECK: endloop
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind readnone
+define i32 @f0(i32 %a0, i32 %a1) #0 {
+b0:
+  %v0 = icmp ugt i32 %a0, %a1
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v1 = phi i32 [ 0, %b0 ], [ %v5, %b1 ]
+  %v2 = phi i32 [ 0, %b0 ], [ %v7, %b1 ]
+  %v3 = phi i32 [ 1, %b0 ], [ %v6, %b1 ]
+  %v4 = select i1 %v0, i32 %v3, i32 0
+  %v5 = or i32 %v1, %v4
+  %v6 = shl i32 %v3, 1
+  %v7 = add i32 %v2, 1
+  %v8 = icmp eq i32 %v7, 32
+  br i1 %v8, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  ret i32 %v5
+}
+
+attributes #0 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/dag-indexed.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/dag-indexed.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/dag-indexed.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/dag-indexed.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,47 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+; Test that the DAG combiner doesn't assert when it attempts to replace
+; the chain of a post-increment store based on alias information. The code
+; in DAGCombiner is unable to convert indexed stores.
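+; (Illustrative note, not part of the original test: a post-increment store
+; on Hexagon has the form "memw(r0++#4) = r1"; it both writes memory and
+; produces the updated address, so the combiner cannot simply rewrite its
+; chain the way it can for a plain store.)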
+
+; Function Attrs: nounwind
+define void @f0(i32 %a0, i8* %a1, i8* %a2) #0 {
+b0:
+  switch i32 %a0, label %b5 [
+    i32 67830273, label %b1
+    i32 67502595, label %b3
+  ]
+
+b1:                                               ; preds = %b0
+  br i1 undef, label %b2, label %b5
+
+b2:                                               ; preds = %b1
+  br label %b5
+
+b3:                                               ; preds = %b0
+  br i1 undef, label %b4, label %b5
+
+b4:                                               ; preds = %b3
+  %v0 = bitcast i8* %a2 to i32*
+  store i32 0, i32* %v0, align 1, !tbaa !0
+  %v1 = getelementptr inbounds i8, i8* %a1, i32 4
+  %v2 = bitcast i8* %v1 to i32*
+  %v3 = load i32, i32* %v2, align 4, !tbaa !5
+  %v4 = getelementptr inbounds i8, i8* %a2, i32 4
+  %v5 = bitcast i8* %v4 to i32*
+  store i32 %v3, i32* %v5, align 1, !tbaa !5
+  br label %b5
+
+b5:                                               ; preds = %b4, %b3, %b2, %b1, %b0
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+
+!0 = !{!1, !2, i64 0}
+!1 = !{!"", !2, i64 0, !2, i64 4}
+!2 = !{!"long", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C/C++ TBAA"}
+!5 = !{!1, !2, i64 4}

Added: llvm/trunk/test/CodeGen/Hexagon/dccleana.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/dccleana.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/dccleana.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/dccleana.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,17 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; REQUIRES: asserts
+
+; CHECK: dccleana
+
+; Function Attrs: nounwind
+declare void @llvm.hexagon.Y2.dccleana(i8*) #0
+
+define i32 @f0(i8* %a0) {
+b0:
+  tail call void @llvm.hexagon.Y2.dccleana(i8* %a0)
+  %v0 = load i8, i8* %a0
+  %v1 = zext i8 %v0 to i32
+  ret i32 %v1
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/dealloc-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/dealloc-store.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/dealloc-store.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/dealloc-store.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,65 @@
+; RUN: llc -march=hexagon -O2 -hexagon-shrink-frame=0 -hexagon-cext-threshold=1 < %s | FileCheck %s
+
+target triple = "hexagon"
+
+%s.0 = type <{ i8*, i8*, i16, i8, i8, i8 }>
+%s.1 = type { %s.2, [14 x %s.6*], [14 x i8], [6 x i8], [4 x %s.4], [4 x %s.8], [4 x %s.8], [14 x %s.10], %s.6*, %s.6* }
+%s.2 = type { [4 x %s.3], i16, i32, i32, i32, i32 }
+%s.3 = type { i8, i8, i8, i8 }
+%s.4 = type { i8, i32, [16 x %s.5], %s.6, i8, [7 x i8] }
+%s.5 = type { void (i8*)*, i8*, i32 }
+%s.6 = type { %s.7*, i32, %s.7*, i32, i32, i32, %s.7*, %s.7*, i32, i8, i32*, i32, i32, i32*, i32*, i32, i8, i32*, i32, %s.5*, i32, i32, i32, void (%s.6*)*, i32, i8 }
+%s.7 = type { i32, i16, i16 }
+%s.8 = type { %s.9 }
+%s.9 = type { i8*, i32, i32 }
+%s.10 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
+
+ at g0 = internal constant %s.0 <{ i8* getelementptr inbounds ([125 x i8], [125 x i8]* @g1, i32 0, i32 0), i8* getelementptr inbounds ([82 x i8], [82 x i8]* @g2, i32 0, i32 0), i16 1694, i8 4, i8 0, i8 0 }>, section ".rodata.diag", align 1
+ at g1 = private unnamed_addr constant [125 x i8] c"............................................................................................................................\00", align 8
+ at g2 = private unnamed_addr constant [82 x i8] c"Assertion (..............................................................) failed\00", align 8
+ at g3 = external global %s.1
+
+define void @f0(%s.6* %a0, i8 zeroext %a1) {
+;  Look for a dealloc_return in a packet with nothing else.
+;
+; CHECK: if (p{{[0-3]}}) memw(
+; CHECK: }
+; CHECK: {
+; CHECK-NEXT: dealloc_return
+; CHECK-NEXT: }
+b0:
+  %v0 = add i8 %a1, -2
+  %v1 = icmp ugt i8 %v0, 1
+  br i1 %v1, label %b1, label %b2, !prof !0
+
+b1:                                               ; preds = %b0
+  tail call void @f1(%s.0* @g0) #1
+  unreachable
+
+b2:                                               ; preds = %b0
+  %v2 = icmp eq i8 %a1, 2
+  br i1 %v2, label %b3, label %b4
+
+b3:                                               ; preds = %b2
+  store %s.6* %a0, %s.6** getelementptr inbounds (%s.1, %s.1* @g3, i32 0, i32 8), align 4, !tbaa !1
+  br label %b5
+
+b4:                                               ; preds = %b2
+  store %s.6* %a0, %s.6** getelementptr inbounds (%s.1, %s.1* @g3, i32 0, i32 9), align 4, !tbaa !1
+  br label %b5
+
+b5:                                               ; preds = %b4, %b3
+  ret void
+}
+
+; Function Attrs: noreturn
+declare void @f1(%s.0*) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" }
+attributes #1 = { noreturn }
+
+!0 = !{!"branch_weights", i32 4, i32 64}
+!1 = !{!2, !2, i64 0}
+!2 = !{!"any pointer", !3}
+!3 = !{!"omnipotent char", !4}
+!4 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/dealloc_return.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/dealloc_return.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/dealloc_return.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/dealloc_return.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,27 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+ at g0 = external global i32
+ at g1 = external global i32
+ at g2 = external global i32
+
+; CHECK: allocframe(r29,
+; CHECK: dealloc_return
+; CHECK-NEXT: }
+
+; Function Attrs: nounwind
+define i32 @f0() #0 {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = load i32, i32* @g0, align 4
+  store i32 %v1, i32* %v0, align 4
+  %v2 = load i32, i32* %v0, align 4
+  %v3 = load i32, i32* @g1, align 4
+  %v4 = mul nsw i32 %v2, %v3
+  %v5 = load i32, i32* @g2, align 4
+  %v6 = add nsw i32 %v4, %v5
+  store i32 %v6, i32* %v0, align 4
+  %v7 = load i32, i32* %v0, align 4
+  ret i32 %v7
+}
+
+attributes #0 = { nounwind "no-frame-pointer-elim"="true" }

Added: llvm/trunk/test/CodeGen/Hexagon/debug-line_table_start.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/debug-line_table_start.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/debug-line_table_start.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/debug-line_table_start.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,32 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; This test case is a little iffy. It checks for line_table_start,
+; which in the future may be completely replaced with some other label name.
+; The first check is for the use, the second check is for the definition.
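+; (Illustrative note, not part of the original test: the use is expected to be
+; a reference to .Lline_table_start0 from the .debug_info section's statement
+; list, while the definition is the ".Lline_table_start0:" label emitted at
+; the start of the .debug_line section.)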
+
+; CHECK: .Lline_table_start0
+; CHECK: .Lline_table_start0:
+
+; Function Attrs: nounwind
+define i32 @f0() #0 !dbg !5 {
+b0:
+  %v0 = alloca i32, align 4
+  store i32 0, i32* %v0, align 4
+  ret i32 0, !dbg !9
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "Clang", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "/tmp/1.c", directory: "/tmp")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = distinct !DISubprogram(name: "main", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, isOptimized: false, unit: !0, variables: !2)
+!6 = !DISubroutineType(types: !7)
+!7 = !{!8}
+!8 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!9 = !DILocation(line: 2, column: 3, scope: !5)

Added: llvm/trunk/test/CodeGen/Hexagon/debug-prologue-loc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/debug-prologue-loc.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/debug-prologue-loc.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/debug-prologue-loc.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,71 @@
+; RUN: llc -O2 -march=hexagon < %s | FileCheck %s
+
+; CHECK: allocframe{{.*}}
+; CHECK-NEXT: }
+; CHECK-NEXT:{{.*}}tmp{{[0-9]+}}:
+; CHECK-NEXT: .loc {{[0-9 ]+}} prologue_end
+
+; Function Attrs: nounwind
+define i32 @f0(i32 %a0, i32 %a1) #0 !dbg !5 {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = alloca i32, align 4
+  %v2 = alloca i32*, align 4
+  store i32 %a0, i32* %v0, align 4
+  call void @llvm.dbg.declare(metadata i32* %v0, metadata !9, metadata !DIExpression()), !dbg !10
+  store i32 %a1, i32* %v1, align 4
+  call void @llvm.dbg.declare(metadata i32* %v1, metadata !11, metadata !DIExpression()), !dbg !12
+  call void @llvm.dbg.declare(metadata i32** %v2, metadata !13, metadata !DIExpression()), !dbg !15
+  store i32* %v1, i32** %v2, align 4, !dbg !15
+  %v3 = load i32, i32* %v0, align 4, !dbg !16
+  %v4 = load i32*, i32** %v2, align 4, !dbg !17
+  %v5 = call i32 @f1(i32* %v4), !dbg !18
+  %v6 = add nsw i32 %v3, %v5, !dbg !19
+  ret i32 %v6, !dbg !20
+}
+
+; Function Attrs: nounwind readnone speculatable
+declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+
+; Function Attrs: nounwind
+define i32 @f1(i32* %a0) #0 !dbg !21 {
+b0:
+  %v0 = alloca i32*, align 4
+  store i32* %a0, i32** %v0, align 4
+  call void @llvm.dbg.declare(metadata i32** %v0, metadata !24, metadata !DIExpression()), !dbg !25
+  ret i32 0, !dbg !26
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" }
+attributes #1 = { nounwind readnone speculatable }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "Clang", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "/tmp/test.c", directory: "/tmp")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!6 = !DISubroutineType(types: !7)
+!7 = !{!8, !8, !8}
+!8 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
+!9 = !DILocalVariable(name: "a", arg: 1, scope: !5, file: !1, line: 1, type: !8)
+!10 = !DILocation(line: 1, column: 13, scope: !5)
+!11 = !DILocalVariable(name: "b", arg: 2, scope: !5, file: !1, line: 1, type: !8)
+!12 = !DILocation(line: 1, column: 20, scope: !5)
+!13 = !DILocalVariable(name: "ptr", scope: !5, file: !1, line: 2, type: !14)
+!14 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !8, size: 32, align: 32)
+!15 = !DILocation(line: 2, column: 8, scope: !5)
+!16 = !DILocation(line: 3, column: 10, scope: !5)
+!17 = !DILocation(line: 3, column: 16, scope: !5)
+!18 = !DILocation(line: 3, column: 12, scope: !5)
+!19 = !DILocation(line: 3, column: 11, scope: !5)
+!20 = !DILocation(line: 3, column: 3, scope: !5)
+!21 = distinct !DISubprogram(name: "bar", scope: !1, file: !1, line: 7, type: !22, isLocal: false, isDefinition: true, scopeLine: 7, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!22 = !DISubroutineType(types: !23)
+!23 = !{!8, !14}
+!24 = !DILocalVariable(name: "var", arg: 1, scope: !21, file: !1, line: 7, type: !14)
+!25 = !DILocation(line: 7, column: 14, scope: !21)
+!26 = !DILocation(line: 8, column: 3, scope: !21)

Added: llvm/trunk/test/CodeGen/Hexagon/debug-prologue.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/debug-prologue.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/debug-prologue.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/debug-prologue.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,74 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; Verify store/load for -g prologue
+
+; CHECK: allocframe
+; CHECK: memw([[MEM:.*]]) = r{{[0-9]+}}
+; CHECK: r{{[0-9]+}} = memw([[MEM]])
+
+; Function Attrs: nounwind
+define i32 @f0(i32 %a0) #0 !dbg !5 {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = alloca i32, align 4
+  %v2 = alloca i32, align 4
+  store i32 %a0, i32* %v1, align 4
+  call void @llvm.dbg.declare(metadata i32* %v1, metadata !9, metadata !DIExpression()), !dbg !10
+  call void @llvm.dbg.declare(metadata i32* %v2, metadata !11, metadata !DIExpression()), !dbg !12
+  %v3 = load i32, i32* %v1, align 4, !dbg !13
+  %v4 = icmp sgt i32 %v3, 1, !dbg !15
+  br i1 %v4, label %b1, label %b2, !dbg !16
+
+b1:                                               ; preds = %b0
+  %v5 = load i32, i32* %v1, align 4, !dbg !17
+  %v6 = load i32, i32* %v1, align 4, !dbg !18
+  %v7 = sub nsw i32 %v6, 1, !dbg !19
+  %v8 = call i32 @f0(i32 %v7), !dbg !20
+  %v9 = mul nsw i32 %v5, %v8, !dbg !21
+  store i32 %v9, i32* %v0, align 4, !dbg !22
+  br label %b3, !dbg !22
+
+b2:                                               ; preds = %b0
+  %v10 = load i32, i32* %v1, align 4, !dbg !23
+  store i32 %v10, i32* %v0, align 4, !dbg !24
+  br label %b3, !dbg !24
+
+b3:                                               ; preds = %b2, %b1
+  %v11 = load i32, i32* %v0, align 4, !dbg !25
+  ret i32 %v11, !dbg !25
+}
+
+; Function Attrs: nounwind readnone speculatable
+declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { nounwind readnone speculatable }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "Clang", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "/tmp/test.c", directory: "/tmp")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = distinct !DISubprogram(name: "factorial", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: false, unit: !0, variables: !2)
+!6 = !DISubroutineType(types: !7)
+!7 = !{!8, !8}
+!8 = !DIBasicType(name: "int", size: 32, align: 32, encoding: DW_ATE_signed)
+!9 = !DILocalVariable(name: "value", arg: 1, scope: !5, file: !1, line: 1, type: !8)
+!10 = !DILocation(line: 1, column: 20, scope: !5)
+!11 = !DILocalVariable(name: "local_var", scope: !5, file: !1, line: 2, type: !8)
+!12 = !DILocation(line: 2, column: 7, scope: !5)
+!13 = !DILocation(line: 3, column: 7, scope: !14)
+!14 = distinct !DILexicalBlock(scope: !5, file: !1, line: 3, column: 7)
+!15 = !DILocation(line: 3, column: 13, scope: !14)
+!16 = !DILocation(line: 3, column: 7, scope: !5)
+!17 = !DILocation(line: 4, column: 12, scope: !14)
+!18 = !DILocation(line: 4, column: 28, scope: !14)
+!19 = !DILocation(line: 4, column: 33, scope: !14)
+!20 = !DILocation(line: 4, column: 18, scope: !14)
+!21 = !DILocation(line: 4, column: 17, scope: !14)
+!22 = !DILocation(line: 4, column: 5, scope: !14)
+!23 = !DILocation(line: 5, column: 15, scope: !14)
+!24 = !DILocation(line: 5, column: 8, scope: !14)
+!25 = !DILocation(line: 6, column: 1, scope: !5)

Added: llvm/trunk/test/CodeGen/Hexagon/def-undef-deps.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/def-undef-deps.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/def-undef-deps.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/def-undef-deps.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,73 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+; The register coalescer created (via rematerialization) a definition of
+; a register (R0), which had the "undef" flag set. This caused the def to be
+; ignored in the dependence graph, which then led to an invalid instruction
+; move in the machine scheduler (and an assert).
+; The undef flags are already being cleared in the register cleanup, but
+; that happens after register allocation. The undef flags need to be cleared
+; earlier to avoid this issue.
+
+%0 = type <{ i8*, i8*, i16, i8, i8, i8 }>
+%1 = type { %2, %5, [3 x %3] }
+%2 = type { %3, %4, i16, i16 }
+%3 = type { i32, i32, i8, i8 }
+%4 = type { i32, i32, i32 }
+%5 = type { i8, i8, i8, i8, i32, i32, i16, i16, i32, i8, i8, i8, i32, i32, i16, i16, i32 }
+%6 = type { %7, i8, i16, i16, i8, i8, i8, i8, i8 }
+%7 = type { i32, i32, i16, i16, i16, i8 }
+
+ at g0 = external constant %0, align 1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.M2.mpy.up(i32, i32) #1
+
+declare void @f0(%0*, i32, i32, i32, i32, i32)
+
+define void @f1(i8 zeroext %a0, %1* nocapture %a1, i8 zeroext %a2, i8 zeroext %a3) #0 {
+b0:
+  %v0 = getelementptr inbounds %1, %1* %a1, i32 0, i32 1, i32 9
+  %v1 = load i8, i8* %v0, align 1
+  %v2 = zext i8 %v1 to i32
+  %v3 = getelementptr inbounds %1, %1* %a1, i32 0, i32 2, i32 %v2
+  %v4 = tail call %6* @f2(i32 undef, i8 zeroext 0)
+  br i1 undef, label %b1, label %b5
+
+b1:                                               ; preds = %b0
+  %v5 = tail call i32 @llvm.hexagon.M2.mpy.up(i32 undef, i32 undef)
+  %v6 = tail call i32 @llvm.hexagon.M2.mpy.up(i32 undef, i32 undef)
+  %v7 = zext i32 %v5 to i64
+  %v8 = zext i32 %v6 to i64
+  %v9 = add nuw nsw i64 %v8, %v7
+  %v10 = lshr i64 %v9, 5
+  %v11 = trunc i64 %v10 to i32
+  store i32 %v11, i32* undef, align 4
+  br i1 undef, label %b3, label %b2
+
+b2:                                               ; preds = %b1
+  %v12 = getelementptr inbounds %3, %3* %v3, i32 0, i32 0
+  store i32 0, i32* %v12, align 4
+  tail call void @f0(%0* @g0, i32 undef, i32 0, i32 undef, i32 undef, i32 undef)
+  br label %b4
+
+b3:                                               ; preds = %b1
+  br label %b4
+
+b4:                                               ; preds = %b3, %b2
+  unreachable
+
+b5:                                               ; preds = %b0
+  br i1 undef, label %b6, label %b7
+
+b6:                                               ; preds = %b5
+  unreachable
+
+b7:                                               ; preds = %b5
+  unreachable
+}
+
+declare %6* @f2(i32, i8 zeroext)
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/default-align.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/default-align.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/default-align.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/default-align.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,53 @@
+; RUN: llc -O2 -march=hexagon < %s | FileCheck %s
+; Make sure we don't use setbit to add offsets to stack objects.
+; CHECK-NOT: setbit
+
+target triple = "hexagon-unknown--elf"
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  %v0 = alloca [64 x float], align 16
+  %v1 = bitcast [64 x float]* %v0 to i8*
+  call void @llvm.lifetime.start.p0i8(i64 256, i8* %v1) #1
+  %v2 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 8
+  store float 0.000000e+00, float* %v2, align 16, !tbaa !0
+  %v3 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 0
+  store float 0.000000e+00, float* %v3, align 16, !tbaa !0
+  %v4 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 9
+  store float 0.000000e+00, float* %v4, align 4, !tbaa !0
+  %v5 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 1
+  store float 0.000000e+00, float* %v5, align 4, !tbaa !0
+  %v6 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 10
+  store float 0.000000e+00, float* %v6, align 8, !tbaa !0
+  %v7 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 2
+  store float 0.000000e+00, float* %v7, align 8, !tbaa !0
+  %v8 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 11
+  store float 1.000000e+00, float* %v8, align 4, !tbaa !0
+  %v9 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 3
+  store float 1.000000e+00, float* %v9, align 4, !tbaa !0
+  %v10 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 12
+  store float 0.000000e+00, float* %v10, align 16, !tbaa !0
+  %v11 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 4
+  store float 0.000000e+00, float* %v11, align 16, !tbaa !0
+  call void @f1(float* %v3) #2
+  call void @llvm.lifetime.end.p0i8(i64 256, i8* %v1) #1
+  ret void
+}
+
+declare void @f1(float*) #0
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { argmemonly nounwind }
+attributes #2 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"float", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/deflate.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/deflate.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/deflate.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/deflate.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,32 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Check that the parsing succeeded.
+; CHECK: f0
+
+target triple = "hexagon"
+
+ at g0 = external global [0 x i16], align 8
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  br label %b2
+
+b1:                                               ; preds = %b2
+  ret void
+
+b2:                                               ; preds = %b2, %b0
+  %v0 = phi i32 [ 0, %b0 ], [ %v1, %b2 ]
+  %v1 = add nsw i32 %v0, 4
+  %v2 = getelementptr [0 x i16], [0 x i16]* @g0, i32 0, i32 %v0
+  %v3 = bitcast i16* %v2 to <4 x i16>*
+  %v4 = load <4 x i16>, <4 x i16>* %v3, align 2
+  %v5 = icmp slt <4 x i16> %v4, zeroinitializer
+  %v6 = xor <4 x i16> %v4, <i16 -32768, i16 -32768, i16 -32768, i16 -32768>
+  %v7 = select <4 x i1> %v5, <4 x i16> %v6, <4 x i16> zeroinitializer
+  store <4 x i16> %v7, <4 x i16>* %v3, align 2
+  %v8 = icmp slt i32 %v1, 32768
+  br i1 %v8, label %b2, label %b1
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/dhry.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/dhry.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/dhry.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/dhry.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,39 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: combine(#11,#10)
+
+; Function Attrs: nounwind
+define void @f0(i32* nocapture %a0, i32* nocapture %a1) #0 {
+b0:
+  br label %b2
+
+b1:                                               ; preds = %b4
+  br label %b5
+
+b2:                                               ; preds = %b0
+  %v0 = getelementptr inbounds i32, i32* %a0, i32 2
+  %v1 = getelementptr inbounds i32, i32* %a0, i32 3
+  br label %b3
+
+b3:                                               ; preds = %b2
+  br label %b4
+
+b4:                                               ; preds = %b4, %b3
+  %v2 = load i32, i32* %v0, align 4, !tbaa !0
+  %v3 = load i32, i32* %v1, align 4, !tbaa !0
+  %v4 = tail call i32 @f1(i32 %v2, i32 %v3) #0
+  %v5 = icmp eq i32 %v4, 0
+  br i1 %v5, label %b4, label %b1
+
+b5:                                               ; preds = %b1
+  %v6 = tail call i32 @f1(i32 10, i32 11) #0
+  ret void
+}
+
+declare i32 @f1(i32, i32)
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/dhry_proc8.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/dhry_proc8.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/dhry_proc8.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/dhry_proc8.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,53 @@
+; RUN: llc -march=hexagon -O3 < %s | FileCheck %s
+
+
+; Test that we generate no more than 7 packets in f0.
+;
+; CHECK: f0:
+; CHECK: {
+; CHECK: {
+; CHECK: {
+; CHECK: {
+; CHECK: {
+; CHECK: {
+; CHECK: {
+; CHECK-NOT: {
+
+ at g0 = external global i32
+
+; Function Attrs: nounwind
+define i32 @f0(i32* nocapture %a0, [50 x i32]* nocapture %a1, i32 %a2, i32 %a3) #0 {
+b0:
+  %v0 = add nsw i32 %a2, 5
+  %v1 = getelementptr inbounds i32, i32* %a0, i32 %v0
+  store i32 %a3, i32* %v1, align 4, !tbaa !0
+  %v2 = add nsw i32 %a2, 6
+  %v3 = getelementptr inbounds i32, i32* %a0, i32 %v2
+  store i32 %a3, i32* %v3, align 4, !tbaa !0
+  %v4 = add nsw i32 %a2, 35
+  %v5 = getelementptr inbounds i32, i32* %a0, i32 %v4
+  store i32 %v0, i32* %v5, align 4, !tbaa !0
+  %v6 = getelementptr inbounds [50 x i32], [50 x i32]* %a1, i32 %v0, i32 %v0
+  store i32 %v0, i32* %v6, align 4, !tbaa !0
+  %v7 = add nsw i32 %a2, 6
+  %v8 = getelementptr inbounds [50 x i32], [50 x i32]* %a1, i32 %v0, i32 %v7
+  store i32 %v0, i32* %v8, align 4, !tbaa !0
+  %v9 = add nsw i32 %a2, 4
+  %v10 = getelementptr inbounds [50 x i32], [50 x i32]* %a1, i32 %v0, i32 %v9
+  %v11 = load i32, i32* %v10, align 4, !tbaa !0
+  %v12 = add nsw i32 %v11, 1
+  store i32 %v12, i32* %v10, align 4, !tbaa !0
+  %v13 = load i32, i32* %v1, align 4, !tbaa !0
+  %v14 = add nsw i32 %a2, 25
+  %v15 = getelementptr inbounds [50 x i32], [50 x i32]* %a1, i32 %v14, i32 %v0
+  store i32 %v13, i32* %v15, align 4, !tbaa !0
+  store i32 5, i32* @g0, align 4, !tbaa !0
+  ret i32 undef
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/dhry_stall.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/dhry_stall.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/dhry_stall.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/dhry_stall.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,48 @@
+; RUN: llc -march=hexagon -O3 < %s | FileCheck %s
+
+; CHECK: }
+; CHECK: [[REG0:r([0-9]+)]] = addasl
+; CHECK: {
+; CHECK: }
+; CHECK: memw([[REG0]]
+
+target triple = "hexagon"
+
+ at g0 = external global i32
+
+; Function Attrs: nounwind
+define i32 @f0(i32* nocapture %a0, [50 x i32]* nocapture %a1, i32 %a2, i32 %a3) #0 {
+b0:
+  %v0 = add nsw i32 %a2, 5
+  %v1 = getelementptr inbounds i32, i32* %a0, i32 %v0
+  store i32 %a3, i32* %v1, align 4, !tbaa !0
+  %v2 = add nsw i32 %a2, 6
+  %v3 = getelementptr inbounds i32, i32* %a0, i32 %v2
+  store i32 %a3, i32* %v3, align 4, !tbaa !0
+  %v4 = add nsw i32 %a2, 35
+  %v5 = getelementptr inbounds i32, i32* %a0, i32 %v4
+  store i32 %v0, i32* %v5, align 4, !tbaa !0
+  %v6 = getelementptr inbounds [50 x i32], [50 x i32]* %a1, i32 %v0, i32 %v0
+  store i32 %v0, i32* %v6, align 4, !tbaa !0
+  %v7 = add nsw i32 %a2, 6
+  %v8 = getelementptr inbounds [50 x i32], [50 x i32]* %a1, i32 %v0, i32 %v7
+  store i32 %v0, i32* %v8, align 4, !tbaa !0
+  %v9 = add nsw i32 %a2, 4
+  %v10 = getelementptr inbounds [50 x i32], [50 x i32]* %a1, i32 %v0, i32 %v9
+  %v11 = load i32, i32* %v10, align 4, !tbaa !0
+  %v12 = add nsw i32 %v11, 1
+  store i32 %v12, i32* %v10, align 4, !tbaa !0
+  %v13 = load i32, i32* %v1, align 4, !tbaa !0
+  %v14 = add nsw i32 %a2, 25
+  %v15 = getelementptr inbounds [50 x i32], [50 x i32]* %a1, i32 %v14, i32 %v0
+  store i32 %v13, i32* %v15, align 4, !tbaa !0
+  store i32 5, i32* @g0, align 4, !tbaa !0
+  ret i32 undef
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/dont_rotate_pregs_at_O2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/dont_rotate_pregs_at_O2.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/dont_rotate_pregs_at_O2.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/dont_rotate_pregs_at_O2.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,68 @@
+; RUN: llc -O2 -march=hexagon -hexagon-eif=0 < %s | FileCheck %s
+
+; Make sure we are not rotating registers at O2.
+; CHECK-NOT: p1 =
+; CHECK-NOT: p2 =
+
+target triple = "hexagon-unknown-linux-gnu"
+
+; Function Attrs: nounwind readnone
+define i32 @f0(i32 %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5) #0 {
+b0:
+  %v0 = icmp slt i32 %a0, %a1
+  br i1 %v0, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  %v1 = mul nsw i32 %a1, %a0
+  br label %b2
+
+b2:                                               ; preds = %b1, %b0
+  %v2 = phi i32 [ %v1, %b1 ], [ 0, %b0 ]
+  %v3 = icmp sgt i32 %a0, %a1
+  br i1 %v3, label %b3, label %b4
+
+b3:                                               ; preds = %b2
+  %v4 = mul nsw i32 %a2, %a1
+  %v5 = add nsw i32 %v4, %a0
+  %v6 = add nsw i32 %v5, %a3
+  %v7 = add nsw i32 %v6, %v2
+  br label %b4
+
+b4:                                               ; preds = %b3, %b2
+  %v8 = phi i32 [ %v7, %b3 ], [ %v2, %b2 ]
+  %v9 = icmp sgt i32 %a2, %a3
+  br i1 %v9, label %b5, label %b6
+
+b5:                                               ; preds = %b4
+  %v10 = mul nsw i32 %a3, %a2
+  %v11 = add nsw i32 %v8, %v10
+  br label %b6
+
+b6:                                               ; preds = %b5, %b4
+  %v12 = phi i32 [ %v11, %b5 ], [ %v8, %b4 ]
+  %v13 = icmp sgt i32 %a3, %a2
+  br i1 %v13, label %b7, label %b8
+
+b7:                                               ; preds = %b6
+  %v14 = sdiv i32 %a3, 2
+  %v15 = mul nsw i32 %v14, %a0
+  %v16 = add nsw i32 %v15, %v12
+  br label %b8
+
+b8:                                               ; preds = %b7, %b6
+  %v17 = phi i32 [ %v16, %b7 ], [ %v12, %b6 ]
+  %v18 = icmp slt i32 %a4, %a5
+  br i1 %v18, label %b9, label %b10
+
+b9:                                               ; preds = %b8
+  %v19 = mul i32 %a4, %a3
+  %v20 = mul i32 %v19, %a5
+  %v21 = add nsw i32 %v17, %v20
+  br label %b10
+
+b10:                                              ; preds = %b9, %b8
+  %v22 = phi i32 [ %v21, %b9 ], [ %v17, %b8 ]
+  ret i32 %v22
+}
+
+attributes #0 = { nounwind readnone "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/dwarf-discriminator.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/dwarf-discriminator.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/dwarf-discriminator.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/dwarf-discriminator.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,54 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: {{ discriminator }}
+
+; Function Attrs: nounwind readnone
+define i32 @f0(i32 %a0, i32 %a1) local_unnamed_addr #0 !dbg !5 {
+b0:
+  call void @llvm.dbg.value(metadata i32 %a0, metadata !10, metadata !DIExpression()), !dbg !12
+  call void @llvm.dbg.value(metadata i32 %a1, metadata !11, metadata !DIExpression()), !dbg !13
+  %v0 = mul nsw i32 %a1, 30000, !dbg !14
+  %v1 = icmp slt i32 %v0, %a0, !dbg !16
+  %v2 = select i1 %v1, i32 %a0, i32 %v0, !dbg !16
+  %v3 = add nsw i32 %v2, %a1, !dbg !17
+  ret i32 %v3, !dbg !18
+}
+
+; Function Attrs: nounwind readnone
+define i32 @f1() local_unnamed_addr #0 !dbg !19 {
+b0:
+  ret i32 0, !dbg !23
+}
+
+; Function Attrs: nounwind readnone speculatable
+declare void @llvm.dbg.value(metadata, metadata, metadata) #1
+
+attributes #0 = { nounwind readnone "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length64b" }
+attributes #1 = { nounwind readnone speculatable }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "QuIC LLVM Hexagon Clang version hexagon-clang-82-1453 (based on LLVM 4.0.0)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !2)
+!1 = !DIFile(filename: "/tmp/1.c", directory: "/local/mnt/workspace")
+!2 = !{}
+!3 = !{i32 2, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = distinct !DISubprogram(name: "bar", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 1, flags: DIFlagPrototyped, isOptimized: true, unit: !0, variables: !9)
+!6 = !DISubroutineType(types: !7)
+!7 = !{!8, !8, !8}
+!8 = !DIBasicType(name: "long int", size: 32, encoding: DW_ATE_signed)
+!9 = !{!10, !11}
+!10 = !DILocalVariable(name: "x", arg: 1, scope: !5, file: !1, line: 1, type: !8)
+!11 = !DILocalVariable(name: "y", arg: 2, scope: !5, file: !1, line: 1, type: !8)
+!12 = !DILocation(line: 1, column: 15, scope: !5)
+!13 = !DILocation(line: 1, column: 23, scope: !5)
+!14 = !DILocation(line: 2, column: 14, scope: !15)
+!15 = !DILexicalBlockFile(scope: !5, file: !1, discriminator: 1)
+!16 = !DILocation(line: 2, column: 1, scope: !5)
+!17 = !DILocation(line: 4, column: 12, scope: !5)
+!18 = !DILocation(line: 4, column: 3, scope: !5)
+!19 = distinct !DISubprogram(name: "main", scope: !1, file: !1, line: 7, type: !20, isLocal: false, isDefinition: true, scopeLine: 7, isOptimized: true, unit: !0, variables: !2)
+!20 = !DISubroutineType(types: !21)
+!21 = !{!22}
+!22 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!23 = !DILocation(line: 7, column: 14, scope: !19)

Added: llvm/trunk/test/CodeGen/Hexagon/eh_return-r30.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/eh_return-r30.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/eh_return-r30.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/eh_return-r30.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,17 @@
+; RUN: llc -march=hexagon -O2 < %s
+; REQUIRES: asserts
+
+target triple = "hexagon"
+
+; Function Attrs: noreturn nounwind
+define void @f0(i32 %a0, i8* %a1) #0 {
+b0:
+  tail call void @llvm.eh.return.i32(i32 %a0, i8* %a1)
+  unreachable
+}
+
+; Function Attrs: nounwind
+declare void @llvm.eh.return.i32(i32, i8*) #1
+
+attributes #0 = { noreturn nounwind }
+attributes #1 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/eh_save_restore.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/eh_save_restore.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/eh_save_restore.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/eh_save_restore.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,96 @@
+; RUN: llc -O3 -march=hexagon -hexagon-small-data-threshold=0 -disable-packetizer < %s | FileCheck %s
+
+; This test was originally written to check that we don't save an entire double
+; register if only one of its integer registers needs to be saved. The problem
+; occurs in exception handling, which only emits information for the registers
+; in the callee-saved list (and not complete double registers unless both
+; parts of the double register are used).
+; Over time, we evolved into saving the double register and updating the debug
+; information to cover the entire double register.
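+; (Illustrative note, not part of the original test: "r17:16" and "r19:18" in
+; the checks below are 64-bit register pairs, and "memd" stores such a pair
+; with a single double-word store, which is what saving the "double register"
+; refers to.)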
+
+; Disable the packetizer to avoid complications caused by potentially
+; packetizing one of the stores with allocframe, which would change the
+; relative order of the stores with the CFI instructions.
+
+; CHECK: cfi_startproc
+; CHECK-DAG: cfi_offset r16
+; CHECK-DAG: cfi_offset r17
+; CHECK-DAG: cfi_offset r18
+; CHECK-DAG: cfi_offset r19
+; CHECK: memd(r29+{{.*}}) = r17:16
+; CHECK: memd(r29+{{.*}}) = r19:18
+
+%s.0 = type { i32 }
+
+@g0 = global i32 0, align 4
+@g1 = external constant i8*
+
+; Function Attrs: noreturn
+define void @f0(i64 %a0) #0 personality i8* bitcast (i32 (...)* @f2 to i8*) {
+b0:
+  %v0 = alloca %s.0, align 4
+  %v1 = trunc i64 %a0 to i32
+  %v2 = lshr i64 %a0, 32
+  %v3 = trunc i64 %v2 to i32
+  %v4 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 0
+  store i32 0, i32* %v4, align 4, !tbaa !0
+  %v5 = load i32, i32* @g0, align 4, !tbaa !5
+  %v6 = or i32 %v5, 1
+  store i32 %v6, i32* @g0, align 4, !tbaa !5
+  %v7 = call i8* @f1(i32 4) #1
+  %v8 = bitcast i8* %v7 to i32*
+  %v9 = bitcast %s.0* %v0 to i8*
+  %v10 = getelementptr inbounds i8, i8* %v9, i32 %v3
+  %v11 = bitcast i8* %v10 to %s.0*
+  %v12 = and i32 %v1, 1
+  %v13 = icmp eq i32 %v12, 0
+  br i1 %v13, label %b2, label %b1
+
+b1:                                               ; preds = %b0
+  %v14 = bitcast i8* %v10 to i8**
+  %v15 = load i8*, i8** %v14, align 4
+  %v16 = add i32 %v1, -1
+  %v17 = getelementptr i8, i8* %v15, i32 %v16
+  %v18 = bitcast i8* %v17 to i32 (%s.0*)**
+  %v19 = load i32 (%s.0*)*, i32 (%s.0*)** %v18, align 4
+  br label %b3
+
+b2:                                               ; preds = %b0
+  %v20 = inttoptr i32 %v1 to i32 (%s.0*)*
+  br label %b3
+
+b3:                                               ; preds = %b2, %b1
+  %v21 = phi i32 (%s.0*)* [ %v19, %b1 ], [ %v20, %b2 ]
+  %v22 = invoke i32 %v21(%s.0* %v11)
+          to label %b4 unwind label %b5
+
+b4:                                               ; preds = %b3
+  store i32 %v22, i32* %v8, align 4, !tbaa !5
+  call void @f4(i8* %v7, i8* bitcast (i8** @g1 to i8*), i8* null) #2
+  unreachable
+
+b5:                                               ; preds = %b3
+  %v23 = landingpad { i8*, i32 }
+          cleanup
+  call void @f3(i8* %v7) #1
+  resume { i8*, i32 } %v23
+}
+
+declare i8* @f1(i32)
+
+declare i32 @f2(...)
+
+declare void @f3(i8*)
+
+declare void @f4(i8*, i8*, i8*)
+
+attributes #0 = { noreturn "target-cpu"="hexagonv55" }
+attributes #1 = { nounwind }
+attributes #2 = { noreturn }
+
+!0 = !{!1, !2, i64 0}
+!1 = !{!"_ZTS1A", !2, i64 0}
+!2 = !{!"int", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C/C++ TBAA"}
+!5 = !{!2, !2, i64 0}

Added: llvm/trunk/test/CodeGen/Hexagon/ehabi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/ehabi.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/ehabi.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/ehabi.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,81 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; CHECK: GCC_except_table0:
+; CHECK: Call site Encoding = uleb128
+
+target triple = "hexagon"
+
+@g0 = external constant i8*
+
+define i32 @f0() #0 personality i8* bitcast (i32 (...)* @f3 to i8*) {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = alloca i32, align 4
+  %v2 = alloca i8*
+  %v3 = alloca i32
+  %v4 = alloca i32, align 4
+  store i32 0, i32* %v0
+  store i32 1, i32* %v1, align 4
+  %v5 = call i8* @f1(i32 4) #2
+  %v6 = bitcast i8* %v5 to i32*
+  store i32 20, i32* %v6
+  invoke void @f2(i8* %v5, i8* bitcast (i8** @g0 to i8*), i8* null) #3
+          to label %b6 unwind label %b1
+
+b1:                                               ; preds = %b0
+  %v7 = landingpad { i8*, i32 }
+          catch i8* bitcast (i8** @g0 to i8*)
+  %v8 = extractvalue { i8*, i32 } %v7, 0
+  store i8* %v8, i8** %v2
+  %v9 = extractvalue { i8*, i32 } %v7, 1
+  store i32 %v9, i32* %v3
+  br label %b2
+
+b2:                                               ; preds = %b1
+  %v10 = load i32, i32* %v3
+  %v11 = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @g0 to i8*)) #2
+  %v12 = icmp eq i32 %v10, %v11
+  br i1 %v12, label %b3, label %b5
+
+b3:                                               ; preds = %b2
+  %v13 = load i8*, i8** %v2
+  %v14 = call i8* @f4(i8* %v13) #2
+  %v15 = bitcast i8* %v14 to i32*
+  %v16 = load i32, i32* %v15, align 4
+  store i32 %v16, i32* %v4, align 4
+  store i32 2, i32* %v1, align 4
+  call void @f5() #2
+  br label %b4
+
+b4:                                               ; preds = %b3
+  %v17 = load i32, i32* %v1, align 4
+  ret i32 %v17
+
+b5:                                               ; preds = %b2
+  %v18 = load i8*, i8** %v2
+  %v19 = load i32, i32* %v3
+  %v20 = insertvalue { i8*, i32 } undef, i8* %v18, 0
+  %v21 = insertvalue { i8*, i32 } %v20, i32 %v19, 1
+  resume { i8*, i32 } %v21
+
+b6:                                               ; preds = %b0
+  unreachable
+}
+
+declare i8* @f1(i32)
+
+declare void @f2(i8*, i8*, i8*)
+
+declare i32 @f3(...)
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.eh.typeid.for(i8*) #1
+
+declare i8* @f4(i8*)
+
+declare void @f5()
+
+attributes #0 = { "no-frame-pointer-elim"="true" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind }
+attributes #3 = { noreturn }

Added: llvm/trunk/test/CodeGen/Hexagon/entryBB-isLoopHdr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/entryBB-isLoopHdr.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/entryBB-isLoopHdr.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/entryBB-isLoopHdr.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,40 @@
+; RUN: llc -march=hexagon -hexagon-hwloop-preheader < %s | FileCheck %s
+
+; Check for the lack of assertion failures.
+
+; CHECK: %bb.0
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.sath(i32) #0
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.neg(i32) #0
+
+define void @f0(i16 signext %a0) {
+b0:
+  %v0 = icmp slt i16 %a0, 1
+  br i1 %v0, label %b1, label %b3
+
+b1:                                               ; preds = %b2, %b0
+  %v1 = phi i16 [ %v11, %b2 ], [ %a0, %b0 ]
+  %v2 = sext i16 %v1 to i32
+  %v3 = tail call i32 @llvm.hexagon.A2.neg(i32 %v2)
+  %v4 = tail call i32 @llvm.hexagon.A2.sath(i32 %v3)
+  %v5 = trunc i32 %v4 to i16
+  %v6 = shl i32 %v4, 16
+  %v7 = ashr exact i32 %v6, 16
+  %v8 = icmp slt i16 %v5, 0
+  br i1 %v8, label %b2, label %b3
+
+b2:                                               ; preds = %b1
+  %v9 = tail call i32 @llvm.hexagon.A2.neg(i32 %v7)
+  %v10 = tail call i32 @llvm.hexagon.A2.sath(i32 %v9)
+  %v11 = trunc i32 %v10 to i16
+  %v12 = icmp slt i16 %v11, 1
+  br i1 %v12, label %b1, label %b3
+
+b3:                                               ; preds = %b2, %b1, %b0
+  ret void
+}
+
+attributes #0 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/expand-condsets-copy-lis.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/expand-condsets-copy-lis.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/expand-condsets-copy-lis.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/expand-condsets-copy-lis.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,37 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+; Test that the compiler doesn't assert due to the live interval information
+; not being updated correctly during the Hexagon Expand Condsets pass. The pass
+; wasn't updating the information when converting a mux with the same operands
+; into a copy. When this occurs, the pass needs to update the liveness
+; information for the predicate register, which is removed.
+
+define void @f0(i32 %a0) unnamed_addr {
+b0:
+  %v0 = or i32 undef, %a0
+  %v1 = or i32 undef, %v0
+  br label %b1
+
+b1:                                               ; preds = %b3, %b0
+  %v2 = phi i32 [ %v9, %b3 ], [ 0, %b0 ]
+  %v3 = phi i32 [ 0, %b3 ], [ %v1, %b0 ]
+  %v4 = srem i32 %v2, 4
+  %v5 = icmp eq i32 %v4, 0
+  %v6 = select i1 %v5, i32 %v1, i32 %v3
+  %v7 = shl i32 %v6, 8
+  %v8 = add i32 0, %v7
+  br i1 undef, label %b2, label %b3
+
+b2:                                               ; preds = %b1
+  store i32 %v8, i32* undef, align 4
+  br label %b3
+
+b3:                                               ; preds = %b2, %b1
+  %v9 = add nuw nsw i32 %v2, 1
+  %v10 = icmp slt i32 %v9, undef
+  br i1 %v10, label %b1, label %b4
+
+b4:                                               ; preds = %b3
+  unreachable
+}

Added: llvm/trunk/test/CodeGen/Hexagon/expand-condsets-dead.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/expand-condsets-dead.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/expand-condsets-dead.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/expand-condsets-dead.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,47 @@
+; RUN: llc -march=hexagon -O2 < %s
+; REQUIRES: asserts
+
+; Test that the dead and kill flags are not updated incorrectly during the
+; Hexagon Expand Condsets pass. The pass shouldn't add a kill flag to a use that
+; is tied to a definition, and the pass shouldn't remove the dead flag for a
+; definition that is really dead. The removal of the dead flag causes an assert
+; in the Machine Scheduler when querying live interval information.
+
+define void @f0() #0 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b3, %b0
+  %v0 = load i16, i16* undef, align 4
+  %v1 = sext i16 %v0 to i32
+  %v2 = and i32 %v1, 7
+  %v3 = sub nsw i32 8, %v2
+  %v4 = sub nsw i32 8, 0
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v5 = phi i8* [ undef, %b1 ], [ %v16, %b2 ]
+  %v6 = phi i32 [ 4, %b1 ], [ %v17, %b2 ]
+  %v7 = load i8, i8* undef, align 1
+  %v8 = zext i8 %v7 to i32
+  %v9 = mul nuw nsw i32 %v8, %v3
+  %v10 = add nuw nsw i32 0, %v9
+  %v11 = mul nuw nsw i32 %v10, %v4
+  %v12 = add nuw nsw i32 0, %v11
+  %v13 = lshr i32 %v12, 6
+  %v14 = trunc i32 %v13 to i8
+  store i8 %v14, i8* %v5, align 1
+  %v15 = getelementptr inbounds i8, i8* %v5, i32 1
+  %v16 = select i1 undef, i8* undef, i8* %v15
+  %v17 = add nsw i32 %v6, -1
+  %v18 = icmp eq i32 %v17, 0
+  br i1 %v18, label %b3, label %b2
+
+b3:                                               ; preds = %b2
+  br i1 undef, label %b1, label %b4
+
+b4:                                               ; preds = %b3
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv65" }

Added: llvm/trunk/test/CodeGen/Hexagon/expand-condsets-pred-undef2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/expand-condsets-pred-undef2.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/expand-condsets-pred-undef2.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/expand-condsets-pred-undef2.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,14 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: if{{.*}}add
+; CHECK: if{{.*}}sub
+
+; Function Attrs: nounwind
+define i32 @f0(i32 %a0, i32 %a1, i32 %a2) #0 {
+b0:
+  %v0 = add i32 %a0, %a2
+  %v1 = sub i32 %a1, %a2
+  %v2 = select i1 undef, i32 %v0, i32 %v1
+  ret i32 %v2
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/expand-condsets.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/expand-condsets.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/expand-condsets.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/expand-condsets.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,116 @@
+; RUN: llc -march=hexagon -O3 < %s | FileCheck %s
+; Check if all or's in the loop were predicated.
+; CHECK: if{{.*}} = or
+; CHECK: if{{.*}} = or
+; CHECK: if{{.*}} = or
+; CHECK: if{{.*}} = or
+; CHECK: endloop
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define void @f0(i32 %a0, i32* nocapture %a1, i32* nocapture %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6) #0 {
+b0:
+  %v0 = icmp ugt i32 %a0, 32
+  %v1 = lshr i32 %a0, 6
+  %v2 = select i1 %v0, i32 %v1, i32 1
+  %v3 = icmp eq i32 %v2, 0
+  br i1 %v3, label %b9, label %b1
+
+b1:                                               ; preds = %b0
+  %v4 = lshr i32 %a0, 2
+  %v5 = getelementptr inbounds i32, i32* %a1, i32 %v4
+  br label %b2
+
+b2:                                               ; preds = %b7, %b1
+  %v6 = phi i32* [ %v5, %b1 ], [ %v9, %b7 ]
+  %v7 = phi i32* [ %a1, %b1 ], [ %v49, %b7 ]
+  %v8 = phi i32 [ 0, %b1 ], [ %v55, %b7 ]
+  %v9 = getelementptr i32, i32* %v6, i32 64
+  br label %b3
+
+b3:                                               ; preds = %b3, %b2
+  %v10 = phi i32 [ 2, %b2 ], [ %v46, %b3 ]
+  %v11 = phi i32 [ 1, %b2 ], [ %v45, %b3 ]
+  %v12 = phi i32* [ %v6, %b2 ], [ %v23, %b3 ]
+  %v13 = phi i32* [ %v7, %b2 ], [ %v19, %b3 ]
+  %v14 = phi i32 [ 0, %b2 ], [ %v47, %b3 ]
+  %v15 = phi i32 [ 0, %b2 ], [ %v41, %b3 ]
+  %v16 = phi i32 [ 0, %b2 ], [ %v44, %b3 ]
+  %v17 = getelementptr inbounds i32, i32* %v13, i32 1
+  %v18 = load i32, i32* %v13, align 4, !tbaa !0
+  %v19 = getelementptr inbounds i32, i32* %v13, i32 2
+  %v20 = load i32, i32* %v17, align 4, !tbaa !0
+  %v21 = getelementptr inbounds i32, i32* %v12, i32 1
+  %v22 = load i32, i32* %v12, align 4, !tbaa !0
+  %v23 = getelementptr inbounds i32, i32* %v12, i32 2
+  %v24 = load i32, i32* %v21, align 4, !tbaa !0
+  %v25 = tail call i32 @llvm.hexagon.A2.add(i32 %v22, i32 %a4)
+  %v26 = tail call i32 @llvm.hexagon.A2.sub(i32 %v25, i32 %a3)
+  %v27 = tail call i32 @llvm.hexagon.A2.add(i32 %v24, i32 %a4)
+  %v28 = tail call i32 @llvm.hexagon.A2.sub(i32 %v27, i32 %a3)
+  %v29 = tail call i32 @llvm.hexagon.A2.sub(i32 %v18, i32 %a5)
+  %v30 = tail call i32 @llvm.hexagon.A2.add(i32 %v29, i32 %a6)
+  %v31 = tail call i32 @llvm.hexagon.A2.sub(i32 %v20, i32 %a5)
+  %v32 = tail call i32 @llvm.hexagon.A2.add(i32 %v31, i32 %a6)
+  %v33 = icmp ugt i32 %v26, %v18
+  %v34 = select i1 %v33, i32 0, i32 %v11
+  %v35 = or i32 %v34, %v15
+  %v36 = icmp ult i32 %v30, %v22
+  %v37 = select i1 %v36, i32 %v11, i32 0
+  %v38 = or i32 %v37, %v16
+  %v39 = icmp ugt i32 %v28, %v20
+  %v40 = select i1 %v39, i32 0, i32 %v10
+  %v41 = or i32 %v35, %v40
+  %v42 = icmp ult i32 %v32, %v24
+  %v43 = select i1 %v42, i32 %v10, i32 0
+  %v44 = or i32 %v38, %v43
+  %v45 = shl i32 %v11, 2
+  %v46 = shl i32 %v10, 2
+  %v47 = add i32 %v14, 1
+  %v48 = icmp eq i32 %v47, 32
+  br i1 %v48, label %b4, label %b3
+
+b4:                                               ; preds = %b3
+  %v49 = getelementptr i32, i32* %v7, i32 64
+  br i1 %v0, label %b5, label %b6
+
+b5:                                               ; preds = %b4
+  %v50 = getelementptr inbounds i32, i32* %a2, i32 %v8
+  store i32 %v41, i32* %v50, align 4, !tbaa !0
+  %v51 = add i32 %v8, %v2
+  %v52 = getelementptr inbounds i32, i32* %a2, i32 %v51
+  store i32 %v44, i32* %v52, align 4, !tbaa !0
+  br label %b7
+
+b6:                                               ; preds = %b4
+  %v53 = or i32 %v41, %v44
+  %v54 = getelementptr inbounds i32, i32* %a2, i32 %v8
+  store i32 %v53, i32* %v54, align 4, !tbaa !0
+  br label %b7
+
+b7:                                               ; preds = %b6, %b5
+  %v55 = add i32 %v8, 1
+  %v56 = icmp eq i32 %v55, %v2
+  br i1 %v56, label %b8, label %b2
+
+b8:                                               ; preds = %b7
+  br label %b9
+
+b9:                                               ; preds = %b8, %b0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.sub(i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.add(i32, i32) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}
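
For context, the predicated ORs being checked here usually come from conditionally
setting bits in an accumulator. A rough C shape that lowers to the select-feeding-or
IR seen in this test (a sketch with invented names; the original source is not part
of the commit):

  unsigned collect_flags(const unsigned *a, const unsigned *b, int n,
                         unsigned lo, unsigned hi) {
    unsigned f0 = 0, f1 = 0;
    unsigned bit = 1;
    for (int i = 0; i < n; ++i, bit <<= 2) {
      if (a[i] >= lo)      /* becomes a select feeding an or ... */
        f0 |= bit;
      if (b[i] > hi)       /* ... which expand-condsets turns into "if (p) r = or" */
        f1 |= bit;
    }
    return f0 | f1;
  }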

Added: llvm/trunk/test/CodeGen/Hexagon/extlow.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/extlow.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/extlow.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/extlow.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,12 @@
+; RUN: llc -march=hexagon -O0 %s -o - | llvm-mc -arch=hexagon -filetype=obj | llvm-objdump -d - | FileCheck %s
+
+; CHECK: immext(#16777216)
+; CHECK-NEXT: r0 = add(r0,##16777279)
+
+define void @f0(i32 %a0) {
+b0:
+  %v0 = add i32 16777279, %a0
+  %v1 = alloca i32, align 4
+  store i32 %v0, i32* %v1, align 4
+  ret void
+}
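
The two CHECK lines above split the 32-bit immediate between the constant extender
and the add itself; a quick sanity check of that arithmetic (assuming the extender
carries everything above the low 6 bits, which is what the printed values suggest):

  #include <assert.h>

  int main(void) {
    /* immext(#16777216) is ##16777279 with its low 6 bits cleared, so the
       add only has to encode the remaining #63. */
    assert((16777279u & ~0x3Fu) == 16777216u);
    assert(16777216u + (16777279u & 0x3Fu) == 16777279u);
    return 0;
  }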

Added: llvm/trunk/test/CodeGen/Hexagon/extract_0bits.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/extract_0bits.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/extract_0bits.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/extract_0bits.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,15 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: r{{[0-9:]+}} = #0
+
+; Function Attrs: nounwind readnone
+define i32 @f0() #0 {
+b0:
+  %v0 = tail call i64 @llvm.hexagon.S4.extractp(i64 -1, i32 0, i32 1)
+  %v1 = trunc i64 %v0 to i32
+  ret i32 %v1
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.S4.extractp(i64, i32, i32) #0
+
+attributes #0 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/extractu_0bits.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/extractu_0bits.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/extractu_0bits.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/extractu_0bits.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,15 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: r{{[0-9:]+}} = #0
+
+; Function Attrs: nounwind readnone
+define i32 @f0() #0 {
+b0:
+  %v0 = tail call i64 @llvm.hexagon.S2.extractup(i64 -1, i32 0, i32 1)
+  %v1 = trunc i64 %v0 to i32
+  ret i32 %v1
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.S2.extractup(i64, i32, i32) #0
+
+attributes #0 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/find-loop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/find-loop.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/find-loop.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/find-loop.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,64 @@
+; RUN: llc -march=hexagon -O3 < %s
+; REQUIRES: asserts
+
+; Test that the compiler doesn't assert when attempting to find a
+; loop instruction that has been deleted, in which case FindLoopInstr
+; returns the loop instruction from a different loop.
+
+@g0 = external global i32
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  %v0 = alloca i64, align 8
+  %v1 = bitcast i64* %v0 to [2 x i32]*
+  %v2 = load i32, i32* @g0, align 4
+  br i1 undef, label %b1, label %b2
+
+b1:                                               ; preds = %b1, %b0
+  %v3 = phi i32 [ %v4, %b1 ], [ 64, %b0 ]
+  %v4 = add nsw i32 %v3, 1
+  %v5 = icmp slt i32 %v4, %v2
+  br i1 %v5, label %b1, label %b2
+
+b2:                                               ; preds = %b6, %b3, %b1, %b0
+  br label %b4
+
+b3:                                               ; preds = %b4
+  br i1 undef, label %b4, label %b2
+
+b4:                                               ; preds = %b3, %b2
+  %v6 = icmp slt i32 undef, 1
+  br i1 %v6, label %b3, label %b5
+
+b5:                                               ; preds = %b5, %b4
+  %v7 = phi i32 [ %v18, %b5 ], [ 1, %b4 ]
+  %v8 = phi i32 [ %v19, %b5 ], [ 0, %b4 ]
+  %v9 = add nsw i32 %v8, 0
+  %v10 = lshr i32 %v9, 5
+  %v11 = getelementptr inbounds [2 x i32], [2 x i32]* %v1, i32 0, i32 %v10
+  %v12 = load i32, i32* %v11, align 4
+  %v13 = and i32 %v9, 31
+  %v14 = shl i32 1, %v13
+  %v15 = and i32 %v12, %v14
+  %v16 = icmp ne i32 %v15, 0
+  %v17 = zext i1 %v16 to i32
+  %v18 = and i32 %v17, %v7
+  %v19 = add nsw i32 %v8, 1
+  %v20 = icmp eq i32 %v19, 1
+  br i1 %v20, label %b6, label %b5
+
+b6:                                               ; preds = %b5
+  %v21 = icmp eq i32 %v18, 0
+  br i1 %v21, label %b2, label %b7
+
+b7:                                               ; preds = %b6
+  tail call void @f1() #1
+  unreachable
+}
+
+; Function Attrs: nounwind
+declare void @f1() #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/float-bitcast.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/float-bitcast.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/float-bitcast.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/float-bitcast.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,41 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; All of these should be no-ops. Check this with -O0, to make sure
+; that no register copies are generated at any time.
+
+; CHECK-LABEL: f0:
+; CHECK-NOT: r{{[0-9]+}} = r{{[0-9]+}}
+; CHECK: jumpr r31
+define float @f0(i32 %a0) #0 {
+b0:
+  %v0 = bitcast i32 %a0 to float
+  ret float %v0
+}
+
+; CHECK-LABEL: f1:
+; CHECK-NOT: r{{[0-9]+}} = r{{[0-9]+}}
+; CHECK: jumpr r31
+define i32 @f1(float %a0) #0 {
+b0:
+  %v0 = bitcast float %a0 to i32
+  ret i32 %v0
+}
+
+; CHECK-LABEL: f2:
+; CHECK-NOT: r{{[0-9:]*}} = r{{[0-9:]*}}
+; CHECK: jumpr r31
+define double @f2(i64 %a0) #0 {
+b0:
+  %v0 = bitcast i64 %a0 to double
+  ret double %v0
+}
+
+; CHECK-LABEL: f3:
+; CHECK-NOT: r{{[0-9:]*}} = r{{[0-9:]*}}
+; CHECK: jumpr r31
+define i64 @f3(double %a0) #0 {
+b0:
+  %v0 = bitcast double %a0 to i64
+  ret i64 %v0
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
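
A rough C equivalent of these four functions (a sketch, not the original source):
same-width type punning through memcpy, which typically folds to the plain bitcasts
above and stays copy-free, which is what the CHECK-NOT lines insist on.

  #include <stdint.h>
  #include <string.h>

  /* Each memcpy is between same-sized objects, so it reduces to a bitcast. */
  float    u32_as_float(uint32_t x)  { float f;    memcpy(&f, &x, sizeof f); return f; }
  uint32_t float_as_u32(float f)     { uint32_t x; memcpy(&x, &f, sizeof x); return x; }
  double   u64_as_double(uint64_t x) { double d;   memcpy(&d, &x, sizeof d); return d; }
  uint64_t double_as_u64(double d)   { uint64_t x; memcpy(&x, &d, sizeof x); return x; }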

Added: llvm/trunk/test/CodeGen/Hexagon/float-const64-G0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/float-const64-G0.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/float-const64-G0.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/float-const64-G0.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,14 @@
+; RUN: llc -march=hexagon -hexagon-small-data-threshold=0 < %s | FileCheck %s
+;
+; Check that no CONST64's are emitted for a -G0, mv5 compile
+; CHECK-NOT: CONST
+
+; Function Attrs: nounwind readnone
+define double @f0(double %a0) #0 {
+b0:
+  %v0 = fmul double %a0, 0x400921FB53C8D4F1
+  %v1 = fmul double %v0, %a0
+  ret double %v1
+}
+
+attributes #0 = { nounwind readnone "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/float-gen-cmpop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/float-gen-cmpop.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/float-gen-cmpop.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/float-gen-cmpop.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,72 @@
+; RUN: llc -march=hexagon -O3 < %s | FileCheck %s
+
+target triple = "hexagon"
+
+; CHECK-LABEL: f0:
+; CHECK: p{{[0-9]+}} = sfcmp.ge(r{{[0-9]+}},r{{[0-9]+}})
+; CHECK: p{{[0-9]+}} = sfcmp.gt(r{{[0-9]+}},r{{[0-9]+}})
+define i32 @f0(float* nocapture %a0) #0 {
+b0:
+  %v0 = load float, float* %a0, align 4, !tbaa !0
+  %v1 = fcmp olt float %v0, 6.000000e+01
+  br i1 %v1, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  %v2 = getelementptr inbounds float, float* %a0, i32 1
+  %v3 = load float, float* %v2, align 4, !tbaa !0
+  %v4 = fcmp ogt float %v3, 0x3FECCCCCC0000000
+  br label %b2
+
+b2:                                               ; preds = %b1, %b0
+  %v5 = phi i1 [ false, %b0 ], [ %v4, %b1 ]
+  %v6 = zext i1 %v5 to i32
+  ret i32 %v6
+}
+
+; CHECK-LABEL: f1:
+; CHECK: p{{[0-9]+}} = sfcmp.eq(r{{[0-9]+}},r{{[0-9]+}})
+define i32 @f1(float* nocapture %a0) #0 {
+b0:
+  %v0 = load float, float* %a0, align 4, !tbaa !0
+  %v1 = fcmp oeq float %v0, 6.000000e+01
+  %v2 = zext i1 %v1 to i32
+  ret i32 %v2
+}
+
+; CHECK-LABEL: f2:
+; CHECK: p{{[0-9]+}} = dfcmp.ge(r{{[0-9]+}}:{{[0-9]+}},r{{[0-9]+}}:{{[0-9]+}})
+; CHECK: p{{[0-9]+}} = dfcmp.gt(r{{[0-9]+}}:{{[0-9]+}},r{{[0-9]+}}:{{[0-9]+}})
+define i32 @f2(double* nocapture %a0) #0 {
+b0:
+  %v0 = load double, double* %a0, align 8, !tbaa !4
+  %v1 = fcmp olt double %v0, 6.000000e+01
+  br i1 %v1, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  %v2 = getelementptr inbounds double, double* %a0, i32 1
+  %v3 = load double, double* %v2, align 8, !tbaa !4
+  %v4 = fcmp ogt double %v3, 0x3FECCCCCC0000000
+  br label %b2
+
+b2:                                               ; preds = %b1, %b0
+  %v5 = phi i1 [ false, %b0 ], [ %v4, %b1 ]
+  %v6 = zext i1 %v5 to i32
+  ret i32 %v6
+}
+
+define i32 @f3(double* nocapture %a0) #0 {
+b0:
+  %v0 = load double, double* %a0, align 8, !tbaa !4
+  %v1 = fcmp oeq double %v0, 6.000000e+01
+  %v2 = zext i1 %v1 to i32
+  ret i32 %v2
+}
+
+attributes #0 = { nounwind readonly "target-cpu"="hexagonv55" "no-nans-fp-math"="true" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"float", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"double", !2}

Added: llvm/trunk/test/CodeGen/Hexagon/fltnvjump.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/fltnvjump.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/fltnvjump.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/fltnvjump.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,216 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+; We do not want to see a new value compare after the convert
+; CHECK: r{{[0-9]+}} = convert_df2w
+; CHECK-NOT: if (!cmp.eq(r{{[0-9]+}}.new,r{{[0-9]+}})jump
+; r3 = convert_df2w(r1:0):chop
+; if (!cmp.eq(r3.new, r2)) jump:nt .LBB0_13
+
+target triple = "hexagon"
+
+%s.0 = type { %s.1, i8*, i8* }
+%s.1 = type { i16, i16, i32 }
+%s.2 = type { i8, i32, i32, i16, i16, i16, i32, i8, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, %s.3* }
+%s.3 = type { [2 x i16], i16, i16, i16, i16, [13 x i16], i16, i16, [2 x i16*], [25 x i16], [49 x i16], [6 x i16], [49 x i16] }
+
+@g0 = internal constant %s.0 { %s.1 { i16 705, i16 0, i32 16 }, i8* getelementptr inbounds ([110 x i8], [110 x i8]* @g1, i32 0, i32 0), i8* getelementptr inbounds ([13 x i8], [13 x i8]* @g2, i32 0, i32 0) }, align 4
+@g1 = private unnamed_addr constant [110 x i8] c"Assertion ............................................................................................ failed\00", align 1
+@g2 = private unnamed_addr constant [13 x i8] c"............\00", align 1
+
+define signext i16 @f0(%s.2* %a0) #0 {
+b0:
+  %v0 = alloca i16, align 2
+  %v1 = alloca i16, align 2
+  %v2 = getelementptr inbounds %s.2, %s.2* %a0, i32 0, i32 19
+  %v3 = load %s.3*, %s.3** %v2, align 4, !tbaa !0
+  %v4 = getelementptr inbounds %s.3, %s.3* %v3, i32 0, i32 12, i32 0
+  %v5 = getelementptr inbounds %s.3, %s.3* %v3, i32 0, i32 2
+  %v6 = call signext i16 @f1(i16* %v4, i16* %v5, %s.2* %a0)
+  %v7 = icmp eq i16 %v6, 0
+  br i1 %v7, label %b1, label %b13
+
+b1:                                               ; preds = %b0
+  %v8 = getelementptr inbounds %s.2, %s.2* %a0, i32 0, i32 11
+  %v9 = load i16, i16* %v8, align 2, !tbaa !4
+  %v10 = sext i16 %v9 to i32
+  %v11 = load i16, i16* %v5, align 2, !tbaa !4
+  %v12 = sext i16 %v11 to i32
+  %v13 = call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %v10, i32 %v12)
+  %v14 = trunc i32 %v13 to i16
+  %v15 = icmp sgt i16 %v14, 0
+  br i1 %v15, label %b13, label %b2
+
+b2:                                               ; preds = %b1
+  %v16 = getelementptr inbounds %s.3, %s.3* %v3, i32 0, i32 8, i32 1
+  %v17 = load i16*, i16** %v16, align 4, !tbaa !0
+  call void @f2(i16* %v17, i16* %v1, i16* %v4, i16 signext %v11, i16 signext %v9)
+  %v18 = getelementptr inbounds %s.3, %s.3* %v3, i32 0, i32 8, i32 0
+  %v19 = load i16*, i16** %v18, align 4, !tbaa !0
+  %v20 = load i16*, i16** %v16, align 4, !tbaa !0
+  %v21 = load i16, i16* %v1, align 2, !tbaa !4
+  call void @f3(i16* %v19, i16* %v0, i16* %v20, i16 signext %v21)
+  %v22 = load i16, i16* %v0, align 2, !tbaa !4
+  %v23 = getelementptr inbounds %s.3, %s.3* %v3, i32 0, i32 0, i32 0
+  store i16 %v22, i16* %v23, align 2, !tbaa !4
+  %v24 = load i16, i16* %v1, align 2, !tbaa !4
+  %v25 = getelementptr inbounds %s.3, %s.3* %v3, i32 0, i32 0, i32 1
+  store i16 %v24, i16* %v25, align 2, !tbaa !4
+  %v26 = load i16, i16* %v0, align 2, !tbaa !4
+  %v27 = sext i16 %v26 to i32
+  %v28 = icmp slt i16 %v26, 1
+  br i1 %v28, label %b13, label %b3
+
+b3:                                               ; preds = %b2
+  %v29 = call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 48, i32 1)
+  %v30 = call i32 @llvm.hexagon.A2.sath(i32 %v29)
+  %v31 = shl i32 %v30, 16
+  %v32 = ashr exact i32 %v31, 16
+  %v33 = call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %v27, i32 %v32)
+  %v34 = trunc i32 %v33 to i16
+  %v35 = icmp sgt i16 %v34, 0
+  br i1 %v35, label %b13, label %b4
+
+b4:                                               ; preds = %b3
+  %v36 = load i16*, i16** %v18, align 4, !tbaa !0
+  %v37 = load i16, i16* %v36, align 2, !tbaa !4
+  %v38 = getelementptr inbounds i16, i16* %v36, i32 %v27
+  %v39 = load i16, i16* %v38, align 2, !tbaa !4
+  %v40 = sext i16 %v37 to i32
+  %v41 = call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %v40, i32 32)
+  %v42 = trunc i32 %v41 to i16
+  %v43 = icmp sgt i16 %v42, 0
+  br i1 %v43, label %b13, label %b5
+
+b5:                                               ; preds = %b4
+  %v44 = sext i16 %v39 to i32
+  %v45 = call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %v40, i32 %v44)
+  %v46 = and i32 %v45, 32768
+  %v47 = icmp eq i32 %v46, 0
+  br i1 %v47, label %b13, label %b6
+
+b6:                                               ; preds = %b5
+  %v48 = load i16, i16* %v1, align 2, !tbaa !4
+  %v49 = sext i16 %v48 to i32
+  %v50 = load i16*, i16** %v16, align 4, !tbaa !0
+  %v51 = getelementptr inbounds i16, i16* %v50, i32 %v49
+  %v52 = load i16, i16* %v51, align 2, !tbaa !4
+  %v53 = getelementptr inbounds %s.2, %s.2* %a0, i32 0, i32 14
+  %v54 = load i16, i16* %v53, align 2, !tbaa !4
+  %v55 = icmp eq i16 %v54, 0
+  br i1 %v55, label %b7, label %b8
+
+b7:                                               ; preds = %b6
+  %v56 = getelementptr inbounds %s.3, %s.3* %v3, i32 0, i32 1
+  store i16 1, i16* %v56, align 2, !tbaa !4
+  br label %b11
+
+b8:                                               ; preds = %b6
+  %v57 = load i16, i16* %v50, align 2, !tbaa !4
+  %v58 = sext i16 %v57 to i32
+  %v59 = sext i16 %v52 to i32
+  %v60 = call signext i16 @f4(i32 %v58, i32 %v59)
+  %v61 = sext i16 %v60 to i32
+  %v62 = call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v61, i32 2)
+  %v63 = call i32 @llvm.hexagon.A2.sath(i32 %v62)
+  %v64 = shl i32 %v63, 16
+  %v65 = ashr exact i32 %v64, 16
+  %v66 = load i16, i16* %v53, align 2, !tbaa !4
+  %v67 = sext i16 %v66 to i32
+  %v68 = call i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32 1024, i32 %v65, i32 %v67)
+  %v69 = shl i32 %v68, 16
+  %v70 = ashr exact i32 %v69, 16
+  %v71 = call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v70, i32 1)
+  %v72 = call i32 @llvm.hexagon.A2.sath(i32 %v71)
+  %v73 = shl i32 %v72, 16
+  %v74 = ashr exact i32 %v73, 16
+  %v75 = call i32 @llvm.hexagon.S2.asr.r.r.sat(i32 %v74, i32 10)
+  %v76 = call i32 @llvm.hexagon.A2.sath(i32 %v75)
+  %v77 = shl i32 %v76, 16
+  %v78 = ashr exact i32 %v77, 16
+  %v79 = sitofp i16 %v66 to float
+  %v80 = sitofp i16 %v52 to float
+  %v81 = sitofp i16 %v57 to float
+  %v82 = fdiv float %v80, %v81
+  %v83 = call float @f7(float %v82, i32 0)
+  %v84 = fmul float %v79, %v83
+  %v85 = fdiv float %v84, 0x3FE62E4300000000
+  %v86 = fpext float %v85 to double
+  %v87 = fadd double %v86, 5.000000e-01
+  %v88 = fptosi double %v87 to i32
+  %v89 = icmp eq i32 %v78, %v88
+  br i1 %v89, label %b10, label %b9
+
+b9:                                               ; preds = %b8
+  call void @f5(%s.0* @g0) #2
+  unreachable
+
+b10:                                              ; preds = %b8
+  %v90 = trunc i32 %v76 to i16
+  %v91 = icmp eq i32 %v78, 0
+  %v92 = select i1 %v91, i16 1, i16 %v90
+  %v93 = getelementptr inbounds %s.3, %s.3* %v3, i32 0, i32 1
+  store i16 %v92, i16* %v93, align 2, !tbaa !4
+  br label %b11
+
+b11:                                              ; preds = %b10, %b7
+  %v94 = phi i16 [ %v92, %b10 ], [ 1, %b7 ]
+  %v95 = getelementptr inbounds %s.3, %s.3* %v3, i32 0, i32 7
+  store i16 %v94, i16* %v95, align 2, !tbaa !4
+  %v96 = sext i16 %v94 to i32
+  %v97 = call i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32 %v96, i32 5)
+  %v98 = trunc i32 %v97 to i16
+  %v99 = icmp sgt i16 %v98, 0
+  br i1 %v99, label %b13, label %b12
+
+b12:                                              ; preds = %b11
+  %v100 = getelementptr inbounds %s.3, %s.3* %v3, i32 0, i32 11, i32 0
+  %v101 = load i16*, i16** %v18, align 4, !tbaa !0
+  %v102 = load i16, i16* %v0, align 2, !tbaa !4
+  call void @f6(i16* %v100, i16 signext %v94, i16* %v101, i16 signext %v102)
+  %v103 = getelementptr inbounds %s.3, %s.3* %v3, i32 0, i32 3
+  store i16 %v37, i16* %v103, align 2, !tbaa !4
+  %v104 = getelementptr inbounds %s.3, %s.3* %v3, i32 0, i32 4
+  store i16 %v39, i16* %v104, align 2, !tbaa !4
+  br label %b13
+
+b13:                                              ; preds = %b12, %b11, %b5, %b4, %b3, %b2, %b1, %b0
+  %v105 = phi i16 [ 0, %b12 ], [ -1, %b1 ], [ -1, %b0 ], [ -1, %b3 ], [ -1, %b2 ], [ -1, %b5 ], [ -1, %b4 ], [ -1, %b11 ]
+  ret i16 %v105
+}
+
+declare signext i16 @f1(i16*, i16*, %s.2*) #0
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.subh.l16.sat.ll(i32, i32) #1
+
+declare void @f2(i16*, i16*, i16*, i16 signext, i16 signext) #0
+
+declare void @f3(i16*, i16*, i16*, i16 signext) #0
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.sath(i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S2.asr.r.r.sat(i32, i32) #1
+
+declare signext i16 @f4(i32, i32) #0
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.M2.mpy.acc.sat.ll.s1(i32, i32, i32) #1
+
+; Function Attrs: noreturn
+declare void @f5(%s.0*) #2
+
+declare void @f6(i16*, i16 signext, i16*, i16 signext) #0
+
+declare float @f7(float, i32) #0
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { noreturn }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"any pointer", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"short", !2}
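
The pattern the CHECK-NOT guards against comes from a source shape like the one
below (a sketch with invented names, mirroring the fptosi-feeding-icmp at the end
of @f0): a double-to-int conversion whose result immediately guards a branch. The
compare may still be predicated, but it must not become a new-value compare-and-jump
on the convert_df2w result.

  extern void assert_failed(void);   /* stands in for the noreturn @f5 call */

  void check_rounding(double computed, int expected) {
    if ((int)computed != expected)   /* convert_df2w result feeding cmp.eq */
      assert_failed();
  }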

Added: llvm/trunk/test/CodeGen/Hexagon/fmadd.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/fmadd.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/fmadd.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/fmadd.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,20 @@
+; RUN: llc -march=hexagon -fp-contract=fast < %s | FileCheck %s
+
+@g0 = global float 0.000000e+00, align 4
+@g1 = global float 1.000000e+00, align 4
+@g2 = global float 2.000000e+00, align 4
+
+; CHECK: r{{[0-9]+}} += sfmpy(r{{[0-9]+}},r{{[0-9]+}})
+define void @f0() #0 {
+b0:
+  %v0 = load float, float* @g0, align 4
+  %v1 = load float, float* @g1, align 4
+  %v2 = load float, float* @g2, align 4
+  %v3 = alloca float, align 4
+  %v4 = fmul float %v0, %v1
+  %v5 = fadd float %v2, %v4
+  store float %v5, float* %v3, align 4
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
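
This is the classic contraction pattern in its smallest form; a minimal C version of
the same computation (a sketch; the real test loads its operands from globals):

  /* With contraction enabled (mirroring -fp-contract=fast on the RUN line),
     the multiply and add may be fused into the "r += sfmpy(...)" form that
     the CHECK line expects. */
  float fmadd_like(float a, float b, float c) {
    return a * b + c;
  }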

Added: llvm/trunk/test/CodeGen/Hexagon/getBlockAddress.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/getBlockAddress.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/getBlockAddress.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/getBlockAddress.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,19 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  call void bitcast (void (...)* @f1 to void (i8*)*)(i8* blockaddress(@f0, %b1))
+  br label %b1
+
+b1:                                               ; preds = %b2, %b0
+  ret void
+
+b2:                                               ; No predecessors!
+  indirectbr i8* undef, [label %b1]
+}
+
+declare void @f1(...)
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/glob-align-volatile.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/glob-align-volatile.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/glob-align-volatile.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/glob-align-volatile.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,23 @@
+; RUN: opt -Os -march=hexagon -S < %s | FileCheck %s
+; Don't reset the alignment on the struct to 1.
+; CHECK: align 4
+
+target triple = "hexagon"
+
+%s.0 = type <{ i32, [2 x i8], [2 x i8] }>
+
+; Function Attrs: nounwind optsize
+define i32 @f0(i32 %a0) #0 {
+b0:
+  %v0 = inttoptr i32 %a0 to %s.0*
+  %v1 = getelementptr inbounds %s.0, %s.0* %v0, i32 0, i32 0
+  %v2 = load volatile i32, i32* %v1, align 4, !tbaa !0
+  ret i32 %v2
+}
+
+attributes #0 = { nounwind optsize }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/global-const-gep.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/global-const-gep.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/global-const-gep.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/global-const-gep.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,17 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Test that global constant GEPs are calculated correctly
+;
+target triple = "hexagon-unknown--elf"
+
+%s.0 = type { i32, i64, [100 x i8] }
+
+@g0 = common global %s.0 zeroinitializer, align 8
+@g1 = global i8* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 2, i32 10), align 4
+; CHECK-LABEL: g1:
+; CHECK: .word g0+26
+
+@g2 = common global [100 x i8] zeroinitializer, align 8
+@g3 = global i8* getelementptr inbounds ([100 x i8], [100 x i8]* @g2, i32 0, i32 10), align 4
+; CHECK-LABEL: g3:
+; CHECK: .word g2+10
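
The two .word values follow directly from the struct layout; a small arithmetic
check (assuming the usual rules implied by the "align 8" on @g0, with the i64
member 8-byte aligned):

  #include <assert.h>

  int main(void) {
    /* %s.0 = { i32, i64, [100 x i8] }: the i32 sits at offset 0, padding
       brings the i64 to offset 8, and the byte array starts at 16.  Element
       10 of that array is therefore at g0 + 26.  For @g3 the array itself is
       the global, so element 10 is simply g2 + 10. */
    unsigned array_offset_in_s0 = 8 + 8;
    assert(array_offset_in_s0 + 10 == 26);
    return 0;
  }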

Added: llvm/trunk/test/CodeGen/Hexagon/global-ctor-pcrel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/global-ctor-pcrel.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/global-ctor-pcrel.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/global-ctor-pcrel.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,51 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK-NOT: pcrelR0
+
+target triple = "hexagon"
+
+%s.0 = type { i32, i32 }
+
+@g0 = global %s.0 zeroinitializer, align 4
+
+@e0 = alias void (%s.0*, i32, i32), void (%s.0*, i32, i32)* @f0
+
+; Function Attrs: nounwind
+define void @f0(%s.0* %a0, i32 %a1, i32 %a2) unnamed_addr #0 align 2 {
+b0:
+  %v0 = alloca %s.0*, align 4
+  %v1 = alloca i32, align 4
+  %v2 = alloca i32, align 4
+  store %s.0* %a0, %s.0** %v0, align 4
+  store i32 %a1, i32* %v1, align 4
+  store i32 %a2, i32* %v2, align 4
+  %v3 = load %s.0*, %s.0** %v0
+  %v4 = getelementptr inbounds %s.0, %s.0* %v3, i32 0, i32 0
+  %v5 = load i32, i32* %v2, align 4
+  store i32 %v5, i32* %v4, align 4
+  %v6 = getelementptr inbounds %s.0, %s.0* %v3, i32 0, i32 1
+  %v7 = load i32, i32* %v1, align 4
+  store i32 %v7, i32* %v6, align 4
+  ret void
+}
+
+define internal void @f1() {
+b0:
+  call void @e0(%s.0* @g0, i32 3, i32 7)
+  ret void
+}
+
+; Function Attrs: nounwind
+define i32 @f2() #0 {
+b0:
+  %v0 = alloca i32, align 4
+  store i32 0, i32* %v0
+  ret i32 0
+}
+
+define internal void @f3() {
+b0:
+  call void @f1()
+  ret void
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/global64bitbug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/global64bitbug.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/global64bitbug.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/global64bitbug.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,9 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+target triple = "hexagon-unknown-linux-gnu"
+
+; Make sure we can emit globals whose size is not a multiple of 64 bits.
+; We used to assert here.
+@switch.table = private unnamed_addr constant [6 x i928] [i928 744282853678701455922507579277316643178128753343813693743423064681488139394677769633078380312040969226121498541966016087590661425559764997, i928 744282853678701455922507579277316643178128753343813693743423064681488139394677769633078380312040969226121498541966016087590661425559764997, i928 744282853678701455922507579277316643178128753343813693743423064681488139394677769633078380312040969226121498541966016087590661425559764997, i928 744282853678701455922507579277316643178128753343813693743423064681488139394677769633078378850539331895218580338281183371307641769627222021, i928 744282853678701455922507579277316643178128753343813693743423064681488139394677769633078377389037694564315662134596350655024622113694679045, i928 744282853678701455922507579277316643178128753343813693743423064681488139394677769633078377389037694564315662134596350655024622113694679045]
+

Added: llvm/trunk/test/CodeGen/Hexagon/hello-world-v55.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/hello-world-v55.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/hello-world-v55.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/hello-world-v55.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,16 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: call puts
+
+@g0 = private unnamed_addr constant [13 x i8] c"Hello World!\00"
+
+; Function Attrs: nounwind
+define i32 @f0() #0 {
+b0:
+  %v0 = tail call i32 @puts(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @g0, i32 0, i32 0))
+  ret i32 0
+}
+
+; Function Attrs: nounwind
+declare i32 @puts(i8* nocapture) #0
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/hello-world-v60.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/hello-world-v60.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/hello-world-v60.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/hello-world-v60.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,16 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: call puts
+
+@g0 = private unnamed_addr constant [13 x i8] c"Hello World!\00"
+
+; Function Attrs: nounwind
+define i32 @f0() #0 {
+b0:
+  %v0 = tail call i32 @puts(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @g0, i32 0, i32 0))
+  ret i32 0
+}
+
+; Function Attrs: nounwind
+declare i32 @puts(i8* nocapture) #0
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" }

Added: llvm/trunk/test/CodeGen/Hexagon/hexagon-tfr-add.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/hexagon-tfr-add.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/hexagon-tfr-add.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/hexagon-tfr-add.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,175 @@
+; RUN: llc -march=hexagon -O2 -disable-hexagon-amodeopt < %s | FileCheck %s --check-prefix=CHECK-ADDI
+; REQUIRES: asserts
+
+target triple = "hexagon"
+
+%s.0 = type { i8, i8, %s.41, %s.1, %s.2, i8, %s.22, i8, %s.3, i8, i8, %s.23, %s.23, %s.4, i8, %s.5, %s.6, %s.10, %s.14, %s.44, i16, i8, i32, i16, i16, %s.16, i8, i8, i16, i8, i8, i32, i8, [8 x %s.17], i8, i8, i8, i8, i8, i64, i64, i64, i8, i8, i8, i8, i8, i8, i16, i16, i8, i8, i16, i16, i16, i16, i16, i8, i8, i32, i8, i32, i32, i8, [256 x %s.22], [256 x i8], i8, i8, %s.18, i8, i8, i8, i8, i16, i16, i16, i8, i32, i8, i8, i8, i8, i16, i32, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i16, i8, i8, i8, i8, i8, i8, i32, i64, i64, i8, %s.22, %s.23, i8, i8, i8, i8, i8, i8, i8, i16, i32, [256 x %s.22], i8, i8, %s.25, %s.26, i8, i8, %s.27, i8, i8, i8, i8, i8, i8, i8, i8, %s.41, i8, i8, i8, %s.28, i8, %s.30, %s.33, %s.33, %s.33, %s.33, %s.33, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, i8, %s.34, i8, i8, %s.38, i8, i8, %s.40, i8, i8, i8, i8, i8, %s.41, i8, i8, i8, i32, %s.42, i8, i16, [32 x i16], i8, i8, %s.43, i8, i8, i8, i8, i8, i8, i8, i8, %s.44, i8, i8, i8, i8, i32, i8, i8, i8, i8, i8 }
+%s.1 = type { i32, [30 x %s.16] }
+%s.2 = type { [10 x %s.27], i8, i8 }
+%s.3 = type { i8, %s.41 }
+%s.4 = type { i8, i8, i8* }
+%s.5 = type { %s.22, %s.1, i8, i8, i64, i8, i8, i32, i8, i8, i8, %s.34, i8, i8, i32, i8 }
+%s.6 = type { i64, i8, i8, %s.7, i8, i8, %s.34, %s.34, i8, i8, %s.26 }
+%s.7 = type { i32, [256 x %s.8] }
+%s.8 = type { %s.9, i8 }
+%s.9 = type { [3 x i8] }
+%s.10 = type { i32, [40 x %s.11] }
+%s.11 = type { %s.41, i8, i8, i8, i32, %s.12, i32 }
+%s.12 = type { i32, %s.13, i8 }
+%s.13 = type { i8, [48 x i8] }
+%s.14 = type { i8, [10 x %s.15] }
+%s.15 = type { i16, i8, %s.41, i8, i8 }
+%s.16 = type { %s.41, [2 x i8] }
+%s.17 = type { i8, i32 }
+%s.18 = type { %s.19, i8, %s.20, i8, i8 }
+%s.19 = type { i8, i8, i8, i8 }
+%s.20 = type { i32, [40 x %s.21] }
+%s.21 = type { %s.9, i8, i8, i8, i8, i8, i32, %s.12, i32 }
+%s.22 = type { i8, %s.41, i32 }
+%s.23 = type { %s.41, i16, i16, i16, i32, i8, i16, i8, i16, i8, [8 x i8], i8, i8, i8, %s.24, i8, %s.28 }
+%s.24 = type { %s.16, [1 x i8] }
+%s.25 = type { i64, i64, i64, i64 }
+%s.26 = type { i8, i8, i8, i8, [12 x i8] }
+%s.27 = type { %s.9, [2 x i8] }
+%s.28 = type { %s.41, [6 x %s.29], i8 }
+%s.29 = type { %s.41, i16 }
+%s.30 = type { i8, [16 x %s.31] }
+%s.31 = type { i32, i16, i8, i8, [32 x %s.32] }
+%s.32 = type { i32, i8, i8 }
+%s.33 = type { i8, [16 x i16] }
+%s.34 = type { i32, i32, [10 x %s.35], %s.37 }
+%s.35 = type { %s.36, i8, i32, i8, %s.36 }
+%s.36 = type { %s.25 }
+%s.37 = type { i8, i8 }
+%s.38 = type { i16, [64 x %s.39] }
+%s.39 = type { i16, i8, i16 }
+%s.40 = type { i8, [3 x i8], i8 }
+%s.41 = type { [3 x i8], i8, [3 x i8] }
+%s.42 = type { i16, i16, [32 x i16], [32 x %s.41], [32 x i8] }
+%s.43 = type { i8, i8, i8, i8, [9 x i16] }
+%s.44 = type { %s.45, %s.47 }
+%s.45 = type { %s.46, i32, i8 }
+%s.46 = type { %s.46*, %s.46* }
+%s.47 = type { %s.48 }
+%s.48 = type { %s.46, %s.49 }
+%s.49 = type { %s.50 }
+%s.50 = type { %s.51, [16 x %s.52], i8, i8, [16 x i16], %s.9, i8, %s.59, %s.33, %s.62, %s.34, %s.64, i8 }
+%s.51 = type { i32, i16, i8, i8, i8, i8, i8, [5 x i8] }
+%s.52 = type { i8, %s.53 }
+%s.53 = type { %s.54 }
+%s.54 = type { %s.55*, i8, i32 }
+%s.55 = type { %s.46, i32, i8*, i8*, %s.55*, %s.55*, i32, i8, i8, i16, i32, i8, %s.56, i16, [1 x %s.58], i32 }
+%s.56 = type { %s.57 }
+%s.57 = type { i8 }
+%s.58 = type { i8*, i32 }
+%s.59 = type { i8, [17 x %s.60] }
+%s.60 = type { i16, i8, [16 x %s.61] }
+%s.61 = type { i8, i8 }
+%s.62 = type { i8, [6 x %s.63] }
+%s.63 = type { i8, i16 }
+%s.64 = type { %s.65, i8, i64 }
+%s.65 = type { i32, [64 x %s.66], i32, [64 x %s.66], i32, [64 x %s.66], i32, [128 x %s.66], i32, [32 x %s.67], i32, [32 x %s.67] }
+%s.66 = type { i8, i32, i8 }
+%s.67 = type { i16, i8 }
+%s.68 = type { %s.69 }
+%s.69 = type { i32, i8* }
+
+@g0 = external global %s.0, align 8
+@g1 = external constant %s.68, section ".dummy.dummy.dummy.dumm", align 4
+
+; Function Attrs: optsize
+declare void @f0(%s.68*) #0
+
+; Function Attrs: nounwind optsize
+declare zeroext i8 @f1(i8*) #1
+
+; Function Attrs: nounwind optsize
+declare void @f2(i32) #1
+
+; The pass that used to crash doesn't do anything on this testcase anymore,
+; but check for sane output anyway.
+; CHECK-ADDI: ##g0
+; Function Attrs: nounwind optsize ssp
+define zeroext i8 @f3() #2 {
+b0:
+  %v0 = load i8, i8* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 57), align 2
+  %v1 = icmp eq i8 %v0, 0
+  br i1 %v1, label %b2, label %b1
+
+b1:                                               ; preds = %b0
+  tail call void @f0(%s.68* nonnull @g1) #3
+  unreachable
+
+b2:                                               ; preds = %b0
+  %v2 = call zeroext i8 @f1(i8* nonnull undef) #4
+  br i1 undef, label %b3, label %b8
+
+b3:                                               ; preds = %b2
+  %v3 = load i8, i8* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 1), align 1
+  %v4 = add i8 %v3, -17
+  %v5 = icmp ult i8 %v4, 2
+  br i1 %v5, label %b4, label %b7
+
+b4:                                               ; preds = %b3
+  %v6 = load i8, i8* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 167, i32 2), align 2
+  %v7 = sext i8 %v6 to i32
+  %v8 = add nsw i32 %v7, 1
+  %v9 = load i8, i8* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 167, i32 0), align 2
+  %v10 = zext i8 %v9 to i32
+  %v11 = icmp slt i32 %v8, %v10
+  br i1 %v11, label %b6, label %b5
+
+b5:                                               ; preds = %b4
+  unreachable
+
+b6:                                               ; preds = %b4
+  unreachable
+
+b7:                                               ; preds = %b3
+  unreachable
+
+b8:                                               ; preds = %b2
+  br i1 undef, label %b9, label %b10
+
+b9:                                               ; preds = %b8
+  unreachable
+
+b10:                                              ; preds = %b8
+  br i1 undef, label %b12, label %b11
+
+b11:                                              ; preds = %b10
+  unreachable
+
+b12:                                              ; preds = %b10
+  %v12 = load i8, i8* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 1), align 1
+  %v13 = zext i8 %v12 to i32
+  switch i32 %v13, label %b14 [
+    i32 17, label %b13
+    i32 18, label %b13
+    i32 11, label %b15
+  ]
+
+b13:                                              ; preds = %b14, %b12, %b12
+  %v14 = phi i64 [ 4294967294, %b14 ], [ 4294967146, %b12 ], [ 4294967146, %b12 ]
+  %v15 = call i64 @f4(i8 zeroext undef) #3
+  %v16 = add i64 %v15, %v14
+  %v17 = trunc i64 %v16 to i32
+  br label %b15
+
+b14:                                              ; preds = %b12
+  br label %b13
+
+b15:                                              ; preds = %b13, %b12
+  %v18 = phi i32 [ %v17, %b13 ], [ 120000, %b12 ]
+  call void @f2(i32 %v18) #4
+  unreachable
+}
+
+; Function Attrs: optsize
+declare i64 @f4(i8 zeroext) #0
+
+attributes #0 = { optsize "target-cpu"="hexagonv55" }
+attributes #1 = { nounwind optsize "target-cpu"="hexagonv55" }
+attributes #2 = { nounwind optsize ssp "target-cpu"="hexagonv55" }
+attributes #3 = { nounwind optsize }
+attributes #4 = { noinline nounwind optsize }

Added: llvm/trunk/test/CodeGen/Hexagon/hexagon-verify-implicit-use.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/hexagon-verify-implicit-use.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/hexagon-verify-implicit-use.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/hexagon-verify-implicit-use.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,46 @@
+; RUN: llc -march=hexagon -O3 -verify-machineinstrs < %s
+; REQUIRES: asserts
+
+target triple = "hexagon"
+
+%s.0 = type { %s.1* }
+%s.1 = type { %s.2, %s.2**, i32, i32, i8, %s.3 }
+%s.2 = type { i32 (...)**, i32 }
+%s.3 = type { %s.4, %s.6, i32, i32 }
+%s.4 = type { %s.5 }
+%s.5 = type { i8 }
+%s.6 = type { i8*, [12 x i8] }
+%s.7 = type { %s.2, %s.8 }
+%s.8 = type { %s.9*, %s.9* }
+%s.9 = type { [16 x i16*] }
+%s.10 = type { i32 (...)**, i32, i8, i8, i16, i32, i32, %s.11*, %s.12*, %s.0* }
+%s.11 = type { %s.11*, i32, i32, i8* }
+%s.12 = type { %s.12*, i32, void (i8, %s.10*, i32)* }
+
+define i32 @f0() #0 personality i8* bitcast (i32 (...)* @f2 to i8*) {
+b0:
+  %v0 = invoke dereferenceable(4) %s.0* @f1()
+          to label %b1 unwind label %b2
+
+b1:                                               ; preds = %b0
+  %v1 = load i32, i32* undef, align 4
+  %v2 = icmp eq i32 %v1, 0
+  %v3 = zext i1 %v2 to i64
+  %v4 = shl nuw nsw i64 %v3, 32
+  %v5 = or i64 %v4, 0
+  %v6 = call i64 @f3(%s.7* undef, i64 %v5, i64 4294967296, %s.10* nonnull dereferenceable(32) undef, i8* nonnull dereferenceable(1) undef, i32* nonnull dereferenceable(4) undef)
+  unreachable
+
+b2:                                               ; preds = %b0
+  %v7 = landingpad { i8*, i32 }
+          cleanup
+  resume { i8*, i32 } undef
+}
+
+declare dereferenceable(4) %s.0* @f1()
+
+declare i32 @f2(...)
+
+declare i64 @f3(%s.7* nocapture readnone, i64, i64, %s.10* nocapture readonly dereferenceable(32), i8* nocapture dereferenceable(1), i32* nocapture dereferenceable(4)) unnamed_addr align 2
+
+attributes #0 = { "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/hexagon_cfi_offset.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/hexagon_cfi_offset.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/hexagon_cfi_offset.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/hexagon_cfi_offset.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,131 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+; Check the values of cfi offsets emitted.
+; CHECK: .cfi_def_cfa r30, 8
+; CHECK: .cfi_offset r31, -4
+; CHECK: .cfi_offset r30, -8
+; CHECK: .cfi_offset r17, -12
+; CHECK: .cfi_offset r16, -16
+
+%s.0 = type { i32 (...)**, i8* }
+%s.1 = type { i8*, void (i8*)*, void ()*, void ()*, %s.1*, i32, i32, i8*, i8*, i8*, i8*, %s.2 }
+%s.2 = type { i64, void (i8, %s.2*)*, i32, i32, [12 x i8] }
+%s.3 = type { %s.1*, i32, i8*, i32 }
+
+; Function Attrs: noreturn
+define void @f0(i8* %a0, %s.0* %a1, void (i8*)* %a2) #0 {
+b0:
+  %v0 = getelementptr inbounds i8, i8* %a0, i32 -80
+  %v1 = bitcast i8* %v0 to %s.0**
+  store %s.0* %a1, %s.0** %v1, align 16, !tbaa !0
+  %v2 = getelementptr inbounds i8, i8* %a0, i32 -76
+  %v3 = bitcast i8* %v2 to void (i8*)**
+  store void (i8*)* %a2, void (i8*)** %v3, align 4, !tbaa !9
+  %v4 = tail call void ()* @f1(void ()* null) #3
+  %v5 = getelementptr inbounds i8, i8* %a0, i32 -72
+  %v6 = bitcast i8* %v5 to void ()**
+  store void ()* %v4, void ()** %v6, align 8, !tbaa !10
+  %v7 = tail call void ()* @f1(void ()* %v4) #3
+  %v8 = tail call void ()* @f2(void ()* null) #3
+  %v9 = getelementptr inbounds i8, i8* %a0, i32 -68
+  %v10 = bitcast i8* %v9 to void ()**
+  store void ()* %v8, void ()** %v10, align 4, !tbaa !11
+  %v11 = tail call void ()* @f2(void ()* %v8) #3
+  %v12 = getelementptr inbounds i8, i8* %a0, i32 -64
+  %v13 = bitcast i8* %v12 to %s.1**
+  store %s.1* null, %s.1** %v13, align 16, !tbaa !12
+  %v14 = getelementptr inbounds i8, i8* %a0, i32 -60
+  %v15 = bitcast i8* %v14 to i32*
+  store i32 0, i32* %v15, align 4, !tbaa !13
+  %v16 = getelementptr inbounds i8, i8* %a0, i32 -32
+  %v17 = bitcast i8* %v16 to %s.2*
+  %v18 = bitcast i8* %v16 to i64*
+  store i64 4921953907261516544, i64* %v18, align 16, !tbaa !14
+  %v19 = getelementptr inbounds i8, i8* %a0, i32 -24
+  %v20 = bitcast i8* %v19 to void (i8, %s.2*)**
+  store void (i8, %s.2*)* @f3, void (i8, %s.2*)** %v20, align 8, !tbaa !15
+  %v21 = tail call %s.3* @f4() #3
+  %v22 = getelementptr inbounds %s.3, %s.3* %v21, i32 0, i32 1
+  %v23 = load i32, i32* %v22, align 4, !tbaa !16
+  %v24 = add i32 %v23, 1
+  store i32 %v24, i32* %v22, align 4, !tbaa !16
+  %v25 = tail call zeroext i8 @f5(%s.2* %v17) #4
+  %v26 = tail call i8* @f6(i8* %v16) #3
+  tail call void @f7() #5
+  unreachable
+}
+
+; Function Attrs: nounwind
+declare void ()* @f1(void ()*) #1
+
+; Function Attrs: nounwind
+declare void ()* @f2(void ()*) #1
+
+define internal void @f3(i8 zeroext %a0, %s.2* %a1) #2 {
+b0:
+  %v0 = getelementptr inbounds %s.2, %s.2* %a1, i32 0, i32 0
+  %v1 = load i64, i64* %v0, align 16, !tbaa !18
+  %v2 = icmp eq i64 %v1, 4921953907261516544
+  br i1 %v2, label %b1, label %b4
+
+b1:                                               ; preds = %b0
+  %v3 = getelementptr inbounds %s.2, %s.2* %a1, i32 1
+  %v4 = bitcast %s.2* %v3 to i8*
+  %v5 = getelementptr inbounds %s.2, %s.2* %a1, i32 -2, i32 3
+  %v6 = getelementptr inbounds i32, i32* %v5, i32 1
+  %v7 = bitcast i32* %v6 to void (i8*)**
+  %v8 = load void (i8*)*, void (i8*)** %v7, align 4, !tbaa !9
+  %v9 = icmp eq void (i8*)* %v8, null
+  br i1 %v9, label %b3, label %b2
+
+b2:                                               ; preds = %b1
+  tail call void %v8(i8* %v4) #4
+  br label %b3
+
+b3:                                               ; preds = %b2, %b1
+  tail call void @f8(i8* %v4) #3
+  br label %b4
+
+b4:                                               ; preds = %b3, %b0
+  ret void
+}
+
+; Function Attrs: nounwind
+declare %s.3* @f4() #1
+
+declare zeroext i8 @f5(%s.2*) #2
+
+; Function Attrs: nounwind
+declare i8* @f6(i8*) #1
+
+; Function Attrs: noreturn
+declare void @f7() #0
+
+; Function Attrs: nounwind
+declare void @f8(i8*) #1
+
+attributes #0 = { noreturn "target-cpu"="hexagonv60" }
+attributes #1 = { nounwind "target-cpu"="hexagonv60" }
+attributes #2 = { "target-cpu"="hexagonv60" }
+attributes #3 = { nobuiltin nounwind }
+attributes #4 = { nobuiltin }
+attributes #5 = { nobuiltin noreturn }
+
+!0 = !{!1, !2, i64 0}
+!1 = !{!"_ZTS15__cxa_exception", !2, i64 0, !2, i64 4, !2, i64 8, !2, i64 12, !2, i64 16, !5, i64 20, !5, i64 24, !2, i64 28, !2, i64 32, !2, i64 36, !2, i64 40, !6, i64 48}
+!2 = !{!"any pointer", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C/C++ TBAA"}
+!5 = !{!"int", !3, i64 0}
+!6 = !{!"_ZTS17_Unwind_Exception", !7, i64 0, !2, i64 8, !8, i64 12, !8, i64 16}
+!7 = !{!"long long", !3, i64 0}
+!8 = !{!"long", !3, i64 0}
+!9 = !{!1, !2, i64 4}
+!10 = !{!1, !2, i64 8}
+!11 = !{!1, !2, i64 12}
+!12 = !{!1, !2, i64 16}
+!13 = !{!1, !5, i64 20}
+!14 = !{!1, !7, i64 48}
+!15 = !{!1, !2, i64 56}
+!16 = !{!17, !5, i64 4}
+!17 = !{!"_ZTS16__cxa_eh_globals", !2, i64 0, !5, i64 4, !2, i64 8, !5, i64 12}
+!18 = !{!6, !7, i64 0}

Added: llvm/trunk/test/CodeGen/Hexagon/hidden-relocation.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/hidden-relocation.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/hidden-relocation.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/hidden-relocation.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,24 @@
+; RUN: llc -march=hexagon -O2 -relocation-model=pic < %s | FileCheck %s
+;
+; CHECK: r{{[0-9]+}} = add({{pc|PC}},##g2 at PCREL)
+
+ at g0 = hidden global i32 10, align 4
+ at g1 = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 1
+ at g2 = internal global i32* @g0, align 4
+
+; Function Attrs: nounwind
+declare i32 @f0(i8*, ...) #0
+
+; Function Attrs: nounwind
+define i32 @f1() #0 {
+b0:
+  %v0 = alloca i32, align 4
+  store i32 10, i32* @g0, align 4
+  %v1 = load i32*, i32** @g2, align 4
+  %v2 = load i32, i32* %v1, align 4
+  %v3 = call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g1, i32 0, i32 0), i32 %v2)
+  %v4 = load i32, i32* %v0
+  ret i32 %v4
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/honor-optsize.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/honor-optsize.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/honor-optsize.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/honor-optsize.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,43 @@
+; RUN: llc -march=hexagon -O3 < %s | FileCheck %s
+
+target triple = "hexagon"
+
+; CHECK: f0:
+; CHECK:   call __save_r16_through_r21
+; CHECK:   .size	f0
+define i32 @f0(i8* nocapture %a0) #0 {
+b0:
+  %v0 = tail call i32 bitcast (i32 (...)* @f1 to i32 ()*)() #0
+  %v1 = tail call i32 bitcast (i32 (...)* @f1 to i32 ()*)() #0
+  %v2 = tail call i32 bitcast (i32 (...)* @f1 to i32 ()*)() #0
+  %v3 = tail call i32 bitcast (i32 (...)* @f1 to i32 ()*)() #0
+  %v4 = load i8, i8* %a0, align 1
+  %v5 = icmp eq i8 %v4, 0
+  br i1 %v5, label %b4, label %b1
+
+b1:                                               ; preds = %b0
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v6 = phi i32 [ %v10, %b2 ], [ 0, %b1 ]
+  %v7 = phi i32 [ %v2, %b2 ], [ %v1, %b1 ]
+  %v8 = phi i32 [ %v7, %b2 ], [ %v0, %b1 ]
+  %v9 = tail call i32 bitcast (i32 (...)* @f1 to i32 ()*)() #0
+  %v10 = add nsw i32 %v6, %v8
+  %v11 = tail call i32 bitcast (i32 (...)* @f1 to i32 ()*)() #0
+  %v12 = load i8, i8* %a0, align 1
+  %v13 = icmp eq i8 %v12, 0
+  br i1 %v13, label %b3, label %b2
+
+b3:                                               ; preds = %b2
+  br label %b4
+
+b4:                                               ; preds = %b3, %b0
+  %v14 = phi i32 [ 0, %b0 ], [ %v10, %b3 ]
+  ret i32 %v14
+}
+
+; Function Attrs: nounwind optsize
+declare i32 @f1(...) #0
+
+attributes #0 = { nounwind optsize }

Added: llvm/trunk/test/CodeGen/Hexagon/hrc-stack-coloring.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/hrc-stack-coloring.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/hrc-stack-coloring.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/hrc-stack-coloring.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,741 @@
+; RUN: llc  -march=hexagon < %s | FileCheck %s
+; This test is no longer connected to HRC.
+
+target triple = "hexagon"
+
+%s.0 = type { %s.1*, %s.2*, %s.3*, i16*, i32*, i8, i8, i8, i8, i8, i8, i16, i16, i16, i32, i32, i32, i32, i16, i8, i8, i8, i8, float, float, float, float, float, float, float, float, float, float, float, [4 x %s.7], [4 x %s.7], [20 x %s.7], [104 x %s.7], [20 x i32], [257 x %s.8], %s.9 }
+%s.1 = type { i16, i8, i16, i8, i8, i8, i8, i8 }
+%s.2 = type { i16, i16, i16, i16, i8, i8, i8, i8, i8, i8, i8, i8, i32, i8, i8, [20 x i16], i8, i16 }
+%s.3 = type { i8, i8, i8, i8, i16, i16, i16, i16, i16, i16, i16, i16, i16, i16, i32, i32, i32, [2 x [2 x i32]], %s.4 }
+%s.4 = type { %s.5, [976 x i8] }
+%s.5 = type { %s.6 }
+%s.6 = type { i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64 }
+%s.7 = type { i64 }
+%s.8 = type { i32, i32 }
+%s.9 = type { %s.10, [1960 x i8] }
+%s.10 = type { i64, i64, i64, i64, i64, i64, i64, [104 x %s.11], [104 x float] }
+%s.11 = type { i64, i64 }
+%s.12 = type { float, float }
+
+; CHECK: .type   f0, at function
+; This allocframe argument value may change, but typically should remain
+; in the 250-280 range. This test was introduced to test a change that
+; reduced stack usage from around 568 bytes to 280 bytes.
+; After r308350 the stack size is ~300.
+; CHECK: allocframe(r29,#304):raw
+define void @f0(%s.0* %a0, %s.11* %a1, %s.12* %a2) #0 {
+b0:
+  %v0 = alloca %s.0*, align 4
+  %v1 = alloca %s.11*, align 4
+  %v2 = alloca %s.12*, align 4
+  %v3 = alloca float, align 4
+  %v4 = alloca float, align 4
+  %v5 = alloca float, align 4
+  %v6 = alloca float, align 4
+  %v7 = alloca float, align 4
+  %v8 = alloca float, align 4
+  %v9 = alloca float, align 4
+  %v10 = alloca float, align 4
+  %v11 = alloca float, align 4
+  %v12 = alloca float, align 4
+  %v13 = alloca double, align 8
+  %v14 = alloca double, align 8
+  %v15 = alloca double, align 8
+  %v16 = alloca double, align 8
+  %v17 = alloca double, align 8
+  %v18 = alloca double, align 8
+  %v19 = alloca double, align 8
+  %v20 = alloca double, align 8
+  %v21 = alloca double, align 8
+  %v22 = alloca double, align 8
+  %v23 = alloca double, align 8
+  %v24 = alloca double, align 8
+  %v25 = alloca double, align 8
+  %v26 = alloca double, align 8
+  %v27 = alloca double, align 8
+  %v28 = alloca double, align 8
+  %v29 = alloca double, align 8
+  %v30 = alloca double, align 8
+  %v31 = alloca double, align 8
+  %v32 = alloca double, align 8
+  %v33 = alloca double, align 8
+  store %s.0* %a0, %s.0** %v0, align 4
+  store %s.11* %a1, %s.11** %v1, align 4
+  store %s.12* %a2, %s.12** %v2, align 4
+  store double 1.000000e+00, double* %v32, align 8
+  %v34 = load %s.11*, %s.11** %v1, align 4
+  %v35 = getelementptr inbounds %s.11, %s.11* %v34, i32 0
+  %v36 = getelementptr inbounds %s.11, %s.11* %v35, i32 0, i32 0
+  %v37 = load i64, i64* %v36, align 8
+  %v38 = sitofp i64 %v37 to double
+  %v39 = load double, double* %v32, align 8
+  %v40 = fmul double %v38, %v39
+  store double %v40, double* %v13, align 8
+  %v41 = load %s.11*, %s.11** %v1, align 4
+  %v42 = getelementptr inbounds %s.11, %s.11* %v41, i32 1
+  %v43 = getelementptr inbounds %s.11, %s.11* %v42, i32 0, i32 0
+  %v44 = load i64, i64* %v43, align 8
+  %v45 = sitofp i64 %v44 to double
+  %v46 = load double, double* %v32, align 8
+  %v47 = fmul double %v45, %v46
+  store double %v47, double* %v14, align 8
+  %v48 = load %s.11*, %s.11** %v1, align 4
+  %v49 = getelementptr inbounds %s.11, %s.11* %v48, i32 1
+  %v50 = getelementptr inbounds %s.11, %s.11* %v49, i32 0, i32 1
+  %v51 = load i64, i64* %v50, align 8
+  %v52 = sitofp i64 %v51 to double
+  %v53 = load double, double* %v32, align 8
+  %v54 = fmul double %v52, %v53
+  store double %v54, double* %v15, align 8
+  %v55 = load %s.11*, %s.11** %v1, align 4
+  %v56 = getelementptr inbounds %s.11, %s.11* %v55, i32 2
+  %v57 = getelementptr inbounds %s.11, %s.11* %v56, i32 0, i32 0
+  %v58 = load i64, i64* %v57, align 8
+  %v59 = sitofp i64 %v58 to double
+  %v60 = load double, double* %v32, align 8
+  %v61 = fmul double %v59, %v60
+  store double %v61, double* %v16, align 8
+  %v62 = load %s.11*, %s.11** %v1, align 4
+  %v63 = getelementptr inbounds %s.11, %s.11* %v62, i32 2
+  %v64 = getelementptr inbounds %s.11, %s.11* %v63, i32 0, i32 1
+  %v65 = load i64, i64* %v64, align 8
+  %v66 = sitofp i64 %v65 to double
+  %v67 = load double, double* %v32, align 8
+  %v68 = fmul double %v66, %v67
+  store double %v68, double* %v17, align 8
+  %v69 = load %s.11*, %s.11** %v1, align 4
+  %v70 = getelementptr inbounds %s.11, %s.11* %v69, i32 3
+  %v71 = getelementptr inbounds %s.11, %s.11* %v70, i32 0, i32 0
+  %v72 = load i64, i64* %v71, align 8
+  %v73 = sitofp i64 %v72 to double
+  %v74 = load double, double* %v32, align 8
+  %v75 = fmul double %v73, %v74
+  store double %v75, double* %v18, align 8
+  %v76 = load %s.11*, %s.11** %v1, align 4
+  %v77 = getelementptr inbounds %s.11, %s.11* %v76, i32 3
+  %v78 = getelementptr inbounds %s.11, %s.11* %v77, i32 0, i32 1
+  %v79 = load i64, i64* %v78, align 8
+  %v80 = sitofp i64 %v79 to double
+  %v81 = load double, double* %v32, align 8
+  %v82 = fmul double %v80, %v81
+  store double %v82, double* %v19, align 8
+  %v83 = load double, double* %v13, align 8
+  %v84 = load double, double* %v13, align 8
+  %v85 = fmul double %v83, %v84
+  %v86 = load double, double* %v14, align 8
+  %v87 = load double, double* %v14, align 8
+  %v88 = fmul double %v86, %v87
+  %v89 = fsub double %v85, %v88
+  %v90 = load double, double* %v15, align 8
+  %v91 = load double, double* %v15, align 8
+  %v92 = fmul double %v90, %v91
+  %v93 = fsub double %v89, %v92
+  store double %v93, double* %v20, align 8
+  %v94 = load double, double* %v13, align 8
+  %v95 = load double, double* %v14, align 8
+  %v96 = fmul double %v94, %v95
+  %v97 = load double, double* %v16, align 8
+  %v98 = load double, double* %v14, align 8
+  %v99 = fmul double %v97, %v98
+  %v100 = fsub double %v96, %v99
+  %v101 = load double, double* %v17, align 8
+  %v102 = load double, double* %v15, align 8
+  %v103 = fmul double %v101, %v102
+  %v104 = fsub double %v100, %v103
+  store double %v104, double* %v21, align 8
+  %v105 = load double, double* %v13, align 8
+  %v106 = load double, double* %v15, align 8
+  %v107 = fmul double %v105, %v106
+  %v108 = load double, double* %v16, align 8
+  %v109 = load double, double* %v15, align 8
+  %v110 = fmul double %v108, %v109
+  %v111 = fadd double %v107, %v110
+  %v112 = load double, double* %v17, align 8
+  %v113 = load double, double* %v14, align 8
+  %v114 = fmul double %v112, %v113
+  %v115 = fsub double %v111, %v114
+  store double %v115, double* %v22, align 8
+  %v116 = load double, double* %v13, align 8
+  %v117 = load double, double* %v16, align 8
+  %v118 = fmul double %v116, %v117
+  %v119 = load double, double* %v18, align 8
+  %v120 = load double, double* %v14, align 8
+  %v121 = fmul double %v119, %v120
+  %v122 = fsub double %v118, %v121
+  %v123 = load double, double* %v19, align 8
+  %v124 = load double, double* %v15, align 8
+  %v125 = fmul double %v123, %v124
+  %v126 = fsub double %v122, %v125
+  store double %v126, double* %v23, align 8
+  %v127 = load double, double* %v13, align 8
+  %v128 = load double, double* %v17, align 8
+  %v129 = fmul double %v127, %v128
+  %v130 = load double, double* %v18, align 8
+  %v131 = load double, double* %v15, align 8
+  %v132 = fmul double %v130, %v131
+  %v133 = fadd double %v129, %v132
+  %v134 = load double, double* %v19, align 8
+  %v135 = load double, double* %v14, align 8
+  %v136 = fmul double %v134, %v135
+  %v137 = fsub double %v133, %v136
+  store double %v137, double* %v24, align 8
+  %v138 = load double, double* %v14, align 8
+  %v139 = load double, double* %v14, align 8
+  %v140 = fmul double %v138, %v139
+  %v141 = load double, double* %v15, align 8
+  %v142 = load double, double* %v15, align 8
+  %v143 = fmul double %v141, %v142
+  %v144 = fsub double %v140, %v143
+  %v145 = load double, double* %v16, align 8
+  %v146 = load double, double* %v13, align 8
+  %v147 = fmul double %v145, %v146
+  %v148 = fsub double %v144, %v147
+  store double %v148, double* %v25, align 8
+  %v149 = load double, double* %v14, align 8
+  %v150 = load double, double* %v15, align 8
+  %v151 = fmul double %v149, %v150
+  %v152 = fmul double %v151, 2.000000e+00
+  %v153 = load double, double* %v17, align 8
+  %v154 = load double, double* %v13, align 8
+  %v155 = fmul double %v153, %v154
+  %v156 = fsub double %v152, %v155
+  store double %v156, double* %v26, align 8
+  %v157 = load double, double* %v14, align 8
+  %v158 = load double, double* %v16, align 8
+  %v159 = fmul double %v157, %v158
+  %v160 = load double, double* %v15, align 8
+  %v161 = load double, double* %v17, align 8
+  %v162 = fmul double %v160, %v161
+  %v163 = fsub double %v159, %v162
+  %v164 = load double, double* %v18, align 8
+  %v165 = load double, double* %v13, align 8
+  %v166 = fmul double %v164, %v165
+  %v167 = fsub double %v163, %v166
+  store double %v167, double* %v27, align 8
+  %v168 = load double, double* %v14, align 8
+  %v169 = load double, double* %v17, align 8
+  %v170 = fmul double %v168, %v169
+  %v171 = load double, double* %v15, align 8
+  %v172 = load double, double* %v16, align 8
+  %v173 = fmul double %v171, %v172
+  %v174 = fadd double %v170, %v173
+  %v175 = load double, double* %v19, align 8
+  %v176 = load double, double* %v13, align 8
+  %v177 = fmul double %v175, %v176
+  %v178 = fsub double %v174, %v177
+  store double %v178, double* %v28, align 8
+  %v179 = load double, double* %v16, align 8
+  %v180 = load double, double* %v16, align 8
+  %v181 = fmul double %v179, %v180
+  %v182 = load double, double* %v17, align 8
+  %v183 = load double, double* %v17, align 8
+  %v184 = fmul double %v182, %v183
+  %v185 = fsub double %v181, %v184
+  %v186 = load double, double* %v18, align 8
+  %v187 = load double, double* %v14, align 8
+  %v188 = fmul double %v186, %v187
+  %v189 = fsub double %v185, %v188
+  %v190 = load double, double* %v19, align 8
+  %v191 = load double, double* %v15, align 8
+  %v192 = fmul double %v190, %v191
+  %v193 = fadd double %v189, %v192
+  store double %v193, double* %v29, align 8
+  %v194 = load double, double* %v16, align 8
+  %v195 = load double, double* %v17, align 8
+  %v196 = fmul double %v194, %v195
+  %v197 = fmul double %v196, 2.000000e+00
+  %v198 = load double, double* %v18, align 8
+  %v199 = load double, double* %v15, align 8
+  %v200 = fmul double %v198, %v199
+  %v201 = fsub double %v197, %v200
+  %v202 = load double, double* %v19, align 8
+  %v203 = load double, double* %v14, align 8
+  %v204 = fmul double %v202, %v203
+  %v205 = fsub double %v201, %v204
+  store double %v205, double* %v30, align 8
+  %v206 = load double, double* %v20, align 8
+  %v207 = load double, double* %v20, align 8
+  %v208 = fmul double %v206, %v207
+  %v209 = load double, double* %v21, align 8
+  %v210 = load double, double* %v21, align 8
+  %v211 = fmul double %v209, %v210
+  %v212 = fsub double %v208, %v211
+  %v213 = load double, double* %v22, align 8
+  %v214 = load double, double* %v22, align 8
+  %v215 = fmul double %v213, %v214
+  %v216 = fsub double %v212, %v215
+  %v217 = load double, double* %v23, align 8
+  %v218 = load double, double* %v25, align 8
+  %v219 = fmul double %v217, %v218
+  %v220 = fmul double %v219, 2.000000e+00
+  %v221 = fadd double %v216, %v220
+  %v222 = load double, double* %v24, align 8
+  %v223 = load double, double* %v26, align 8
+  %v224 = fmul double %v222, %v223
+  %v225 = fmul double %v224, 2.000000e+00
+  %v226 = fadd double %v221, %v225
+  %v227 = load double, double* %v27, align 8
+  %v228 = load double, double* %v27, align 8
+  %v229 = fmul double %v227, %v228
+  %v230 = fsub double %v226, %v229
+  %v231 = load double, double* %v28, align 8
+  %v232 = load double, double* %v28, align 8
+  %v233 = fmul double %v231, %v232
+  %v234 = fsub double %v230, %v233
+  %v235 = load double, double* %v29, align 8
+  %v236 = load double, double* %v29, align 8
+  %v237 = fmul double %v235, %v236
+  %v238 = fadd double %v234, %v237
+  %v239 = load double, double* %v30, align 8
+  %v240 = load double, double* %v30, align 8
+  %v241 = fmul double %v239, %v240
+  %v242 = fadd double %v238, %v241
+  store double %v242, double* %v31, align 8
+  %v243 = load double, double* %v31, align 8
+  %v244 = call double @f1(double %v243) #1
+  %v245 = load double, double* %v32, align 8
+  %v246 = fcmp olt double %v244, %v245
+  br i1 %v246, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  %v247 = load %s.0*, %s.0** %v0, align 4
+  %v248 = getelementptr inbounds %s.0, %s.0* %v247, i32 0, i32 2
+  %v249 = load %s.3*, %s.3** %v248, align 4
+  %v250 = getelementptr inbounds %s.3, %s.3* %v249, i32 0, i32 0
+  store i8 3, i8* %v250, align 1
+  br label %b3
+
+b2:                                               ; preds = %b0
+  %v251 = load double, double* %v32, align 8
+  %v252 = load double, double* %v31, align 8
+  %v253 = fdiv double %v251, %v252
+  store double %v253, double* %v32, align 8
+  %v254 = load double, double* %v13, align 8
+  %v255 = load double, double* %v20, align 8
+  %v256 = fmul double %v254, %v255
+  %v257 = load double, double* %v14, align 8
+  %v258 = load double, double* %v21, align 8
+  %v259 = fmul double %v257, %v258
+  %v260 = fsub double %v256, %v259
+  %v261 = load double, double* %v15, align 8
+  %v262 = load double, double* %v22, align 8
+  %v263 = fmul double %v261, %v262
+  %v264 = fsub double %v260, %v263
+  %v265 = load double, double* %v16, align 8
+  %v266 = load double, double* %v25, align 8
+  %v267 = fmul double %v265, %v266
+  %v268 = fadd double %v264, %v267
+  %v269 = load double, double* %v17, align 8
+  %v270 = load double, double* %v26, align 8
+  %v271 = fmul double %v269, %v270
+  %v272 = fadd double %v268, %v271
+  store double %v272, double* %v33, align 8
+  %v273 = load double, double* %v33, align 8
+  %v274 = load double, double* %v32, align 8
+  %v275 = fmul double %v273, %v274
+  %v276 = fptrunc double %v275 to float
+  store float %v276, float* %v3, align 4
+  %v277 = load double, double* %v14, align 8
+  %v278 = fsub double -0.000000e+00, %v277
+  %v279 = load double, double* %v20, align 8
+  %v280 = fmul double %v278, %v279
+  %v281 = load double, double* %v16, align 8
+  %v282 = load double, double* %v21, align 8
+  %v283 = fmul double %v281, %v282
+  %v284 = fadd double %v280, %v283
+  %v285 = load double, double* %v17, align 8
+  %v286 = load double, double* %v22, align 8
+  %v287 = fmul double %v285, %v286
+  %v288 = fadd double %v284, %v287
+  %v289 = load double, double* %v18, align 8
+  %v290 = load double, double* %v25, align 8
+  %v291 = fmul double %v289, %v290
+  %v292 = fsub double %v288, %v291
+  %v293 = load double, double* %v19, align 8
+  %v294 = load double, double* %v26, align 8
+  %v295 = fmul double %v293, %v294
+  %v296 = fsub double %v292, %v295
+  store double %v296, double* %v33, align 8
+  %v297 = load double, double* %v33, align 8
+  %v298 = load double, double* %v32, align 8
+  %v299 = fmul double %v297, %v298
+  %v300 = fptrunc double %v299 to float
+  store float %v300, float* %v4, align 4
+  %v301 = load double, double* %v15, align 8
+  %v302 = fsub double -0.000000e+00, %v301
+  %v303 = load double, double* %v20, align 8
+  %v304 = fmul double %v302, %v303
+  %v305 = load double, double* %v16, align 8
+  %v306 = load double, double* %v22, align 8
+  %v307 = fmul double %v305, %v306
+  %v308 = fsub double %v304, %v307
+  %v309 = load double, double* %v17, align 8
+  %v310 = load double, double* %v21, align 8
+  %v311 = fmul double %v309, %v310
+  %v312 = fadd double %v308, %v311
+  %v313 = load double, double* %v18, align 8
+  %v314 = load double, double* %v26, align 8
+  %v315 = fmul double %v313, %v314
+  %v316 = fadd double %v312, %v315
+  %v317 = load double, double* %v19, align 8
+  %v318 = load double, double* %v25, align 8
+  %v319 = fmul double %v317, %v318
+  %v320 = fsub double %v316, %v319
+  store double %v320, double* %v33, align 8
+  %v321 = load double, double* %v33, align 8
+  %v322 = load double, double* %v32, align 8
+  %v323 = fmul double %v321, %v322
+  %v324 = fptrunc double %v323 to float
+  store float %v324, float* %v5, align 4
+  %v325 = load double, double* %v16, align 8
+  %v326 = load double, double* %v29, align 8
+  %v327 = fmul double %v325, %v326
+  %v328 = load double, double* %v17, align 8
+  %v329 = load double, double* %v30, align 8
+  %v330 = fmul double %v328, %v329
+  %v331 = fadd double %v327, %v330
+  %v332 = load double, double* %v14, align 8
+  %v333 = load double, double* %v27, align 8
+  %v334 = fmul double %v332, %v333
+  %v335 = fsub double %v331, %v334
+  %v336 = load double, double* %v15, align 8
+  %v337 = load double, double* %v28, align 8
+  %v338 = fmul double %v336, %v337
+  %v339 = fsub double %v335, %v338
+  %v340 = load double, double* %v13, align 8
+  %v341 = load double, double* %v25, align 8
+  %v342 = fmul double %v340, %v341
+  %v343 = fadd double %v339, %v342
+  store double %v343, double* %v33, align 8
+  %v344 = load double, double* %v33, align 8
+  %v345 = load double, double* %v32, align 8
+  %v346 = fmul double %v344, %v345
+  %v347 = fptrunc double %v346 to float
+  store float %v347, float* %v6, align 4
+  %v348 = load double, double* %v16, align 8
+  %v349 = load double, double* %v30, align 8
+  %v350 = fmul double %v348, %v349
+  %v351 = load double, double* %v17, align 8
+  %v352 = load double, double* %v29, align 8
+  %v353 = fmul double %v351, %v352
+  %v354 = fsub double %v350, %v353
+  %v355 = load double, double* %v14, align 8
+  %v356 = load double, double* %v28, align 8
+  %v357 = fmul double %v355, %v356
+  %v358 = fsub double %v354, %v357
+  %v359 = load double, double* %v15, align 8
+  %v360 = load double, double* %v27, align 8
+  %v361 = fmul double %v359, %v360
+  %v362 = fadd double %v358, %v361
+  %v363 = load double, double* %v13, align 8
+  %v364 = load double, double* %v26, align 8
+  %v365 = fmul double %v363, %v364
+  %v366 = fadd double %v362, %v365
+  store double %v366, double* %v33, align 8
+  %v367 = load double, double* %v33, align 8
+  %v368 = load double, double* %v32, align 8
+  %v369 = fmul double %v367, %v368
+  %v370 = fptrunc double %v369 to float
+  store float %v370, float* %v7, align 4
+  %v371 = load double, double* %v14, align 8
+  %v372 = fsub double -0.000000e+00, %v371
+  %v373 = load double, double* %v29, align 8
+  %v374 = fmul double %v372, %v373
+  %v375 = load double, double* %v15, align 8
+  %v376 = load double, double* %v30, align 8
+  %v377 = fmul double %v375, %v376
+  %v378 = fsub double %v374, %v377
+  %v379 = load double, double* %v13, align 8
+  %v380 = load double, double* %v27, align 8
+  %v381 = fmul double %v379, %v380
+  %v382 = fadd double %v378, %v381
+  %v383 = load double, double* %v14, align 8
+  %v384 = load double, double* %v25, align 8
+  %v385 = fmul double %v383, %v384
+  %v386 = fsub double %v382, %v385
+  %v387 = load double, double* %v15, align 8
+  %v388 = load double, double* %v26, align 8
+  %v389 = fmul double %v387, %v388
+  %v390 = fadd double %v386, %v389
+  store double %v390, double* %v33, align 8
+  %v391 = load double, double* %v33, align 8
+  %v392 = load double, double* %v32, align 8
+  %v393 = fmul double %v391, %v392
+  %v394 = fptrunc double %v393 to float
+  store float %v394, float* %v8, align 4
+  %v395 = load double, double* %v14, align 8
+  %v396 = fsub double -0.000000e+00, %v395
+  %v397 = load double, double* %v30, align 8
+  %v398 = fmul double %v396, %v397
+  %v399 = load double, double* %v15, align 8
+  %v400 = load double, double* %v29, align 8
+  %v401 = fmul double %v399, %v400
+  %v402 = fadd double %v398, %v401
+  %v403 = load double, double* %v13, align 8
+  %v404 = load double, double* %v28, align 8
+  %v405 = fmul double %v403, %v404
+  %v406 = fadd double %v402, %v405
+  %v407 = load double, double* %v14, align 8
+  %v408 = load double, double* %v26, align 8
+  %v409 = fmul double %v407, %v408
+  %v410 = fsub double %v406, %v409
+  %v411 = load double, double* %v15, align 8
+  %v412 = load double, double* %v25, align 8
+  %v413 = fmul double %v411, %v412
+  %v414 = fsub double %v410, %v413
+  store double %v414, double* %v33, align 8
+  %v415 = load double, double* %v33, align 8
+  %v416 = load double, double* %v32, align 8
+  %v417 = fmul double %v415, %v416
+  %v418 = fptrunc double %v417 to float
+  store float %v418, float* %v9, align 4
+  %v419 = load double, double* %v13, align 8
+  %v420 = load double, double* %v20, align 8
+  %v421 = fmul double %v419, %v420
+  %v422 = load double, double* %v16, align 8
+  %v423 = load double, double* %v23, align 8
+  %v424 = fmul double %v422, %v423
+  %v425 = fsub double %v421, %v424
+  %v426 = load double, double* %v17, align 8
+  %v427 = load double, double* %v24, align 8
+  %v428 = fmul double %v426, %v427
+  %v429 = fsub double %v425, %v428
+  %v430 = load double, double* %v18, align 8
+  %v431 = load double, double* %v27, align 8
+  %v432 = fmul double %v430, %v431
+  %v433 = fadd double %v429, %v432
+  %v434 = load double, double* %v19, align 8
+  %v435 = load double, double* %v28, align 8
+  %v436 = fmul double %v434, %v435
+  %v437 = fadd double %v433, %v436
+  store double %v437, double* %v33, align 8
+  %v438 = load double, double* %v33, align 8
+  %v439 = load double, double* %v32, align 8
+  %v440 = fmul double %v438, %v439
+  %v441 = fptrunc double %v440 to float
+  store float %v441, float* %v10, align 4
+  %v442 = load double, double* %v18, align 8
+  %v443 = fsub double -0.000000e+00, %v442
+  %v444 = load double, double* %v29, align 8
+  %v445 = fmul double %v443, %v444
+  %v446 = load double, double* %v19, align 8
+  %v447 = load double, double* %v30, align 8
+  %v448 = fmul double %v446, %v447
+  %v449 = fsub double %v445, %v448
+  %v450 = load double, double* %v14, align 8
+  %v451 = load double, double* %v23, align 8
+  %v452 = fmul double %v450, %v451
+  %v453 = fadd double %v449, %v452
+  %v454 = load double, double* %v15, align 8
+  %v455 = load double, double* %v24, align 8
+  %v456 = fmul double %v454, %v455
+  %v457 = fadd double %v453, %v456
+  %v458 = load double, double* %v13, align 8
+  %v459 = load double, double* %v21, align 8
+  %v460 = fmul double %v458, %v459
+  %v461 = fsub double %v457, %v460
+  store double %v461, double* %v33, align 8
+  %v462 = load double, double* %v33, align 8
+  %v463 = load double, double* %v32, align 8
+  %v464 = fmul double %v462, %v463
+  %v465 = fptrunc double %v464 to float
+  store float %v465, float* %v11, align 4
+  %v466 = load double, double* %v18, align 8
+  %v467 = fsub double -0.000000e+00, %v466
+  %v468 = load double, double* %v30, align 8
+  %v469 = fmul double %v467, %v468
+  %v470 = load double, double* %v19, align 8
+  %v471 = load double, double* %v29, align 8
+  %v472 = fmul double %v470, %v471
+  %v473 = fadd double %v469, %v472
+  %v474 = load double, double* %v14, align 8
+  %v475 = load double, double* %v24, align 8
+  %v476 = fmul double %v474, %v475
+  %v477 = fadd double %v473, %v476
+  %v478 = load double, double* %v15, align 8
+  %v479 = load double, double* %v23, align 8
+  %v480 = fmul double %v478, %v479
+  %v481 = fsub double %v477, %v480
+  %v482 = load double, double* %v13, align 8
+  %v483 = load double, double* %v22, align 8
+  %v484 = fmul double %v482, %v483
+  %v485 = fsub double %v481, %v484
+  store double %v485, double* %v33, align 8
+  %v486 = load double, double* %v33, align 8
+  %v487 = load double, double* %v32, align 8
+  %v488 = fmul double %v486, %v487
+  %v489 = fptrunc double %v488 to float
+  store float %v489, float* %v12, align 4
+  %v490 = load float, float* %v3, align 4
+  %v491 = load %s.12*, %s.12** %v2, align 4
+  %v492 = getelementptr inbounds %s.12, %s.12* %v491, i32 0
+  %v493 = getelementptr inbounds %s.12, %s.12* %v492, i32 0, i32 0
+  store float %v490, float* %v493, align 4
+  %v494 = load %s.12*, %s.12** %v2, align 4
+  %v495 = getelementptr inbounds %s.12, %s.12* %v494, i32 0
+  %v496 = getelementptr inbounds %s.12, %s.12* %v495, i32 0, i32 1
+  store float 0.000000e+00, float* %v496, align 4
+  %v497 = load float, float* %v4, align 4
+  %v498 = load %s.12*, %s.12** %v2, align 4
+  %v499 = getelementptr inbounds %s.12, %s.12* %v498, i32 1
+  %v500 = getelementptr inbounds %s.12, %s.12* %v499, i32 0, i32 0
+  store float %v497, float* %v500, align 4
+  %v501 = load float, float* %v5, align 4
+  %v502 = load %s.12*, %s.12** %v2, align 4
+  %v503 = getelementptr inbounds %s.12, %s.12* %v502, i32 1
+  %v504 = getelementptr inbounds %s.12, %s.12* %v503, i32 0, i32 1
+  store float %v501, float* %v504, align 4
+  %v505 = load float, float* %v6, align 4
+  %v506 = load %s.12*, %s.12** %v2, align 4
+  %v507 = getelementptr inbounds %s.12, %s.12* %v506, i32 2
+  %v508 = getelementptr inbounds %s.12, %s.12* %v507, i32 0, i32 0
+  store float %v505, float* %v508, align 4
+  %v509 = load float, float* %v7, align 4
+  %v510 = load %s.12*, %s.12** %v2, align 4
+  %v511 = getelementptr inbounds %s.12, %s.12* %v510, i32 2
+  %v512 = getelementptr inbounds %s.12, %s.12* %v511, i32 0, i32 1
+  store float %v509, float* %v512, align 4
+  %v513 = load float, float* %v8, align 4
+  %v514 = load %s.12*, %s.12** %v2, align 4
+  %v515 = getelementptr inbounds %s.12, %s.12* %v514, i32 3
+  %v516 = getelementptr inbounds %s.12, %s.12* %v515, i32 0, i32 0
+  store float %v513, float* %v516, align 4
+  %v517 = load float, float* %v9, align 4
+  %v518 = load %s.12*, %s.12** %v2, align 4
+  %v519 = getelementptr inbounds %s.12, %s.12* %v518, i32 3
+  %v520 = getelementptr inbounds %s.12, %s.12* %v519, i32 0, i32 1
+  store float %v517, float* %v520, align 4
+  %v521 = load float, float* %v4, align 4
+  %v522 = load %s.12*, %s.12** %v2, align 4
+  %v523 = getelementptr inbounds %s.12, %s.12* %v522, i32 4
+  %v524 = getelementptr inbounds %s.12, %s.12* %v523, i32 0, i32 0
+  store float %v521, float* %v524, align 4
+  %v525 = load float, float* %v5, align 4
+  %v526 = fsub float -0.000000e+00, %v525
+  %v527 = load %s.12*, %s.12** %v2, align 4
+  %v528 = getelementptr inbounds %s.12, %s.12* %v527, i32 4
+  %v529 = getelementptr inbounds %s.12, %s.12* %v528, i32 0, i32 1
+  store float %v526, float* %v529, align 4
+  %v530 = load float, float* %v10, align 4
+  %v531 = load %s.12*, %s.12** %v2, align 4
+  %v532 = getelementptr inbounds %s.12, %s.12* %v531, i32 5
+  %v533 = getelementptr inbounds %s.12, %s.12* %v532, i32 0, i32 0
+  store float %v530, float* %v533, align 4
+  %v534 = load %s.12*, %s.12** %v2, align 4
+  %v535 = getelementptr inbounds %s.12, %s.12* %v534, i32 5
+  %v536 = getelementptr inbounds %s.12, %s.12* %v535, i32 0, i32 1
+  store float 0.000000e+00, float* %v536, align 4
+  %v537 = load float, float* %v11, align 4
+  %v538 = load %s.12*, %s.12** %v2, align 4
+  %v539 = getelementptr inbounds %s.12, %s.12* %v538, i32 6
+  %v540 = getelementptr inbounds %s.12, %s.12* %v539, i32 0, i32 0
+  store float %v537, float* %v540, align 4
+  %v541 = load float, float* %v12, align 4
+  %v542 = load %s.12*, %s.12** %v2, align 4
+  %v543 = getelementptr inbounds %s.12, %s.12* %v542, i32 6
+  %v544 = getelementptr inbounds %s.12, %s.12* %v543, i32 0, i32 1
+  store float %v541, float* %v544, align 4
+  %v545 = load float, float* %v6, align 4
+  %v546 = load %s.12*, %s.12** %v2, align 4
+  %v547 = getelementptr inbounds %s.12, %s.12* %v546, i32 7
+  %v548 = getelementptr inbounds %s.12, %s.12* %v547, i32 0, i32 0
+  store float %v545, float* %v548, align 4
+  %v549 = load float, float* %v7, align 4
+  %v550 = load %s.12*, %s.12** %v2, align 4
+  %v551 = getelementptr inbounds %s.12, %s.12* %v550, i32 7
+  %v552 = getelementptr inbounds %s.12, %s.12* %v551, i32 0, i32 1
+  store float %v549, float* %v552, align 4
+  %v553 = load float, float* %v6, align 4
+  %v554 = load %s.12*, %s.12** %v2, align 4
+  %v555 = getelementptr inbounds %s.12, %s.12* %v554, i32 8
+  %v556 = getelementptr inbounds %s.12, %s.12* %v555, i32 0, i32 0
+  store float %v553, float* %v556, align 4
+  %v557 = load float, float* %v7, align 4
+  %v558 = fsub float -0.000000e+00, %v557
+  %v559 = load %s.12*, %s.12** %v2, align 4
+  %v560 = getelementptr inbounds %s.12, %s.12* %v559, i32 8
+  %v561 = getelementptr inbounds %s.12, %s.12* %v560, i32 0, i32 1
+  store float %v558, float* %v561, align 4
+  %v562 = load float, float* %v11, align 4
+  %v563 = load %s.12*, %s.12** %v2, align 4
+  %v564 = getelementptr inbounds %s.12, %s.12* %v563, i32 9
+  %v565 = getelementptr inbounds %s.12, %s.12* %v564, i32 0, i32 0
+  store float %v562, float* %v565, align 4
+  %v566 = load float, float* %v12, align 4
+  %v567 = fsub float -0.000000e+00, %v566
+  %v568 = load %s.12*, %s.12** %v2, align 4
+  %v569 = getelementptr inbounds %s.12, %s.12* %v568, i32 9
+  %v570 = getelementptr inbounds %s.12, %s.12* %v569, i32 0, i32 1
+  store float %v567, float* %v570, align 4
+  %v571 = load float, float* %v10, align 4
+  %v572 = load %s.12*, %s.12** %v2, align 4
+  %v573 = getelementptr inbounds %s.12, %s.12* %v572, i32 10
+  %v574 = getelementptr inbounds %s.12, %s.12* %v573, i32 0, i32 0
+  store float %v571, float* %v574, align 4
+  %v575 = load %s.12*, %s.12** %v2, align 4
+  %v576 = getelementptr inbounds %s.12, %s.12* %v575, i32 10
+  %v577 = getelementptr inbounds %s.12, %s.12* %v576, i32 0, i32 1
+  store float 0.000000e+00, float* %v577, align 4
+  %v578 = load float, float* %v4, align 4
+  %v579 = load %s.12*, %s.12** %v2, align 4
+  %v580 = getelementptr inbounds %s.12, %s.12* %v579, i32 11
+  %v581 = getelementptr inbounds %s.12, %s.12* %v580, i32 0, i32 0
+  store float %v578, float* %v581, align 4
+  %v582 = load float, float* %v5, align 4
+  %v583 = load %s.12*, %s.12** %v2, align 4
+  %v584 = getelementptr inbounds %s.12, %s.12* %v583, i32 11
+  %v585 = getelementptr inbounds %s.12, %s.12* %v584, i32 0, i32 1
+  store float %v582, float* %v585, align 4
+  %v586 = load float, float* %v8, align 4
+  %v587 = load %s.12*, %s.12** %v2, align 4
+  %v588 = getelementptr inbounds %s.12, %s.12* %v587, i32 12
+  %v589 = getelementptr inbounds %s.12, %s.12* %v588, i32 0, i32 0
+  store float %v586, float* %v589, align 4
+  %v590 = load float, float* %v9, align 4
+  %v591 = fsub float -0.000000e+00, %v590
+  %v592 = load %s.12*, %s.12** %v2, align 4
+  %v593 = getelementptr inbounds %s.12, %s.12* %v592, i32 12
+  %v594 = getelementptr inbounds %s.12, %s.12* %v593, i32 0, i32 1
+  store float %v591, float* %v594, align 4
+  %v595 = load float, float* %v6, align 4
+  %v596 = load %s.12*, %s.12** %v2, align 4
+  %v597 = getelementptr inbounds %s.12, %s.12* %v596, i32 13
+  %v598 = getelementptr inbounds %s.12, %s.12* %v597, i32 0, i32 0
+  store float %v595, float* %v598, align 4
+  %v599 = load float, float* %v7, align 4
+  %v600 = fsub float -0.000000e+00, %v599
+  %v601 = load %s.12*, %s.12** %v2, align 4
+  %v602 = getelementptr inbounds %s.12, %s.12* %v601, i32 13
+  %v603 = getelementptr inbounds %s.12, %s.12* %v602, i32 0, i32 1
+  store float %v600, float* %v603, align 4
+  %v604 = load float, float* %v4, align 4
+  %v605 = load %s.12*, %s.12** %v2, align 4
+  %v606 = getelementptr inbounds %s.12, %s.12* %v605, i32 14
+  %v607 = getelementptr inbounds %s.12, %s.12* %v606, i32 0, i32 0
+  store float %v604, float* %v607, align 4
+  %v608 = load float, float* %v5, align 4
+  %v609 = fsub float -0.000000e+00, %v608
+  %v610 = load %s.12*, %s.12** %v2, align 4
+  %v611 = getelementptr inbounds %s.12, %s.12* %v610, i32 14
+  %v612 = getelementptr inbounds %s.12, %s.12* %v611, i32 0, i32 1
+  store float %v609, float* %v612, align 4
+  %v613 = load float, float* %v3, align 4
+  %v614 = load %s.12*, %s.12** %v2, align 4
+  %v615 = getelementptr inbounds %s.12, %s.12* %v614, i32 15
+  %v616 = getelementptr inbounds %s.12, %s.12* %v615, i32 0, i32 0
+  store float %v613, float* %v616, align 4
+  %v617 = load %s.12*, %s.12** %v2, align 4
+  %v618 = getelementptr inbounds %s.12, %s.12* %v617, i32 15
+  %v619 = getelementptr inbounds %s.12, %s.12* %v618, i32 0, i32 1
+  store float 0.000000e+00, float* %v619, align 4
+  br label %b3
+
+b3:                                               ; preds = %b2, %b1
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare double @f1(double) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/hvx-byte-store-double.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/hvx-byte-store-double.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/hvx-byte-store-double.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/hvx-byte-store-double.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,57 @@
+; RUN: llc -march=hexagon -mattr=+hvxv60,+hvx-length128b < %s | FileCheck %s
+
+; Test that we generate code for the vector byte-enable store intrinsics.
+
+; CHECK-LABEL: f0:
+; CHECK: if (q{{[0-3]}}) vmem(r{{[0-9]+}}+#0) = v{{[0-9]+}}
+
+define void @f0(<32 x i32> %a0, i8* %a1, <32 x i32> %a2) local_unnamed_addr {
+b0:
+  %v0 = bitcast <32 x i32> %a0 to <1024 x i1>
+  tail call void @llvm.hexagon.V6.vS32b.qpred.ai.128B(<1024 x i1> %v0, i8* %a1, <32 x i32> %a2)
+  ret void
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.hexagon.V6.vS32b.qpred.ai.128B(<1024 x i1>, i8*, <32 x i32>) #0
+
+; CHECK-LABEL: f1:
+; CHECK: if (!q{{[0-3]}}) vmem(r{{[0-9]+}}+#0) = v{{[0-9]+}}
+
+define void @f1(<32 x i32> %a0, i8* %a1, <32 x i32> %a2) local_unnamed_addr {
+b0:
+  %v0 = bitcast <32 x i32> %a0 to <1024 x i1>
+  tail call void @llvm.hexagon.V6.vS32b.nqpred.ai.128B(<1024 x i1> %v0, i8* %a1, <32 x i32> %a2)
+  ret void
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.hexagon.V6.vS32b.nqpred.ai.128B(<1024 x i1>, i8*, <32 x i32>) #0
+
+; CHECK-LABEL: f2:
+; CHECK: if (q{{[0-3]}}) vmem(r{{[0-9]+}}+#0):nt = v{{[0-9]+}}
+
+define void @f2(<32 x i32> %a0, i8* %a1, <32 x i32> %a2) local_unnamed_addr {
+b0:
+  %v0 = bitcast <32 x i32> %a0 to <1024 x i1>
+  tail call void @llvm.hexagon.V6.vS32b.nt.qpred.ai.128B(<1024 x i1> %v0, i8* %a1, <32 x i32> %a2)
+  ret void
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.hexagon.V6.vS32b.nt.qpred.ai.128B(<1024 x i1>, i8*, <32 x i32>) #0
+
+; CHECK-LABEL: f3:
+; CHECK: if (!q{{[0-3]}}) vmem(r{{[0-9]+}}+#0):nt = v{{[0-9]+}}
+
+define void @f3(<32 x i32> %a0, i8* %a1, <32 x i32> %a2) local_unnamed_addr {
+b0:
+  %v0 = bitcast <32 x i32> %a0 to <1024 x i1>
+  tail call void @llvm.hexagon.V6.vS32b.nt.nqpred.ai.128B(<1024 x i1> %v0, i8* %a1, <32 x i32> %a2)
+  ret void
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.hexagon.V6.vS32b.nt.nqpred.ai.128B(<1024 x i1>, i8*, <32 x i32>) #0
+
+attributes #0 = { argmemonly nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/hvx-byte-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/hvx-byte-store.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/hvx-byte-store.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/hvx-byte-store.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,57 @@
+; RUN: llc -march=hexagon -mattr=+hvxv60,+hvx-length64b < %s | FileCheck %s
+
+; Test that we generate code for the vector byte enabled store intrinsics.
+
+; CHECK-LABEL: f0:
+; CHECK: if (q{{[0-3]}}) vmem(r{{[0-9]+}}+#0) = v{{[0-9]+}}
+
+define void @f0(<16 x i32> %a0, i8* %a1, <16 x i32> %a2) local_unnamed_addr {
+b0:
+  %v0 = bitcast <16 x i32> %a0 to <512 x i1>
+  tail call void @llvm.hexagon.V6.vS32b.qpred.ai(<512 x i1> %v0, i8* %a1, <16 x i32> %a2)
+  ret void
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.hexagon.V6.vS32b.qpred.ai(<512 x i1>, i8*, <16 x i32>) #0
+
+; CHECK-LABEL: f1:
+; CHECK: if (!q{{[0-3]}}) vmem(r{{[0-9]+}}+#0) = v{{[0-9]+}}
+
+define void @f1(<16 x i32> %a0, i8* %a1, <16 x i32> %a2) local_unnamed_addr {
+b0:
+  %v0 = bitcast <16 x i32> %a0 to <512 x i1>
+  tail call void @llvm.hexagon.V6.vS32b.nqpred.ai(<512 x i1> %v0, i8* %a1, <16 x i32> %a2)
+  ret void
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.hexagon.V6.vS32b.nqpred.ai(<512 x i1>, i8*, <16 x i32>) #0
+
+; CHECK-LABEL: f2:
+; CHECK: if (q{{[0-3]}}) vmem(r{{[0-9]+}}+#0):nt = v{{[0-9]+}}
+
+define void @f2(<16 x i32> %a0, i8* %a1, <16 x i32> %a2) local_unnamed_addr {
+b0:
+  %v0 = bitcast <16 x i32> %a0 to <512 x i1>
+  tail call void @llvm.hexagon.V6.vS32b.nt.qpred.ai(<512 x i1> %v0, i8* %a1, <16 x i32> %a2)
+  ret void
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.hexagon.V6.vS32b.nt.qpred.ai(<512 x i1>, i8*, <16 x i32>) #0
+
+; CHECK-LABEL: f3:
+; CHECK: if (!q{{[0-3]}}) vmem(r{{[0-9]+}}+#0):nt = v{{[0-9]+}}
+
+define void @f3(<16 x i32> %a0, i8* %a1, <16 x i32> %a2) local_unnamed_addr {
+b0:
+  %v0 = bitcast <16 x i32> %a0 to <512 x i1>
+  tail call void @llvm.hexagon.V6.vS32b.nt.nqpred.ai(<512 x i1> %v0, i8* %a1, <16 x i32> %a2)
+  ret void
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.hexagon.V6.vS32b.nt.nqpred.ai(<512 x i1>, i8*, <16 x i32>) #0
+
+attributes #0 = { argmemonly nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/hvx-dbl-dual-output.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/hvx-dbl-dual-output.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/hvx-dbl-dual-output.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/hvx-dbl-dual-output.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,34 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Test that we compile the HVX dual output intrinsics.
+
+; CHECK-LABEL: f0:
+; CHECK: v{{[0-9]+}}.w = vadd(v{{[0-9]+}}.w,v{{[0-9]+}}.w,q{{[0-3]}}):carry
+define inreg <32 x i32> @f0(<32 x i32> %a0, <32 x i32> %a1, i8* nocapture readonly %a2) #0 {
+b0:
+  %v0 = bitcast i8* %a2 to <1024 x i1>*
+  %v1 = load <1024 x i1>, <1024 x i1>* %v0, align 128
+  %v2 = tail call { <32 x i32>, <1024 x i1> } @llvm.hexagon.V6.vaddcarry.128B(<32 x i32> %a0, <32 x i32> %a1, <1024 x i1> %v1)
+  %v3 = extractvalue { <32 x i32>, <1024 x i1> } %v2, 0
+  ret <32 x i32> %v3
+}
+
+; CHECK-LABEL: f1:
+; CHECK: v{{[0-9]+}}.w = vsub(v{{[0-9]+}}.w,v{{[0-9]+}}.w,q{{[0-3]}}):carry
+define inreg <32 x i32> @f1(<32 x i32> %a0, <32 x i32> %a1, i8* nocapture readonly %a2) #0 {
+b0:
+  %v0 = bitcast i8* %a2 to <1024 x i1>*
+  %v1 = load <1024 x i1>, <1024 x i1>* %v0, align 128
+  %v2 = tail call { <32 x i32>, <1024 x i1> } @llvm.hexagon.V6.vsubcarry.128B(<32 x i32> %a0, <32 x i32> %a1, <1024 x i1> %v1)
+  %v3 = extractvalue { <32 x i32>, <1024 x i1> } %v2, 0
+  ret <32 x i32> %v3
+}
+
+; Function Attrs: nounwind readnone
+declare { <32 x i32>, <1024 x i1> } @llvm.hexagon.V6.vaddcarry.128B(<32 x i32>, <32 x i32>, <1024 x i1>) #1
+
+; Function Attrs: nounwind readnone
+declare { <32 x i32>, <1024 x i1> } @llvm.hexagon.V6.vsubcarry.128B(<32 x i32>, <32 x i32>, <1024 x i1>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv65" "target-features"="+hvxv65,+hvx-length128b" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/hvx-double-vzero.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/hvx-double-vzero.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/hvx-double-vzero.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/hvx-double-vzero.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,33 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Test that V_vzero and W_vzero intrinsics work. The W_vzero intrinsic was added
+; for v65/hvx.
+
+; CHECK-LABEL: f0:
+; CHECK: [[VREG1:v([0-9]+)]] = vxor([[VREG1]],[[VREG1]])
+define void @f0(i16** nocapture %a0) #0 {
+b0:
+  %v0 = bitcast i16** %a0 to <32 x i32>*
+  %v1 = tail call <32 x i32> @llvm.hexagon.V6.vd0.128B()
+  store <32 x i32> %v1, <32 x i32>* %v0, align 64
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vd0.128B() #1
+
+; CHECK-LABEL: f1:
+; CHECK: [[VREG2:v([0-9]+):([0-9]+).w]] = vsub([[VREG2]],[[VREG2]])
+define void @f1(i16** nocapture %a0) #0 {
+b0:
+  %v0 = bitcast i16** %a0 to <64 x i32>*
+  %v1 = tail call <64 x i32> @llvm.hexagon.V6.vdd0.128B()
+  store <64 x i32> %v1, <64 x i32>* %v0, align 128
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <64 x i32> @llvm.hexagon.V6.vdd0.128B() #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv65" "target-features"="+hvxv65,+hvx-length128b" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/hvx-dual-output.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/hvx-dual-output.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/hvx-dual-output.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/hvx-dual-output.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,34 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Test that we compile the HVX dual output intrinsics.
+
+; CHECK-LABEL: f0:
+; CHECK: v{{[0-9]+}}.w = vadd(v{{[0-9]+}}.w,v{{[0-9]+}}.w,q{{[0-3]}}):carry
+define inreg <16 x i32> @f0(<16 x i32> %a0, <16 x i32> %a1, i8* nocapture readonly %a2) #0 {
+b0:
+  %v0 = bitcast i8* %a2 to <512 x i1>*
+  %v1 = load <512 x i1>, <512 x i1>* %v0, align 64
+  %v2 = tail call { <16 x i32>, <512 x i1> } @llvm.hexagon.V6.vaddcarry(<16 x i32> %a0, <16 x i32> %a1, <512 x i1> %v1)
+  %v3 = extractvalue { <16 x i32>, <512 x i1> } %v2, 0
+  ret <16 x i32> %v3
+}
+
+; CHECK-LABEL: f1:
+; CHECK: v{{[0-9]+}}.w = vsub(v{{[0-9]+}}.w,v{{[0-9]+}}.w,q{{[0-3]}}):carry
+define inreg <16 x i32> @f1(<16 x i32> %a0, <16 x i32> %a1, i8* nocapture readonly %a2) #0 {
+b0:
+  %v0 = bitcast i8* %a2 to <512 x i1>*
+  %v1 = load <512 x i1>, <512 x i1>* %v0, align 64
+  %v2 = tail call { <16 x i32>, <512 x i1> } @llvm.hexagon.V6.vsubcarry(<16 x i32> %a0, <16 x i32> %a1, <512 x i1> %v1)
+  %v3 = extractvalue { <16 x i32>, <512 x i1> } %v2, 0
+  ret <16 x i32> %v3
+}
+
+; Function Attrs: nounwind readnone
+declare { <16 x i32>, <512 x i1> } @llvm.hexagon.V6.vaddcarry(<16 x i32>, <16 x i32>, <512 x i1>) #1
+
+; Function Attrs: nounwind readnone
+declare { <16 x i32>, <512 x i1> } @llvm.hexagon.V6.vsubcarry(<16 x i32>, <16 x i32>, <512 x i1>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv65" "target-features"="+hvxv65,+hvx-length64b" }
+attributes #1 = { nounwind readnone }

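For reference, the "dual output" in hvx-dual-output.ll and hvx-dbl-dual-output.ll refers to the aggregate return of the carry intrinsics: each call yields both the result vector and the carry-out predicate. A minimal sketch that consumes both fields (illustrative only; it reuses the intrinsic declaration from the test, but the value names are made up):

  %res  = tail call { <16 x i32>, <512 x i1> } @llvm.hexagon.V6.vaddcarry(<16 x i32> %a, <16 x i32> %b, <512 x i1> %cin)
  %sum  = extractvalue { <16 x i32>, <512 x i1> } %res, 0   ; vector sum
  %cout = extractvalue { <16 x i32>, <512 x i1> } %res, 1   ; carry-out predicate

The tests above only check the first field, which is enough to verify that the :carry forms of vadd/vsub are selected.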
Added: llvm/trunk/test/CodeGen/Hexagon/hvx-loopidiom-memcpy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/hvx-loopidiom-memcpy.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/hvx-loopidiom-memcpy.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/hvx-loopidiom-memcpy.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,30 @@
+; RUN: opt -march=hexagon -hexagon-loop-idiom -S < %s | FileCheck %s
+
+; Make sure we don't convert load/store loops into memcpy if the access type
+; is a vector. Using vector instructions is generally better in such cases.
+
+; CHECK-NOT: @llvm.memcpy
+
+%s.0 = type { i32 }
+
+define void @f0(%s.0* noalias %a0, %s.0* noalias %a1) #0 align 2 {
+b0:
+  br i1 undef, label %b1, label %b2
+
+b1:                                               ; preds = %b1, %b0
+  %v0 = phi i32 [ %v7, %b1 ], [ 0, %b0 ]
+  %v1 = mul nuw nsw i32 %v0, 64
+  %v2 = getelementptr %s.0, %s.0* %a0, i32 %v1
+  %v3 = getelementptr %s.0, %s.0* %a1, i32 %v1
+  %v4 = bitcast %s.0* %v2 to <64 x i32>*
+  %v5 = load <64 x i32>, <64 x i32>* %v4, align 256
+  %v6 = bitcast %s.0* %v3 to <64 x i32>*
+  store <64 x i32> %v5, <64 x i32>* %v6, align 256
+  %v7 = add nuw nsw i32 %v0, 1
+  br i1 undef, label %b1, label %b2
+
+b2:                                               ; preds = %b1, %b0
+  ret void
+}
+
+attributes #0 = { "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length64b" }

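For contrast with hvx-loopidiom-memcpy.ll above: a scalar byte-copy loop of the shape below is the kind of loop the Hexagon loop idiom pass is expected to rewrite into a call to @llvm.memcpy, whereas the vector-typed accesses in the test must be left untouched. This is an illustrative sketch only (function and value names are made up, not taken from the commit):

  define void @copy_bytes(i8* noalias %dst, i8* noalias %src, i32 %n) {
  b0:
    br label %b1

  b1:                                               ; preds = %b1, %b0
    %i = phi i32 [ 0, %b0 ], [ %i.next, %b1 ]
    %ps = getelementptr inbounds i8, i8* %src, i32 %i   ; source element
    %pd = getelementptr inbounds i8, i8* %dst, i32 %i   ; destination element
    %byte = load i8, i8* %ps, align 1
    store i8 %byte, i8* %pd, align 1
    %i.next = add nuw nsw i32 %i, 1
    %cmp = icmp ult i32 %i.next, %n
    br i1 %cmp, label %b1, label %b2

  b2:                                               ; preds = %b1
    ret void
  }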
Added: llvm/trunk/test/CodeGen/Hexagon/hvx-vzero.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/hvx-vzero.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/hvx-vzero.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/hvx-vzero.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,34 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Test that V_vzero and W_vzero intrinsics work. The W_vzero intrinsic was added
+; for v65/hvx.
+
+
+; CHECK-LABEL: f0:
+; CHECK: [[VREG1:v([0-9]+)]] = vxor([[VREG1]],[[VREG1]])
+define void @f0(i16** nocapture %a0) #0 {
+b0:
+  %v0 = bitcast i16** %a0 to <16 x i32>*
+  %v1 = tail call <16 x i32> @llvm.hexagon.V6.vd0()
+  store <16 x i32> %v1, <16 x i32>* %v0, align 64
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vd0() #1
+
+; CHECK-LABEL: f1:
+; CHECK: [[VREG2:v([0-9]+):([0-9]+).w]] = vsub([[VREG2]],[[VREG2]])
+define void @f1(i16** nocapture %a0) #0 {
+b0:
+  %v0 = bitcast i16** %a0 to <32 x i32>*
+  %v1 = tail call <32 x i32> @llvm.hexagon.V6.vdd0()
+  store <32 x i32> %v1, <32 x i32>* %v0, align 128
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vdd0() #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv65" "target-features"="+hvxv65,+hvx-length64b" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/hwloop-ice.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/hwloop-ice.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/hwloop-ice.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/hwloop-ice.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,20 @@
+; RUN: llc -O2 -march=hexagon < %s
+; REQUIRES: asserts
+
+; Function Attrs: nounwind
+define void @f0(i32 %a0) #0 {
+b0:
+  %v0 = icmp ugt i32 %a0, 1
+  br i1 %v0, label %b1, label %b2
+
+b1:                                               ; preds = %b1, %b0
+  %v1 = phi i32 [ %v2, %b1 ], [ 0, %b0 ]
+  %v2 = add nsw i32 %v1, 2
+  %v3 = icmp slt i32 %v2, 0
+  br i1 %v3, label %b1, label %b2
+
+b2:                                               ; preds = %b1, %b0
+  unreachable
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/hwloop-long.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/hwloop-long.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/hwloop-long.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/hwloop-long.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,70 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Test that we generate a hardware loop for long long counters.
+; Tests signed/unsigned GT, EQ, and NEQ cases.
+
+; signed GT case
+; CHECK-LABEL: f0:
+; CHECK: loop0
+define i32 @f0(i32* nocapture %a0) #0 {
+b0:
+  br label %b1
+b1:                                               ; preds = %b1, %b0
+  %v0 = phi i32 [ 0, %b0 ], [ %v5, %b1 ]
+  %v1 = phi i64 [ 0, %b0 ], [ %v6, %b1 ]
+  %v2 = trunc i64 %v1 to i32
+  %v3 = getelementptr inbounds i32, i32* %a0, i32 %v2
+  %v4 = load i32, i32* %v3, align 4
+  %v5 = add nsw i32 %v4, %v0
+  %v6 = add nsw i64 %v1, 1
+  %v7 = icmp slt i64 %v6, 8
+  br i1 %v7, label %b1, label %b2
+
+b2:                                               ; preds = %b1
+  ret i32 %v5
+}
+
+; unsigned GT case
+; CHECK-LABEL: f1:
+; CHECK: loop0
+define i32 @f1(i32* nocapture %a0) #0 {
+b0:
+  br label %b1
+b1:                                               ; preds = %b1, %b0
+  %v0 = phi i32 [ 0, %b0 ], [ %v5, %b1 ]
+  %v1 = phi i64 [ 0, %b0 ], [ %v6, %b1 ]
+  %v2 = trunc i64 %v1 to i32
+  %v3 = getelementptr inbounds i32, i32* %a0, i32 %v2
+  %v4 = load i32, i32* %v3, align 4
+  %v5 = add nsw i32 %v4, %v0
+  %v6 = add i64 %v1, 1
+  %v7 = icmp ult i64 %v6, 8
+  br i1 %v7, label %b1, label %b2
+
+b2:                                               ; preds = %b1
+  ret i32 %v5
+}
+
+; EQ case
+; CHECK-LABEL: f2:
+; CHECK: loop0
+define i32 @f2(i32* nocapture %a0) #0 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v0 = phi i32 [ 0, %b0 ], [ %v5, %b1 ]
+  %v1 = phi i64 [ 0, %b0 ], [ %v6, %b1 ]
+  %v2 = trunc i64 %v1 to i32
+  %v3 = getelementptr inbounds i32, i32* %a0, i32 %v2
+  %v4 = load i32, i32* %v3, align 4
+  %v5 = add nsw i32 %v4, %v0
+  %v6 = add nsw i64 %v1, 1
+  %v7 = icmp eq i64 %v6, 8
+  br i1 %v7, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  ret i32 %v5
+}
+
+attributes #0 = { nounwind readonly "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/hwloop-phi-subreg.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/hwloop-phi-subreg.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/hwloop-phi-subreg.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/hwloop-phi-subreg.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,67 @@
+; RUN: llc -march=hexagon -hexagon-hwloop-preheader < %s
+; REQUIRES: asserts
+
+; Checks that a subreg in a Phi is propagated correctly when a
+; new preheader is created in the Hardware Loop pass.
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  br label %b2
+
+b1:                                               ; preds = %b2, %b1
+  %v0 = or i64 0, undef
+  %v1 = add nsw i64 0, %v0
+  %v2 = add nsw i64 %v1, 0
+  %v3 = add nsw i64 %v2, 0
+  %v4 = add nsw i64 %v3, 0
+  %v5 = add nsw i64 %v4, 0
+  %v6 = add nsw i64 %v5, 0
+  %v7 = load i32, i32* undef, align 4
+  %v8 = ashr i32 %v7, 5
+  %v9 = sext i32 %v8 to i64
+  %v10 = mul nsw i64 %v9, %v9
+  %v11 = add nsw i64 %v6, %v10
+  %v12 = add nsw i64 %v11, 0
+  %v13 = add nsw i64 0, %v12
+  %v14 = add nsw i64 %v13, 0
+  %v15 = add nsw i64 %v14, 0
+  %v16 = add nsw i64 %v15, 0
+  %v17 = add nsw i64 %v16, 0
+  %v18 = add nsw i64 %v17, 0
+  %v19 = add nsw i64 %v18, 0
+  %v20 = add nsw i64 %v19, 0
+  %v21 = lshr i64 %v20, 32
+  %v22 = trunc i64 %v21 to i32
+  br i1 undef, label %b1, label %b3
+
+b2:                                               ; preds = %b5, %b0
+  br i1 undef, label %b1, label %b4
+
+b3:                                               ; preds = %b1
+  br i1 false, label %b5, label %b4
+
+b4:                                               ; preds = %b4, %b3, %b2
+  %v23 = phi i32 [ %v37, %b4 ], [ undef, %b2 ], [ %v22, %b3 ]
+  %v24 = zext i32 %v23 to i64
+  %v25 = shl nuw i64 %v24, 32
+  %v26 = or i64 %v25, 0
+  %v27 = add nsw i64 0, %v26
+  %v28 = add nsw i64 %v27, 0
+  %v29 = add nsw i64 %v28, 0
+  %v30 = add nsw i64 %v29, 0
+  %v31 = add nsw i64 %v30, 0
+  %v32 = add nsw i64 %v31, 0
+  %v33 = add nsw i64 %v32, 0
+  %v34 = add nsw i64 %v33, 0
+  %v35 = trunc i64 %v34 to i32
+  %v36 = lshr i64 %v34, 32
+  %v37 = trunc i64 %v36 to i32
+  %v38 = icmp slt i32 undef, undef
+  br i1 %v38, label %b4, label %b5
+
+b5:                                               ; preds = %b4, %b3
+  br label %b2
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/hwloop-swap.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/hwloop-swap.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/hwloop-swap.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/hwloop-swap.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,28 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Test that the hardware loop pass does not alter the comparison
+; to use the result from the induction expression instead of
+; from the Phi.
+
+; CHECK: cmpb.gtu([[REG0:r[0-9]+]]
+; CHECK: [[REG0]] = add([[REG0]],
+
+define void @f0() #0 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  br i1 undef, label %b1, label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v0 = phi i32 [ %v3, %b2 ], [ undef, %b1 ]
+  %v1 = trunc i32 %v0 to i8
+  %v2 = icmp ugt i8 %v1, 44
+  %v3 = add i32 %v0, -30
+  br i1 %v2, label %b2, label %b3
+
+b3:                                               ; preds = %b2
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/hwloop-with-return-call.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/hwloop-with-return-call.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/hwloop-with-return-call.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/hwloop-with-return-call.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,26 @@
+; This test was written to make sure a hardware loop is not generated if a
+; returning call is present in the basic block.
+; RUN: llc -O2 -march=hexagon < %s | FileCheck %s
+; CHECK-NOT: loop
+; CHECK-NOT: endloop
+
+; Function Attrs: nounwind
+define void @f0() local_unnamed_addr #0 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v0 = phi i32 [ %v4, %b1 ], [ 2, %b0 ]
+  %v1 = phi double [ %v3, %b1 ], [ 1.000000e+00, %b0 ]
+  %v2 = sitofp i32 %v0 to double
+  %v3 = fmul double %v2, %v1
+  %v4 = add nuw nsw i32 %v0, 1
+  %v5 = icmp eq i32 %v0, undef
+  br i1 %v5, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  %v6 = fdiv double undef, %v3
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv65" "target-features"="-hvx,-long-calls" }

Added: llvm/trunk/test/CodeGen/Hexagon/hx_V6_lo_hi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/hx_V6_lo_hi.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/hx_V6_lo_hi.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/hx_V6_lo_hi.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,65 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+;
+; Check that we do not generate v0 = vand(v1,v1)
+
+; CHECK-NOT: v{{[0-9]+}} = vand(v{{[0-9]+}},v{{[0-9]+}})
+
+; Function Attrs: nounwind
+define void @f0(i8* nocapture readonly %a0, i8* nocapture readonly %a1, i32 %a2, i8* nocapture %a3, i32 %a4, i32 %a5) #0 {
+b0:
+  %v0 = bitcast i8* %a1 to i64*
+  %v1 = load i64, i64* %v0, align 8, !tbaa !0
+  %v2 = shl i64 %v1, 8
+  %v3 = trunc i64 %v2 to i32
+  %v4 = trunc i64 %v1 to i32
+  %v5 = and i32 %v4, 16777215
+  %v6 = bitcast i8* %a0 to <16 x i32>*
+  %v7 = load <16 x i32>, <16 x i32>* %v6, align 64, !tbaa !4
+  %v8 = getelementptr inbounds i8, i8* %a0, i32 32
+  %v9 = bitcast i8* %v8 to <16 x i32>*
+  %v10 = load <16 x i32>, <16 x i32>* %v9, align 64, !tbaa !4
+  %v11 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v10, <16 x i32> %v7)
+  %v12 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %v11, i32 %v5, i32 0)
+  %v13 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %v11, i32 %v3, i32 0)
+  %v14 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v12)
+  %v15 = tail call <16 x i32> @llvm.hexagon.V6.vasrwuhsat(<16 x i32> %v14, <16 x i32> %v14, i32 %a2)
+  %v16 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v13)
+  %v17 = tail call <16 x i32> @llvm.hexagon.V6.vasrwuhsat(<16 x i32> %v16, <16 x i32> %v16, i32 %a2)
+  %v18 = getelementptr inbounds i8, i8* %a3, i32 32
+  %v19 = bitcast i8* %v18 to <16 x i32>*
+  store <16 x i32> %v15, <16 x i32>* %v19, align 64, !tbaa !4
+  %v20 = bitcast i8* %a3 to <16 x i32>*
+  store <16 x i32> %v17, <16 x i32>* %v20, align 64, !tbaa !4
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32>, i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vasrwuhsat(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+define i32 @f1() #2 {
+b0:
+  ret i32 0
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind readnone "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"long long", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!2, !2, i64 0}

Added: llvm/trunk/test/CodeGen/Hexagon/i128-bitop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/i128-bitop.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/i128-bitop.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/i128-bitop.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,103 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+target triple = "hexagon"
+
+%s.0 = type { i32, i32, [10 x %s.1] }
+%s.1 = type { [4 x i32] }
+%s.2 = type { i128 }
+
+ at g0 = external global %s.0*
+
+; Function Attrs: nounwind ssp
+define void @f0(%s.2* nocapture %a0, i32 %a1) #0 {
+b0:
+  %v0 = getelementptr inbounds %s.2, %s.2* %a0, i32 0, i32 0
+  br label %b1
+
+b1:                                               ; preds = %b4, %b3, %b0
+  %v1 = phi i32 [ 0, %b0 ], [ %v14, %b4 ], [ %v13, %b3 ]
+  switch i32 %v1, label %b4 [
+    i32 0, label %b3
+    i32 1, label %b2
+  ]
+
+b2:                                               ; preds = %b1
+  br label %b3
+
+b3:                                               ; preds = %b2, %b1
+  %v2 = phi i32 [ 1, %b2 ], [ 0, %b1 ]
+  %v3 = phi i128 [ 64, %b2 ], [ 32, %b1 ]
+  %v4 = phi i128 [ -79228162495817593519834398721, %b2 ], [ -18446744069414584321, %b1 ]
+  %v5 = load %s.0*, %s.0** @g0, align 4
+  %v6 = getelementptr inbounds %s.0, %s.0* %v5, i32 0, i32 2, i32 %a1, i32 0, i32 %v2
+  %v7 = load i32, i32* %v6, align 4
+  %v8 = zext i32 %v7 to i128
+  %v9 = load i128, i128* %v0, align 4
+  %v10 = shl nuw nsw i128 %v8, %v3
+  %v11 = and i128 %v9, %v4
+  %v12 = or i128 %v11, %v10
+  store i128 %v12, i128* %v0, align 4
+  %v13 = add i32 %v1, 1
+  br label %b1
+
+b4:                                               ; preds = %b1
+  %v14 = add i32 %v1, 1
+  %v15 = icmp eq i32 %v14, 4
+  br i1 %v15, label %b5, label %b1
+
+b5:                                               ; preds = %b4
+  ret void
+}
+
+; Function Attrs: nounwind ssp
+define void @f1(%s.2* nocapture %a0, i32 %a1) #0 {
+b0:
+  %v0 = getelementptr inbounds %s.2, %s.2* %a0, i32 0, i32 0
+  br label %b1
+
+b1:                                               ; preds = %b5, %b4, %b0
+  %v1 = phi i32 [ 0, %b0 ], [ %v20, %b5 ], [ %v19, %b4 ]
+  switch i32 %v1, label %b5 [
+    i32 0, label %b2
+    i32 1, label %b3
+  ]
+
+b2:                                               ; preds = %b1
+  %v2 = load %s.0*, %s.0** @g0, align 4
+  %v3 = getelementptr inbounds %s.0, %s.0* %v2, i32 0, i32 2, i32 %a1, i32 0, i32 0
+  %v4 = load i32, i32* %v3, align 4
+  %v5 = zext i32 %v4 to i128
+  %v6 = load i128, i128* %v0, align 4
+  %v7 = shl nuw nsw i128 %v5, 32
+  %v8 = and i128 %v6, -18446744069414584321
+  %v9 = or i128 %v8, %v7
+  br label %b4
+
+b3:                                               ; preds = %b1
+  %v10 = load %s.0*, %s.0** @g0, align 4
+  %v11 = getelementptr inbounds %s.0, %s.0* %v10, i32 0, i32 2, i32 %a1, i32 0, i32 1
+  %v12 = load i32, i32* %v11, align 4
+  %v13 = zext i32 %v12 to i128
+  %v14 = load i128, i128* %v0, align 4
+  %v15 = shl nuw nsw i128 %v13, 64
+  %v16 = and i128 %v14, -79228162495817593519834398721
+  %v17 = or i128 %v16, %v15
+  br label %b4
+
+b4:                                               ; preds = %b3, %b2
+  %v18 = phi i128 [ %v17, %b3 ], [ %v9, %b2 ]
+  store i128 %v18, i128* %v0, align 4
+  %v19 = add i32 %v1, 1
+  br label %b1
+
+b5:                                               ; preds = %b1
+  %v20 = add i32 %v1, 1
+  %v21 = icmp eq i32 %v20, 4
+  br i1 %v21, label %b6, label %b1
+
+b6:                                               ; preds = %b5
+  ret void
+}
+
+attributes #0 = { nounwind ssp }

Added: llvm/trunk/test/CodeGen/Hexagon/ignore-terminal-mbb.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/ignore-terminal-mbb.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/ignore-terminal-mbb.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/ignore-terminal-mbb.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,33 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; CHECK-NOT: if{{.*}}jump{{.*}}-1
+; CHECK: memw
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  br i1 undef, label %b2, label %b1
+
+b1:                                               ; preds = %b0
+  store i32 0, i32* undef, align 4, !tbaa !0
+  unreachable
+
+b2:                                               ; preds = %b0
+  br i1 undef, label %b4, label %b3
+
+b3:                                               ; preds = %b2
+  %v0 = or i32 undef, 2048
+  br label %b4
+
+b4:                                               ; preds = %b3, %b2
+  ret void
+}
+
+attributes #0 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/initial-exec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/initial-exec.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/initial-exec.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/initial-exec.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,15 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+target triple = "hexagon-unknown--elf"
+
+ at g0 = external thread_local(initialexec) global i32
+ at g1 = external thread_local(initialexec) global i32
+
+; CHECK-DAG: r{{[0-9]+}} = memw(##g0 at IE)
+; CHECK-DAG: r{{[0-9]+}} = memw(##g1 at IE)
+define i32 @f0() {
+b0:
+  %v0 = load i32, i32* @g1, align 4
+  store i32 %v0, i32* @g0, align 4
+  ret i32 0
+}

Added: llvm/trunk/test/CodeGen/Hexagon/inline-asm-clobber-lr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/inline-asm-clobber-lr.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/inline-asm-clobber-lr.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/inline-asm-clobber-lr.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,19 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: allocframe
+
+target triple = "hexagon"
+
+define internal fastcc void @f0() {
+b0:
+  %v0 = tail call i32* asm sideeffect "call 1f; r31.h = #hi(TH); r31.l = #lo(TH); jumpr r31; 1: $0 = r31", "=r,~{r28},~{r31}"()
+  %v1 = bitcast i32* %v0 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 bitcast (void (...)* @f1 to i8*), i8* align 4 %v1, i32 12, i1 false)
+  ret void
+}
+
+declare void @f1(...)
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #0
+
+attributes #0 = { argmemonly nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/inline-asm-error.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/inline-asm-error.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/inline-asm-error.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/inline-asm-error.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,15 @@
+; RUN: not llc -march=hexagon < %s 2>&1 | FileCheck %s
+
+; CHECK: error: Don't know how to handle indirect register inputs yet for constraint 'r'
+
+%s.0 = type { i8*, i32, %s.1 }
+%s.1 = type { %s.2 }
+%s.2 = type { i32, i8* }
+
+define void @f0(%s.0* byval align 8 %a0) {
+b0:
+  call void asm sideeffect ".weak OFFSET_0;jump ##(OFFSET_0 + 0x14c15f0)", "*r"(%s.0* nonnull %a0), !srcloc !0
+  ret void
+}
+
+!0 = !{i32 10}

Added: llvm/trunk/test/CodeGen/Hexagon/insert.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/insert.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/insert.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/insert.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,61 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK-NOT: r{{[0-9]+}}:{{[0-9]+}} = insert(r{{[0-9]+}},r{{[0-9]+}}:{{[0-9]+}})
+
+ at g0 = common global [512 x i16] zeroinitializer, align 8
+ at g1 = common global [512 x i8] zeroinitializer, align 8
+ at g2 = private unnamed_addr constant [4 x i8] c"%d\0A\00", align 8
+
+; Function Attrs: nounwind
+declare i32 @f0(i8* nocapture, ...) #0
+
+; Function Attrs: nounwind
+define i32 @f1() #0 {
+b0:
+  br label %b4
+
+b1:                                               ; preds = %b3, %b1
+  %v0 = phi i32 [ 0, %b3 ], [ %v5, %b1 ]
+  %v1 = getelementptr [512 x i8], [512 x i8]* @g1, i32 0, i32 %v0
+  %v2 = load i8, i8* %v1, align 1, !tbaa !0
+  %v3 = zext i8 %v2 to i32
+  %v4 = tail call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g2, i32 0, i32 0), i32 %v3) #0
+  %v5 = add nsw i32 %v0, 1
+  %v6 = icmp eq i32 %v5, 512
+  br i1 %v6, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  ret i32 0
+
+b3:                                               ; preds = %b4
+  tail call void @f2(i16* getelementptr inbounds ([512 x i16], [512 x i16]* @g0, i32 0, i32 0), i8* getelementptr inbounds ([512 x i8], [512 x i8]* @g1, i32 0, i32 0)) #0
+  br label %b1
+
+b4:                                               ; preds = %b4, %b0
+  %v7 = phi i64 [ 0, %b0 ], [ %v10, %b4 ]
+  %v8 = phi <2 x i32> [ <i32 0, i32 1>, %b0 ], [ %v12, %b4 ]
+  %v9 = phi <2 x i32> [ <i32 2, i32 3>, %b0 ], [ %v13, %b4 ]
+  %v10 = add nsw i64 %v7, 4
+  %v11 = trunc i64 %v7 to i32
+  %v12 = add <2 x i32> %v8, <i32 4, i32 4>
+  %v13 = add <2 x i32> %v9, <i32 4, i32 4>
+  %v14 = mul <2 x i32> %v8, <i32 7, i32 7>
+  %v15 = mul <2 x i32> %v9, <i32 7, i32 7>
+  %v16 = add <2 x i32> %v14, <i32 -840, i32 -840>
+  %v17 = add <2 x i32> %v15, <i32 -840, i32 -840>
+  %v18 = trunc <2 x i32> %v16 to <2 x i16>
+  %v19 = trunc <2 x i32> %v17 to <2 x i16>
+  %v20 = shufflevector <2 x i16> %v18, <2 x i16> %v19, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  %v21 = getelementptr [512 x i16], [512 x i16]* @g0, i32 0, i32 %v11
+  %v22 = bitcast i16* %v21 to <4 x i16>*
+  store <4 x i16> %v20, <4 x i16>* %v22, align 8
+  %v23 = icmp slt i64 %v10, 512
+  br i1 %v23, label %b4, label %b3
+}
+
+declare void @f2(i16*, i8*)
+
+attributes #0 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/integer_abs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/integer_abs.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/integer_abs.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/integer_abs.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,14 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; Check for integer abs instruction.
+; CHECK: r{{[0-9]+}} = abs
+
+; Function Attrs: nounwind readnone
+define i32 @f0(i32 %a0) #0 {
+b0:
+  %v0 = icmp slt i32 %a0, 0
+  %v1 = sub nsw i32 0, %a0
+  %v2 = select i1 %v0, i32 %v1, i32 %a0
+  ret i32 %v2
+}
+
+attributes #0 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-alu.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-alu.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-alu.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-alu.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,1034 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; CHECK-LABEL: test1:
+; CHECK: v{{[0-9]+}} = vand(v{{[0-9]+}},v{{[0-9]+}})
+define <16 x i32> @test1(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vand(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test2:
+; CHECK: v{{[0-9]+}} = vor(v{{[0-9]+}},v{{[0-9]+}})
+define <16 x i32> @test2(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vor(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test3:
+; CHECK: v{{[0-9]+}} = vxor(v{{[0-9]+}},v{{[0-9]+}})
+define <16 x i32> @test3(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vxor(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test4:
+; CHECK: v{{[0-9]+}}.w = vadd(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
+define <16 x i32> @test4(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test5:
+; CHECK: v{{[0-9]+}}.ub = vadd(v{{[0-9]+}}.ub,v{{[0-9]+}}.ub):sat
+define <16 x i32> @test5(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vaddubsat(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test6:
+; CHECK: v{{[0-9]+}}.uh = vadd(v{{[0-9]+}}.uh,v{{[0-9]+}}.uh):sat
+define <16 x i32> @test6(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vadduhsat(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test7:
+; CHECK: v{{[0-9]+}}.h = vadd(v{{[0-9]+}}.h,v{{[0-9]+}}.h):sat
+define <16 x i32> @test7(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vaddhsat(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test8:
+; CHECK: v{{[0-9]+}}.w = vadd(v{{[0-9]+}}.w,v{{[0-9]+}}.w):sat
+define <16 x i32> @test8(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vaddwsat(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test9:
+; CHECK: v{{[0-9]+}}.b = vsub(v{{[0-9]+}}.b,v{{[0-9]+}}.b)
+define <16 x i32> @test9(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vsubb(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test10:
+; CHECK: v{{[0-9]+}}.h = vsub(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define <16 x i32> @test10(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test11:
+; CHECK: v{{[0-9]+}}.w = vsub(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
+define <16 x i32> @test11(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vsubw(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test12:
+; CHECK: v{{[0-9]+}}.ub = vsub(v{{[0-9]+}}.ub,v{{[0-9]+}}.ub):sat
+define <16 x i32> @test12(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vsububsat(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test13:
+; CHECK: v{{[0-9]+}}.uh = vsub(v{{[0-9]+}}.uh,v{{[0-9]+}}.uh):sat
+define <16 x i32> @test13(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vsubuhsat(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test14:
+; CHECK: v{{[0-9]+}}.h = vsub(v{{[0-9]+}}.h,v{{[0-9]+}}.h):sat
+define <16 x i32> @test14(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vsubhsat(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test15:
+; CHECK: v{{[0-9]+}}.w = vsub(v{{[0-9]+}}.w,v{{[0-9]+}}.w):sat
+define <16 x i32> @test15(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vsubwsat(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test16:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.b = vadd(v{{[0-9]+}}:{{[0-9]+}}.b,v{{[0-9]+}}:{{[0-9]+}}.b)
+define <32 x i32> @test16(<32 x i32> %a, <32 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vaddb.dv(<32 x i32> %a, <32 x i32> %b)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test17:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h = vadd(v{{[0-9]+}}:{{[0-9]+}}.h,v{{[0-9]+}}:{{[0-9]+}}.h)
+define <32 x i32> @test17(<32 x i32> %a, <32 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.dv(<32 x i32> %a, <32 x i32> %b)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test18:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w = vadd(v{{[0-9]+}}:{{[0-9]+}}.w,v{{[0-9]+}}:{{[0-9]+}}.w)
+define <32 x i32> @test18(<32 x i32> %a, <32 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32> %a, <32 x i32> %b)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test19:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.ub = vadd(v{{[0-9]+}}:{{[0-9]+}}.ub,v{{[0-9]+}}:{{[0-9]+}}.ub):sat
+define <32 x i32> @test19(<32 x i32> %a, <32 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vaddubsat.dv(<32 x i32> %a, <32 x i32> %b)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test20:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uh = vadd(v{{[0-9]+}}:{{[0-9]+}}.uh,v{{[0-9]+}}:{{[0-9]+}}.uh):sat
+define <32 x i32> @test20(<32 x i32> %a, <32 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vadduhsat.dv(<32 x i32> %a, <32 x i32> %b)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test21:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h = vadd(v{{[0-9]+}}:{{[0-9]+}}.h,v{{[0-9]+}}:{{[0-9]+}}.h):sat
+define <32 x i32> @test21(<32 x i32> %a, <32 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.dv(<32 x i32> %a, <32 x i32> %b)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test22:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w = vadd(v{{[0-9]+}}:{{[0-9]+}}.w,v{{[0-9]+}}:{{[0-9]+}}.w):sat
+define <32 x i32> @test22(<32 x i32> %a, <32 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vaddwsat.dv(<32 x i32> %a, <32 x i32> %b)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test23:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.b = vsub(v{{[0-9]+}}:{{[0-9]+}}.b,v{{[0-9]+}}:{{[0-9]+}}.b)
+define <32 x i32> @test23(<32 x i32> %a, <32 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vsubb.dv(<32 x i32> %a, <32 x i32> %b)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test24:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h = vsub(v{{[0-9]+}}:{{[0-9]+}}.h,v{{[0-9]+}}:{{[0-9]+}}.h)
+define <32 x i32> @test24(<32 x i32> %a, <32 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vsubh.dv(<32 x i32> %a, <32 x i32> %b)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test25:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w = vsub(v{{[0-9]+}}:{{[0-9]+}}.w,v{{[0-9]+}}:{{[0-9]+}}.w)
+define <32 x i32> @test25(<32 x i32> %a, <32 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vsubw.dv(<32 x i32> %a, <32 x i32> %b)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test26:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.ub = vsub(v{{[0-9]+}}:{{[0-9]+}}.ub,v{{[0-9]+}}:{{[0-9]+}}.ub):sat
+define <32 x i32> @test26(<32 x i32> %a, <32 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vsububsat.dv(<32 x i32> %a, <32 x i32> %b)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test27:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uh = vsub(v{{[0-9]+}}:{{[0-9]+}}.uh,v{{[0-9]+}}:{{[0-9]+}}.uh):sat
+define <32 x i32> @test27(<32 x i32> %a, <32 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vsubuhsat.dv(<32 x i32> %a, <32 x i32> %b)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test28:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h = vsub(v{{[0-9]+}}:{{[0-9]+}}.h,v{{[0-9]+}}:{{[0-9]+}}.h):sat
+define <32 x i32> @test28(<32 x i32> %a, <32 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vsubhsat.dv(<32 x i32> %a, <32 x i32> %b)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test29:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w = vsub(v{{[0-9]+}}:{{[0-9]+}}.w,v{{[0-9]+}}:{{[0-9]+}}.w):sat
+define <32 x i32> @test29(<32 x i32> %a, <32 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vsubwsat.dv(<32 x i32> %a, <32 x i32> %b)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test30:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h = vadd(v{{[0-9]+}}.ub,v{{[0-9]+}}.ub)
+define <32 x i32> @test30(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vaddubh(<16 x i32> %a, <16 x i32> %b)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test31:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w = vadd(v{{[0-9]+}}.uh,v{{[0-9]+}}.uh)
+define <32 x i32> @test31(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vadduhw(<16 x i32> %a, <16 x i32> %b)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test32:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w = vadd(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define <32 x i32> @test32(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vaddhw(<16 x i32> %a, <16 x i32> %b)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test33:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h = vsub(v{{[0-9]+}}.ub,v{{[0-9]+}}.ub)
+define <32 x i32> @test33(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vsububh(<16 x i32> %a, <16 x i32> %b)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test34:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w = vsub(v{{[0-9]+}}.uh,v{{[0-9]+}}.uh)
+define <32 x i32> @test34(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vsubuhw(<16 x i32> %a, <16 x i32> %b)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test35:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w = vsub(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define <32 x i32> @test35(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vsubhw(<16 x i32> %a, <16 x i32> %b)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test36:
+; CHECK: v{{[0-9]+}}.ub = vabsdiff(v{{[0-9]+}}.ub,v{{[0-9]+}}.ub)
+define <16 x i32> @test36(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test37:
+; CHECK: v{{[0-9]+}}.uh = vabsdiff(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define <16 x i32> @test37(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffh(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test38:
+; CHECK: v{{[0-9]+}}.uh = vabsdiff(v{{[0-9]+}}.uh,v{{[0-9]+}}.uh)
+define <16 x i32> @test38(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffuh(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test39:
+; CHECK: v{{[0-9]+}}.uw = vabsdiff(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
+define <16 x i32> @test39(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffw(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test40:
+; CHECK: v{{[0-9]+}}.ub = vavg(v{{[0-9]+}}.ub,v{{[0-9]+}}.ub)
+define <16 x i32> @test40(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vavgub(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test41:
+; CHECK: v{{[0-9]+}}.uh = vavg(v{{[0-9]+}}.uh,v{{[0-9]+}}.uh)
+define <16 x i32> @test41(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vavguh(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test42:
+; CHECK: v{{[0-9]+}}.h = vavg(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define <16 x i32> @test42(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vavgh(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test43:
+; CHECK: v{{[0-9]+}}.w = vavg(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
+define <16 x i32> @test43(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vavgw(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test44:
+; CHECK: v{{[0-9]+}}.b = vnavg(v{{[0-9]+}}.ub,v{{[0-9]+}}.ub)
+define <16 x i32> @test44(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vnavgub(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test45:
+; CHECK: v{{[0-9]+}}.h = vnavg(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define <16 x i32> @test45(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vnavgh(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test46:
+; CHECK: v{{[0-9]+}}.w = vnavg(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
+define <16 x i32> @test46(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vnavgw(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test47:
+; CHECK: v{{[0-9]+}}.ub = vavg(v{{[0-9]+}}.ub,v{{[0-9]+}}.ub):rnd
+define <16 x i32> @test47(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vavgubrnd(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test48:
+; CHECK: v{{[0-9]+}}.uh = vavg(v{{[0-9]+}}.uh,v{{[0-9]+}}.uh):rnd
+define <16 x i32> @test48(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vavguhrnd(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test49:
+; CHECK: v{{[0-9]+}}.h = vavg(v{{[0-9]+}}.h,v{{[0-9]+}}.h):rnd
+define <16 x i32> @test49(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vavghrnd(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test50:
+; CHECK: v{{[0-9]+}}.w = vavg(v{{[0-9]+}}.w,v{{[0-9]+}}.w):rnd
+define <16 x i32> @test50(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vavgwrnd(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test51:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h = vmpa(v{{[0-9]+}}:{{[0-9]+}}.ub,v{{[0-9]+}}:{{[0-9]+}}.ub)
+define <32 x i32> @test51(<32 x i32> %a, <32 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpabuuv(<32 x i32> %a, <32 x i32> %b)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test52:
+; CHECK: v{{[0-9]+}}.ub = vmin(v{{[0-9]+}}.ub,v{{[0-9]+}}.ub)
+define <16 x i32> @test52(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vminub(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test53:
+; CHECK: v{{[0-9]+}}.uh = vmin(v{{[0-9]+}}.uh,v{{[0-9]+}}.uh)
+define <16 x i32> @test53(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vminuh(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test54:
+; CHECK: v{{[0-9]+}}.h = vmin(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define <16 x i32> @test54(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vminh(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test55:
+; CHECK: v{{[0-9]+}}.w = vmin(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
+define <16 x i32> @test55(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vminw(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test56:
+; CHECK: v{{[0-9]+}}.ub = vmax(v{{[0-9]+}}.ub,v{{[0-9]+}}.ub)
+define <16 x i32> @test56(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vmaxub(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test57:
+; CHECK: v{{[0-9]+}}.uh = vmax(v{{[0-9]+}}.uh,v{{[0-9]+}}.uh)
+define <16 x i32> @test57(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vmaxuh(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test58:
+; CHECK: v{{[0-9]+}}.h = vmax(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define <16 x i32> @test58(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vmaxh(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test59:
+; CHECK: v{{[0-9]+}}.w = vmax(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
+define <16 x i32> @test59(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vmaxw(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test60:
+; CHECK: v{{[0-9]+}} = vdelta(v{{[0-9]+}},v{{[0-9]+}})
+define <16 x i32> @test60(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vdelta(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test61:
+; CHECK: v{{[0-9]+}} = vrdelta(v{{[0-9]+}},v{{[0-9]+}})
+define <16 x i32> @test61(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vrdelta(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test62:
+; CHECK: v{{[0-9]+}}.b = vdeale(v{{[0-9]+}}.b,v{{[0-9]+}}.b)
+define <16 x i32> @test62(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vdealb4w(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test63:
+; CHECK: v{{[0-9]+}}.b = vshuffe(v{{[0-9]+}}.b,v{{[0-9]+}}.b)
+define <16 x i32> @test63(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vshuffeb(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test64:
+; CHECK: v{{[0-9]+}}.b = vshuffo(v{{[0-9]+}}.b,v{{[0-9]+}}.b)
+define <16 x i32> @test64(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test65:
+; CHECK: v{{[0-9]+}}.h = vshuffe(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define <16 x i32> @test65(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test66:
+; CHECK: v{{[0-9]+}}.h = vshuffo(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define <16 x i32> @test66(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test67:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h = vshuffoe(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define <32 x i32> @test67(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vshufoeh(<16 x i32> %a, <16 x i32> %b)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test68:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.b = vshuffoe(v{{[0-9]+}}.b,v{{[0-9]+}}.b)
+define <32 x i32> @test68(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vshufoeb(<16 x i32> %a, <16 x i32> %b)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test69:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}} = vcombine(v{{[0-9]+}},v{{[0-9]+}})
+define <32 x i32> @test69(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %a, <16 x i32> %b)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test70:
+; CHECK: v{{[0-9]+}}.ub = vsat(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define <16 x i32> @test70(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test71:
+; CHECK: v{{[0-9]+}}.h = vsat(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
+define <16 x i32> @test71(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vsatwh(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test72:
+; CHECK: v{{[0-9]+}}.h = vround(v{{[0-9]+}}.w,v{{[0-9]+}}.w):sat
+define <16 x i32> @test72(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vroundwh(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test73:
+; CHECK: v{{[0-9]+}}.uh = vround(v{{[0-9]+}}.w,v{{[0-9]+}}.w):sat
+define <16 x i32> @test73(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vroundwuh(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test74:
+; CHECK: v{{[0-9]+}}.b = vround(v{{[0-9]+}}.h,v{{[0-9]+}}.h):sat
+define <16 x i32> @test74(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vroundhb(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test75:
+; CHECK: v{{[0-9]+}}.ub = vround(v{{[0-9]+}}.h,v{{[0-9]+}}.h):sat
+define <16 x i32> @test75(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vroundhub(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test76:
+; CHECK: v{{[0-9]+}}.w = vasr(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
+define <16 x i32> @test76(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vasrwv(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test77:
+; CHECK: v{{[0-9]+}}.w = vlsr(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
+define <16 x i32> @test77(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vlsrwv(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test78:
+; CHECK: v{{[0-9]+}}.h = vlsr(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define <16 x i32> @test78(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vlsrhv(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test79:
+; CHECK: v{{[0-9]+}}.h = vasr(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define <16 x i32> @test79(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vasrhv(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test80:
+; CHECK: v{{[0-9]+}}.w = vasl(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
+define <16 x i32> @test80(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vaslwv(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test81:
+; CHECK: v{{[0-9]+}}.h = vasl(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define <16 x i32> @test81(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vaslhv(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test82:
+; CHECK: v{{[0-9]+}}.b = vadd(v{{[0-9]+}}.b,v{{[0-9]+}}.b)
+define <16 x i32> @test82(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vaddb(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test83:
+; CHECK: v{{[0-9]+}}.h = vadd(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define <16 x i32> @test83(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test84:
+; CHECK: if (q{{[0-3]}}) v{{[0-9]+}}.b += v{{[0-9]+}}.b
+define <16 x i32> @test84(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c) #0 {
+entry:
+  %0 = bitcast <16 x i32> %a to <512 x i1>
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vaddbq(<512 x i1> %0, <16 x i32> %c, <16 x i32> %b)
+  ret <16 x i32> %1
+}
+
+; CHECK-LABEL: test85:
+; CHECK: if (q{{[0-3]}}) v{{[0-9]+}}.h += v{{[0-9]+}}.h
+define <16 x i32> @test85(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c) #0 {
+entry:
+  %0 = bitcast <16 x i32> %a to <512 x i1>
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vaddhq(<512 x i1> %0, <16 x i32> %c, <16 x i32> %b)
+  ret <16 x i32> %1
+}
+
+; CHECK-LABEL: test86:
+; CHECK: if (q{{[0-3]}}) v{{[0-9]+}}.w += v{{[0-9]+}}.w
+define <16 x i32> @test86(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c) #0 {
+entry:
+  %0 = bitcast <16 x i32> %a to <512 x i1>
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vaddwq(<512 x i1> %0, <16 x i32> %c, <16 x i32> %b)
+  ret <16 x i32> %1
+}
+
+; CHECK-LABEL: test87:
+; CHECK: if (!q{{[0-3]}}) v{{[0-9]+}}.b += v{{[0-9]+}}.b
+define <16 x i32> @test87(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c) #0 {
+entry:
+  %0 = bitcast <16 x i32> %a to <512 x i1>
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %0, <16 x i32> %c, <16 x i32> %b)
+  ret <16 x i32> %1
+}
+
+; CHECK-LABEL: test88:
+; CHECK: if (!q{{[0-3]}}) v{{[0-9]+}}.h += v{{[0-9]+}}.h
+define <16 x i32> @test88(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c) #0 {
+entry:
+  %0 = bitcast <16 x i32> %a to <512 x i1>
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vaddhnq(<512 x i1> %0, <16 x i32> %c, <16 x i32> %b)
+  ret <16 x i32> %1
+}
+
+; CHECK-LABEL: test89:
+; CHECK: if (!q{{[0-3]}}) v{{[0-9]+}}.w += v{{[0-9]+}}.w
+define <16 x i32> @test89(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c) #0 {
+entry:
+  %0 = bitcast <16 x i32> %a to <512 x i1>
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vaddwnq(<512 x i1> %0, <16 x i32> %c, <16 x i32> %b)
+  ret <16 x i32> %1
+}
+
+; CHECK-LABEL: test90:
+; CHECK: if (q{{[0-3]}}) v{{[0-9]+}}.b -= v{{[0-9]+}}.b
+define <16 x i32> @test90(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c) #0 {
+entry:
+  %0 = bitcast <16 x i32> %a to <512 x i1>
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vsubbq(<512 x i1> %0, <16 x i32> %c, <16 x i32> %b)
+  ret <16 x i32> %1
+}
+
+; CHECK-LABEL: test91:
+; CHECK: if (q{{[0-3]}}) v{{[0-9]+}}.h -= v{{[0-9]+}}.h
+define <16 x i32> @test91(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c) #0 {
+entry:
+  %0 = bitcast <16 x i32> %a to <512 x i1>
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vsubhq(<512 x i1> %0, <16 x i32> %c, <16 x i32> %b)
+  ret <16 x i32> %1
+}
+
+; CHECK-LABEL: test92:
+; CHECK: if (q{{[0-3]}}) v{{[0-9]+}}.w -= v{{[0-9]+}}.w
+define <16 x i32> @test92(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c) #0 {
+entry:
+  %0 = bitcast <16 x i32> %a to <512 x i1>
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vsubwq(<512 x i1> %0, <16 x i32> %c, <16 x i32> %b)
+  ret <16 x i32> %1
+}
+
+; CHECK-LABEL: test93:
+; CHECK: if (!q{{[0-3]}}) v{{[0-9]+}}.b -= v{{[0-9]+}}.b
+define <16 x i32> @test93(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c) #0 {
+entry:
+  %0 = bitcast <16 x i32> %a to <512 x i1>
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vsubbnq(<512 x i1> %0, <16 x i32> %c, <16 x i32> %b)
+  ret <16 x i32> %1
+}
+
+; CHECK-LABEL: test94:
+; CHECK: if (!q{{[0-3]}}) v{{[0-9]+}}.h -= v{{[0-9]+}}.h
+define <16 x i32> @test94(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c) #0 {
+entry:
+  %0 = bitcast <16 x i32> %a to <512 x i1>
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vsubhnq(<512 x i1> %0, <16 x i32> %c, <16 x i32> %b)
+  ret <16 x i32> %1
+}
+
+; CHECK-LABEL: test95:
+; CHECK: if (!q{{[0-3]}}) v{{[0-9]+}}.w -= v{{[0-9]+}}.w
+define <16 x i32> @test95(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c) #0 {
+entry:
+  %0 = bitcast <16 x i32> %a to <512 x i1>
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vsubwnq(<512 x i1> %0, <16 x i32> %c, <16 x i32> %b)
+  ret <16 x i32> %1
+}
+
+; CHECK-LABEL: test96:
+; CHECK: v{{[0-9]+}}.h = vabs(v{{[0-9]+}}.h)
+define <16 x i32> @test96(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vabsh(<16 x i32> %a)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test97:
+; CHECK: v{{[0-9]+}}.h = vabs(v{{[0-9]+}}.h):sat
+define <16 x i32> @test97(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vabsh.sat(<16 x i32> %a)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test98:
+; CHECK: v{{[0-9]+}}.w = vabs(v{{[0-9]+}}.w)
+define <16 x i32> @test98(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vabsw(<16 x i32> %a)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test99:
+; CHECK: v{{[0-9]+}}.w = vabs(v{{[0-9]+}}.w):sat
+define <16 x i32> @test99(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vabsw.sat(<16 x i32> %a)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test100:
+; CHECK: v{{[0-9]+}} = vnot(v{{[0-9]+}})
+define <16 x i32> @test100(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vnot(<16 x i32> %a)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test101:
+; CHECK: v{{[0-9]+}}.h = vdeal(v{{[0-9]+}}.h)
+define <16 x i32> @test101(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vdealh(<16 x i32> %a)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test102:
+; CHECK: v{{[0-9]+}}.b = vdeal(v{{[0-9]+}}.b)
+define <16 x i32> @test102(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vdealb(<16 x i32> %a)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test103:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uh = vunpack(v{{[0-9]+}}.ub)
+define <32 x i32> @test103(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vunpackub(<16 x i32> %a)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test104:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uw = vunpack(v{{[0-9]+}}.uh)
+define <32 x i32> @test104(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vunpackuh(<16 x i32> %a)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test105:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h = vunpack(v{{[0-9]+}}.b)
+define <32 x i32> @test105(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vunpackb(<16 x i32> %a)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test106:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w = vunpack(v{{[0-9]+}}.h)
+define <32 x i32> @test106(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vunpackh(<16 x i32> %a)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test107:
+; CHECK: v{{[0-9]+}}.h = vshuff(v{{[0-9]+}}.h)
+define <16 x i32> @test107(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vshuffh(<16 x i32> %a)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test108:
+; CHECK: v{{[0-9]+}}.b = vshuff(v{{[0-9]+}}.b)
+define <16 x i32> @test108(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32> %a)
+  ret <16 x i32> %0
+}
+
+; CHECK-LABEL: test109:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uh = vzxt(v{{[0-9]+}}.ub)
+define <32 x i32> @test109(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vzb(<16 x i32> %a)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test110:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uw = vzxt(v{{[0-9]+}}.uh)
+define <32 x i32> @test110(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vzh(<16 x i32> %a)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test111:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h = vsxt(v{{[0-9]+}}.b)
+define <32 x i32> @test111(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vsb(<16 x i32> %a)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test112:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w = vsxt(v{{[0-9]+}}.h)
+define <32 x i32> @test112(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vsh(<16 x i32> %a)
+  ret <32 x i32> %0
+}
+
+; CHECK-LABEL: test113:
+; CHECK: v{{[0-9]+}} = v{{[0-9]+}}
+define <16 x i32> @test113(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vassign(<16 x i32> %a)
+  ret <16 x i32> %0
+}
+
+declare <16 x i32> @llvm.hexagon.V6.vadduhsat(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vaddhsat(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vaddwsat(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vsubb(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vsubh(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vsubw(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vsububsat(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vsubuhsat(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vsubhsat(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vsubwsat(<16 x i32>, <16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vaddb.dv(<32 x i32>, <32 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vaddh.dv(<32 x i32>, <32 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32>, <32 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vaddubsat.dv(<32 x i32>, <32 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vadduhsat.dv(<32 x i32>, <32 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vaddhsat.dv(<32 x i32>, <32 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vaddwsat.dv(<32 x i32>, <32 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vsubb.dv(<32 x i32>, <32 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vsubh.dv(<32 x i32>, <32 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vsubw.dv(<32 x i32>, <32 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vsububsat.dv(<32 x i32>, <32 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vsubuhsat.dv(<32 x i32>, <32 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vsubhsat.dv(<32 x i32>, <32 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vsubwsat.dv(<32 x i32>, <32 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vaddubh(<16 x i32>, <16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vadduhw(<16 x i32>, <16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vaddhw(<16 x i32>, <16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vsububh(<16 x i32>, <16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vsubuhw(<16 x i32>, <16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vsubhw(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vabsdiffh(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vabsdiffuh(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vabsdiffw(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vavgub(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vavguh(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vavgh(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vavgw(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vnavgub(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vnavgh(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vnavgw(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vavgubrnd(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vavghrnd(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vavguhrnd(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vavgwrnd(<16 x i32>, <16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpabuuv(<32 x i32>, <32 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vand(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vminub(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vminuh(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vminh(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vminw(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vmaxub(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vmaxuh(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vmaxh(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vmaxw(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vdelta(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vrdelta(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vdealb4w(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vshuffeb(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32>, <16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vshufoeh(<16 x i32>, <16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vshufoeb(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vsathub(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vsatwh(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vroundwh(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vroundhb(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vroundwuh(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vroundhub(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vasrwv(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vlsrwv(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vasrhv(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vlsrhv(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vaslwv(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vaslhv(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vaddb(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vor(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vxor(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vaddubsat(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vaddbq(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vaddhq(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vaddwq(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vaddhnq(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vaddwnq(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vsubbq(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vsubhq(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vsubwq(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vsubbnq(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vsubhnq(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vsubwnq(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vabsh(<16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vabsh.sat(<16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vabsw(<16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vabsw.sat(<16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vnot(<16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vdealh(<16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vdealb(<16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vunpackub(<16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vunpackuh(<16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vunpackb(<16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vunpackh(<16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vshuffh(<16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vzb(<16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vzh(<16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vsb(<16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vsh(<16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vassign(<16 x i32>) #0
+
+attributes #0 = { nounwind readnone "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+

Added: llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-misc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-misc.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-misc.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-misc.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,587 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+ at l = external global <32 x i32>
+ at k = external global <16 x i32>
+ at h = external global <16 x i32>
+ at n = external global i64
+ at m = external global i32
+
+; CHECK-LABEL: test1:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w = vrmpy(v{{[0-9]+}}:{{[0-9]+}}.ub,r{{[0-9]+}}.b,#1)
+define void @test1(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32> %a, i32 %b, i32 1)
+  store <32 x i32> %0, <32 x i32>* @l, align 128
+  ret void
+}
+
+; CHECK-LABEL: test2:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uw = vrsad(v{{[0-9]+}}:{{[0-9]+}}.ub,r{{[0-9]+}}.ub,#1)
+define void @test2(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vrsadubi(<32 x i32> %a, i32 %b, i32 1)
+  store <32 x i32> %0, <32 x i32>* @l, align 128
+  ret void
+}
+
+; CHECK-LABEL: test3:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uw = vrmpy(v{{[0-9]+}}:{{[0-9]+}}.ub,r{{[0-9]+}}.ub,#1)
+define void @test3(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vrmpyubi(<32 x i32> %a, i32 %b, i32 1)
+  store <32 x i32> %0, <32 x i32>* @l, align 128
+  ret void
+}
+
+; CHECK-LABEL: test4:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w += vrmpy(v{{[0-9]+}}:{{[0-9]+}}.ub,r{{[0-9]+}}.b,#1)
+define void @test4(<32 x i32> %a, <32 x i32> %b, i32 %c) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32> %a, <32 x i32> %b, i32 %c, i32 1)
+  store <32 x i32> %0, <32 x i32>* @l, align 128
+  ret void
+}
+
+; CHECK-LABEL: test5:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uw += vrsad(v{{[0-9]+}}:{{[0-9]+}}.ub,r{{[0-9]+}}.ub,#1)
+define void @test5(<32 x i32> %a, <32 x i32> %b, i32 %c) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vrsadubi.acc(<32 x i32> %a, <32 x i32> %b, i32 %c, i32 1)
+  store <32 x i32> %0, <32 x i32>* @l, align 128
+  ret void
+}
+
+; CHECK-LABEL: test6:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uw += vrmpy(v{{[0-9]+}}:{{[0-9]+}}.ub,r{{[0-9]+}}.ub,#0)
+define void @test6(<32 x i32> %a, <32 x i32> %b, i32 %c) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vrmpyubi.acc(<32 x i32> %a, <32 x i32> %b, i32 %c, i32 0)
+  store <32 x i32> %0, <32 x i32>* @l, align 128
+  ret void
+}
+
+; CHECK-LABEL: test7:
+; CHECK: v{{[0-9]+}} = valign(v{{[0-9]+}},v{{[0-9]+}},r{{[0-9]+}})
+define void @test7(<16 x i32> %a, <16 x i32> %b, i32 %c) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %a, <16 x i32> %b, i32 %c)
+  store <16 x i32> %0, <16 x i32>* @k, align 64
+  ret void
+}
+
+; CHECK-LABEL: test8:
+; CHECK: v{{[0-9]+}} = vlalign(v{{[0-9]+}},v{{[0-9]+}},r{{[0-9]+}})
+define void @test8(<16 x i32> %a, <16 x i32> %b, i32 %c) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vlalignb(<16 x i32> %a, <16 x i32> %b, i32 %c)
+  store <16 x i32> %0, <16 x i32>* @k, align 64
+  ret void
+}
+
+; CHECK-LABEL: test9:
+; CHECK: v{{[0-9]+}}.h = vasr(v{{[0-9]+}}.w,v{{[0-9]+}}.w,r{{[0-9]+}})
+define void @test9(<16 x i32> %a, <16 x i32> %b, i32 %c) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32> %a, <16 x i32> %b, i32 %c)
+  store <16 x i32> %0, <16 x i32>* @k, align 64
+  ret void
+}
+
+; CHECK-LABEL: test10:
+; CHECK: v{{[0-9]+}}.h = vasr(v{{[0-9]+}}.w,v{{[0-9]+}}.w,r{{[0-9]+}}):sat
+define void @test10(<16 x i32> %a, <16 x i32> %b, i32 %c) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vasrwhsat(<16 x i32> %a, <16 x i32> %b, i32 %c)
+  store <16 x i32> %0, <16 x i32>* @k, align 64
+  ret void
+}
+
+; CHECK-LABEL: test11:
+; CHECK: v{{[0-9]+}}.h = vasr(v{{[0-9]+}}.w,v{{[0-9]+}}.w,r{{[0-9]+}}):rnd:sat
+define void @test11(<16 x i32> %a, <16 x i32> %b, i32 %c) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vasrwhrndsat(<16 x i32> %a, <16 x i32> %b, i32 %c)
+  store <16 x i32> %0, <16 x i32>* @k, align 64
+  ret void
+}
+
+; CHECK-LABEL: test12:
+; CHECK: v{{[0-9]+}}.uh = vasr(v{{[0-9]+}}.w,v{{[0-9]+}}.w,r{{[0-9]+}}):sat
+define void @test12(<16 x i32> %a, <16 x i32> %b, i32 %c) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vasrwuhsat(<16 x i32> %a, <16 x i32> %b, i32 %c)
+  store <16 x i32> %0, <16 x i32>* @k, align 64
+  ret void
+}
+
+; CHECK-LABEL: test13:
+; CHECK: v{{[0-9]+}}.ub = vasr(v{{[0-9]+}}.h,v{{[0-9]+}}.h,r{{[0-9]+}}):sat
+define void @test13(<16 x i32> %a, <16 x i32> %b, i32 %c) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %a, <16 x i32> %b, i32 %c)
+  store <16 x i32> %0, <16 x i32>* @k, align 64
+  ret void
+}
+
+; CHECK-LABEL: test14:
+; CHECK: v{{[0-9]+}}.ub = vasr(v{{[0-9]+}}.h,v{{[0-9]+}}.h,r{{[0-9]+}}):rnd:sat
+define void @test14(<16 x i32> %a, <16 x i32> %b, i32 %c) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubrndsat(<16 x i32> %a, <16 x i32> %b, i32 %c)
+  store <16 x i32> %0, <16 x i32>* @k, align 64
+  ret void
+}
+
+; CHECK-LABEL: test15:
+; CHECK: v{{[0-9]+}}.b = vasr(v{{[0-9]+}}.h,v{{[0-9]+}}.h,r{{[0-9]+}}):rnd:sat
+define void @test15(<16 x i32> %a, <16 x i32> %b, i32 %c) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vasrhbrndsat(<16 x i32> %a, <16 x i32> %b, i32 %c)
+  store <16 x i32> %0, <16 x i32>* @k, align 64
+  ret void
+}
+
+; CHECK-LABEL: test16:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h |= vunpacko(v{{[0-9]+}}.b)
+define void @test16(<32 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vunpackob(<32 x i32> %a, <16 x i32> %b)
+  store <32 x i32> %0, <32 x i32>* @l, align 128
+  ret void
+}
+
+; CHECK-LABEL: test17:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w |= vunpacko(v{{[0-9]+}}.h)
+define void @test17(<32 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vunpackoh(<32 x i32> %a, <16 x i32> %b)
+  store <32 x i32> %0, <32 x i32>* @l, align 128
+  ret void
+}
+
+; CHECK-LABEL: test18:
+; CHECK: v{{[0-9]+}} = valign(v{{[0-9]+}},v{{[0-9]+}},#3)
+define void @test18(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %a, <16 x i32> %b, i32 3)
+  store <16 x i32> %0, <16 x i32>* @k, align 64
+  ret void
+}
+
+; CHECK-LABEL: test19:
+; CHECK: v{{[0-9]+}} = vlalign(v{{[0-9]+}},v{{[0-9]+}},#3)
+define void @test19(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %a, <16 x i32> %b, i32 3)
+  store <16 x i32> %0, <16 x i32>* @k, align 64
+  ret void
+}
+
+; CHECK-LABEL: test20:
+; CHECK: v{{[0-9]+}} = vmux(q{{[0-3]+}},v{{[0-9]+}},v{{[0-9]+}})
+define void @test20(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c) #0 {
+entry:
+  %0 = bitcast <16 x i32> %a to <512 x i1>
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %0, <16 x i32> %b, <16 x i32> %c)
+  store <16 x i32> %1, <16 x i32>* @k, align 64
+  ret void
+}
+
+; CHECK-LABEL: test21:
+; CHECK: q{{[0-3]+}} = and(q{{[0-3]+}},q{{[0-3]+}})
+define void @test21(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = bitcast <16 x i32> %a to <512 x i1>
+  %1 = bitcast <16 x i32> %b to <512 x i1>
+  %2 = tail call <512 x i1> @llvm.hexagon.V6.pred.and(<512 x i1> %0, <512 x i1> %1)
+  store <512 x i1> %2, <512 x i1>* bitcast (<16 x i32>* @h to <512 x i1>*), align 64
+  ret void
+}
+
+; CHECK-LABEL: test22:
+; CHECK: q{{[0-3]+}} = or(q{{[0-3]+}},q{{[0-3]+}})
+define void @test22(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = bitcast <16 x i32> %a to <512 x i1>
+  %1 = bitcast <16 x i32> %b to <512 x i1>
+  %2 = tail call <512 x i1> @llvm.hexagon.V6.pred.or(<512 x i1> %0, <512 x i1> %1)
+  store <512 x i1> %2, <512 x i1>* bitcast (<16 x i32>* @h to <512 x i1>*), align 64
+  ret void
+}
+
+; CHECK-LABEL: test23:
+; CHECK: q{{[0-3]+}} = not(q{{[0-3]+}})
+define void @test23(<16 x i32> %a) #0 {
+entry:
+  %0 = bitcast <16 x i32> %a to <512 x i1>
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.pred.not(<512 x i1> %0)
+  store <512 x i1> %1, <512 x i1>* bitcast (<16 x i32>* @h to <512 x i1>*), align 64
+  ret void
+}
+
+; CHECK-LABEL: test24:
+; CHECK: q{{[0-3]+}} = xor(q{{[0-3]+}},q{{[0-3]+}})
+define void @test24(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = bitcast <16 x i32> %a to <512 x i1>
+  %1 = bitcast <16 x i32> %b to <512 x i1>
+  %2 = tail call <512 x i1> @llvm.hexagon.V6.pred.xor(<512 x i1> %0, <512 x i1> %1)
+  store <512 x i1> %2, <512 x i1>* bitcast (<16 x i32>* @h to <512 x i1>*), align 64
+  ret void
+}
+
+; CHECK-LABEL: test25:
+; CHECK: q{{[0-3]+}} = or(q{{[0-3]+}},!q{{[0-3]+}})
+define void @test25(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = bitcast <16 x i32> %a to <512 x i1>
+  %1 = bitcast <16 x i32> %b to <512 x i1>
+  %2 = tail call <512 x i1> @llvm.hexagon.V6.pred.or.n(<512 x i1> %0, <512 x i1> %1)
+  store <512 x i1> %2, <512 x i1>* bitcast (<16 x i32>* @h to <512 x i1>*), align 64
+  ret void
+}
+
+; CHECK-LABEL: test26:
+; CHECK: q{{[0-3]+}} = and(q{{[0-3]+}},!q{{[0-3]+}})
+define void @test26(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = bitcast <16 x i32> %a to <512 x i1>
+  %1 = bitcast <16 x i32> %b to <512 x i1>
+  %2 = tail call <512 x i1> @llvm.hexagon.V6.pred.and.n(<512 x i1> %0, <512 x i1> %1)
+  store <512 x i1> %2, <512 x i1>* bitcast (<16 x i32>* @h to <512 x i1>*), align 64
+  ret void
+}
+
+; CHECK-LABEL: test27:
+; CHECK: q{{[0-3]+}} = vcmp.gt(v{{[0-9]+}}.ub,v{{[0-9]+}}.ub)
+define void @test27(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %a, <16 x i32> %b)
+  store <512 x i1> %0, <512 x i1>* bitcast (<16 x i32>* @k to <512 x i1>*), align 64
+  ret void
+}
+
+; CHECK-LABEL: test28:
+; CHECK: q{{[0-3]+}} = vcmp.gt(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define void @test28(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <512 x i1> @llvm.hexagon.V6.vgth(<16 x i32> %a, <16 x i32> %b)
+  store <512 x i1> %0, <512 x i1>* bitcast (<16 x i32>* @k to <512 x i1>*), align 64
+  ret void
+}
+
+; CHECK-LABEL: test29:
+; CHECK: q{{[0-3]+}} = vcmp.eq(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define void @test29(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <512 x i1> @llvm.hexagon.V6.veqh(<16 x i32> %a, <16 x i32> %b)
+  store <512 x i1> %0, <512 x i1>* bitcast (<16 x i32>* @k to <512 x i1>*), align 64
+  ret void
+}
+
+; CHECK-LABEL: test30:
+; CHECK: q{{[0-3]+}} = vcmp.gt(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
+define void @test30(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <512 x i1> @llvm.hexagon.V6.vgtw(<16 x i32> %a, <16 x i32> %b)
+  store <512 x i1> %0, <512 x i1>* bitcast (<16 x i32>* @k to <512 x i1>*), align 64
+  ret void
+}
+
+; CHECK-LABEL: test31:
+; CHECK: q{{[0-3]+}} = vcmp.eq(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
+define void @test31(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <512 x i1> @llvm.hexagon.V6.veqw(<16 x i32> %a, <16 x i32> %b)
+  store <512 x i1> %0, <512 x i1>* bitcast (<16 x i32>* @k to <512 x i1>*), align 64
+  ret void
+}
+
+; CHECK-LABEL: test32:
+; CHECK: q{{[0-3]+}} = vcmp.gt(v{{[0-9]+}}.uh,v{{[0-9]+}}.uh)
+define void @test32(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <512 x i1> @llvm.hexagon.V6.vgtuh(<16 x i32> %a, <16 x i32> %b)
+  store <512 x i1> %0, <512 x i1>* bitcast (<16 x i32>* @k to <512 x i1>*), align 64
+  ret void
+}
+
+; CHECK-LABEL: test33:
+; CHECK: v{{[0-9]+}} |= vand(q{{[0-3]+}},r{{[0-9]+}})
+define void @test33(<16 x i32> %a, <16 x i32> %b, i32 %c) #0 {
+entry:
+  %0 = bitcast <16 x i32> %b to <512 x i1>
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt.acc(<16 x i32> %a, <512 x i1> %0, i32 %c)
+  store <16 x i32> %1, <16 x i32>* @h, align 64
+  ret void
+}
+
+; CHECK-LABEL: test34:
+; CHECK: q{{[0-3]+}} |= vand(v{{[0-9]+}},r{{[0-9]+}})
+define void @test34(<16 x i32> %a, <16 x i32> %b, i32 %c) #0 {
+entry:
+  %0 = bitcast <16 x i32> %a to <512 x i1>
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.vandvrt.acc(<512 x i1> %0, <16 x i32> %b, i32 %c)
+  store <512 x i1> %1, <512 x i1>* bitcast (<16 x i32>* @k to <512 x i1>*), align 64
+  ret void
+}
+
+; CHECK-LABEL: test35:
+; CHECK: v{{[0-9]+}} = vand(q{{[0-3]+}},r{{[0-9]+}})
+define void @test35(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = bitcast <16 x i32> %a to <512 x i1>
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<512 x i1> %0, i32 %b)
+  store <16 x i32> %1, <16 x i32>* @h, align 64
+  ret void
+}
+
+; CHECK-LABEL: test36:
+; CHECK: q{{[0-3]+}} = vand(v{{[0-9]+}},r{{[0-9]+}})
+define void @test36(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <512 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %a, i32 %b)
+  store <512 x i1> %0, <512 x i1>* bitcast (<16 x i32>* @k to <512 x i1>*), align 64
+  ret void
+}
+
+; CHECK-LABEL: test37:
+; CHECK: r{{[0-9]+}}:{{[0-9]+}} = rol(r{{[0-9]+}}:{{[0-9]+}},#38)
+define void @test37(i64 %a) #0 {
+entry:
+  %0 = tail call i64 @llvm.hexagon.S6.rol.i.p(i64 %a, i32 38)
+  store i64 %0, i64* @n, align 8
+  ret void
+}
+
+; CHECK-LABEL: test38:
+; CHECK: r{{[0-9]+}}:{{[0-9]+}} += rol(r{{[0-9]+}}:{{[0-9]+}},#36)
+define void @test38(i64 %a, i64 %b) #0 {
+entry:
+  %0 = tail call i64 @llvm.hexagon.S6.rol.i.p.acc(i64 %a, i64 %b, i32 36)
+  store i64 %0, i64* @n, align 8
+  ret void
+}
+
+; CHECK-LABEL: test39:
+; CHECK: r{{[0-9]+}}:{{[0-9]+}} &= rol(r{{[0-9]+}}:{{[0-9]+}},#25)
+define void @test39(i64 %a, i64 %b) #0 {
+entry:
+  %0 = tail call i64 @llvm.hexagon.S6.rol.i.p.and(i64 %a, i64 %b, i32 25)
+  store i64 %0, i64* @n, align 8
+  ret void
+}
+
+; CHECK-LABEL: test40:
+; CHECK: r{{[0-9]+}}:{{[0-9]+}} -= rol(r{{[0-9]+}}:{{[0-9]+}},#20)
+define void @test40(i64 %a, i64 %b) #0 {
+entry:
+  %0 = tail call i64 @llvm.hexagon.S6.rol.i.p.nac(i64 %a, i64 %b, i32 20)
+  store i64 %0, i64* @n, align 8
+  ret void
+}
+
+; CHECK-LABEL: test41:
+; CHECK: r{{[0-9]+}}:{{[0-9]+}} |= rol(r{{[0-9]+}}:{{[0-9]+}},#22)
+define void @test41(i64 %a, i64 %b) #0 {
+entry:
+  %0 = tail call i64 @llvm.hexagon.S6.rol.i.p.or(i64 %a, i64 %b, i32 22)
+  store i64 %0, i64* @n, align 8
+  ret void
+}
+
+; CHECK-LABEL: test42:
+; CHECK: r{{[0-9]+}}:{{[0-9]+}} ^= rol(r{{[0-9]+}}:{{[0-9]+}},#25)
+define void @test42(i64 %a, i64 %b) #0 {
+entry:
+  %0 = tail call i64 @llvm.hexagon.S6.rol.i.p.xacc(i64 %a, i64 %b, i32 25)
+  store i64 %0, i64* @n, align 8
+  ret void
+}
+
+; CHECK-LABEL: test43:
+; CHECK: r{{[0-9]+}} = rol(r{{[0-9]+}},#14)
+define void @test43(i32 %a) #0 {
+entry:
+  %0 = tail call i32 @llvm.hexagon.S6.rol.i.r(i32 %a, i32 14)
+  %conv = sext i32 %0 to i64
+  store i64 %conv, i64* @n, align 8
+  ret void
+}
+
+; CHECK-LABEL: test44:
+; CHECK: r{{[0-9]+}} += rol(r{{[0-9]+}},#12)
+define void @test44(i32 %a, i32 %b) #0 {
+entry:
+  %0 = tail call i32 @llvm.hexagon.S6.rol.i.r.acc(i32 %a, i32 %b, i32 12)
+  store i32 %0, i32* @m, align 4
+  ret void
+}
+
+; CHECK-LABEL: test45:
+; CHECK: r{{[0-9]+}} &= rol(r{{[0-9]+}},#18)
+define void @test45(i32 %a, i32 %b) #0 {
+entry:
+  %0 = tail call i32 @llvm.hexagon.S6.rol.i.r.and(i32 %a, i32 %b, i32 18)
+  store i32 %0, i32* @m, align 4
+  ret void
+}
+
+; CHECK-LABEL: test46:
+; CHECK: r{{[0-9]+}} -= rol(r{{[0-9]+}},#31)
+define void @test46(i32 %a, i32 %b) #0 {
+entry:
+  %0 = tail call i32 @llvm.hexagon.S6.rol.i.r.nac(i32 %a, i32 %b, i32 31)
+  store i32 %0, i32* @m, align 4
+  ret void
+}
+
+; CHECK-LABEL: test47:
+; CHECK: r{{[0-9]+}} |= rol(r{{[0-9]+}},#30)
+define void @test47(i32 %a, i32 %b) #0 {
+entry:
+  %0 = tail call i32 @llvm.hexagon.S6.rol.i.r.or(i32 %a, i32 %b, i32 30)
+  store i32 %0, i32* @m, align 4
+  ret void
+}
+
+; CHECK-LABEL: test48:
+; CHECK: r{{[0-9]+}} ^= rol(r{{[0-9]+}},#31)
+define void @test48(i32 %a, i32 %b) #0 {
+entry:
+  %0 = tail call i32 @llvm.hexagon.S6.rol.i.r.xacc(i32 %a, i32 %b, i32 31)
+  store i32 %0, i32* @m, align 4
+  ret void
+}
+
+; CHECK-LABEL: test49:
+; CHECK: r{{[0-9]+}} = vextract(v{{[0-9]+}},r{{[0-9]+}})
+define void @test49(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call i32 @llvm.hexagon.V6.extractw(<16 x i32> %a, i32 %b)
+  store i32 %0, i32* @m, align 4
+  ret void
+}
+
+; CHECK-LABEL: test50:
+; CHECK: v{{[0-9]+}} = vsplat(r{{[0-9]+}})
+define void @test50(i32 %a) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 %a)
+  store <16 x i32> %0, <16 x i32>* @k, align 64
+  ret void
+}
+
+; CHECK-LABEL: test51:
+; CHECK: q{{[0-3]}} = vsetq(r{{[0-9]+}})
+define void @test51(i32 %a) #0 {
+entry:
+  %0 = tail call <512 x i1> @llvm.hexagon.V6.pred.scalar2(i32 %a)
+  store <512 x i1> %0, <512 x i1>* bitcast (<16 x i32>* @k to <512 x i1>*), align 64
+  ret void
+}
+
+; CHECK-LABEL: test52:
+; CHECK: v{{[0-9]+}}.b = vlut32(v{{[0-9]+}}.b,v{{[0-9]+}}.b,r{{[0-9]+}})
+define void @test52(<16 x i32> %a, <16 x i32> %b, i32 %c) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb(<16 x i32> %a, <16 x i32> %b, i32 %c)
+  store <16 x i32> %0, <16 x i32>* @k, align 64
+  ret void
+}
+
+; CHECK-LABEL: test53:
+; CHECK: v{{[0-9]+}}.b |= vlut32(v{{[0-9]+}}.b,v{{[0-9]+}}.b,r{{[0-9]+}})
+define void @test53(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, i32 %d) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, i32 %d)
+  store <16 x i32> %0, <16 x i32>* @k, align 64
+  ret void
+}
+
+; CHECK-LABEL: test54:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h |= vlut16(v{{[0-9]+}}.b,v{{[0-9]+}}.h,r{{[0-9]+}})
+define void @test54(<32 x i32> %a, <16 x i32> %b, <16 x i32> %c, i32 %d) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vlutvwh.oracc(<32 x i32> %a, <16 x i32> %b, <16 x i32> %c, i32 %d)
+  store <32 x i32> %0, <32 x i32>* @l, align 128
+  ret void
+}
+
+; CHECK-LABEL: test55:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h = vlut16(v{{[0-9]+}}.b,v{{[0-9]+}}.h,r{{[0-9]+}})
+define void @test55(<16 x i32> %a, <16 x i32> %b, i32 %l) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vlutvwh(<16 x i32> %a, <16 x i32> %b, i32 %l)
+  store <32 x i32> %0, <32 x i32>* @l, align 128
+  ret void
+}
+
+; CHECK-LABEL: test56:
+; CHECK: v{{[0-9]+}}.w = vinsert(r{{[0-9]+}})
+define void @test56(i32 %b) #0 {
+entry:
+  %0 = load <16 x i32>, <16 x i32>* @k, align 64
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vinsertwr(<16 x i32> %0, i32 %b)
+  store <16 x i32> %1, <16 x i32>* @k, align 64
+  ret void
+}
+
+declare <32 x i32> @llvm.hexagon.V6.vrmpybusi(<32 x i32>, i32, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vrsadubi(<32 x i32>, i32, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vrmpyubi(<32 x i32>, i32, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vrmpybusi.acc(<32 x i32>, <32 x i32>, i32, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vrsadubi.acc(<32 x i32>, <32 x i32>, i32, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vrmpyubi.acc(<32 x i32>, <32 x i32>, i32, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32>, <16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vlalignb(<16 x i32>, <16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vasrwh(<16 x i32>, <16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vasrwhsat(<16 x i32>, <16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vasrwhrndsat(<16 x i32>, <16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vasrwuhsat(<16 x i32>, <16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32>, <16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vasrhubrndsat(<16 x i32>, <16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vasrhbrndsat(<16 x i32>, <16 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vunpackob(<32 x i32>, <16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vunpackoh(<32 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>, <16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32>, <16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.pred.and(<512 x i1>, <512 x i1>) #0
+declare <512 x i1> @llvm.hexagon.V6.pred.or(<512 x i1>, <512 x i1>) #0
+declare <512 x i1> @llvm.hexagon.V6.pred.not(<512 x i1>) #0
+declare <512 x i1> @llvm.hexagon.V6.pred.xor(<512 x i1>, <512 x i1>) #0
+declare <512 x i1> @llvm.hexagon.V6.pred.or.n(<512 x i1>, <512 x i1>) #0
+declare <512 x i1> @llvm.hexagon.V6.pred.and.n(<512 x i1>, <512 x i1>) #0
+declare <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.vgth(<16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.veqh(<16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.vgtw(<16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.veqw(<16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.vgtuh(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vandqrt.acc(<16 x i32>, <512 x i1>, i32) #0
+declare <512 x i1> @llvm.hexagon.V6.vandvrt.acc(<512 x i1>, <16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vandqrt(<512 x i1>, i32) #0
+declare <512 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32>, i32) #0
+declare i64 @llvm.hexagon.S6.rol.i.p(i64, i32) #0
+declare i64 @llvm.hexagon.S6.rol.i.p.acc(i64, i64, i32) #0
+declare i64 @llvm.hexagon.S6.rol.i.p.and(i64, i64, i32) #0
+declare i64 @llvm.hexagon.S6.rol.i.p.nac(i64, i64, i32) #0
+declare i64 @llvm.hexagon.S6.rol.i.p.or(i64, i64, i32) #0
+declare i64 @llvm.hexagon.S6.rol.i.p.xacc(i64, i64, i32) #0
+declare i32 @llvm.hexagon.S6.rol.i.r(i32, i32) #0
+declare i32 @llvm.hexagon.S6.rol.i.r.acc(i32, i32, i32) #0
+declare i32 @llvm.hexagon.S6.rol.i.r.and(i32, i32, i32) #0
+declare i32 @llvm.hexagon.S6.rol.i.r.nac(i32, i32, i32) #0
+declare i32 @llvm.hexagon.S6.rol.i.r.or(i32, i32, i32) #0
+declare i32 @llvm.hexagon.S6.rol.i.r.xacc(i32, i32, i32) #0
+declare i32 @llvm.hexagon.V6.extractw(<16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32) #0
+declare <512 x i1> @llvm.hexagon.V6.pred.scalar2(i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vlutvvb(<16 x i32>, <16 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vlutvwh(<16 x i32>, <16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32>, <16 x i32>, <16 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vlutvwh.oracc(<32 x i32>, <16 x i32>, <16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vinsertwr(<16 x i32>, i32) #0
+
+attributes #0 = { nounwind readnone "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }

Added: llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-permute.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-permute.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-permute.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-permute.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,167 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+ at d = external global <16 x i32>
+ at c = external global <32 x i32>
+
+; CHECK-LABEL: test1:
+; CHECK: v{{[0-9]+}}.b = vpacke(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define void @test1(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vpackeb(<16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test2:
+; CHECK: v{{[0-9]+}}.h = vpacke(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
+define void @test2(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vpackeh(<16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test3:
+; CHECK: v{{[0-9]+}}.ub = vpack(v{{[0-9]+}}.h,v{{[0-9]+}}.h):sat
+define void @test3(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vpackhub.sat(<16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test4:
+; CHECK: v{{[0-9]+}}.b = vpack(v{{[0-9]+}}.h,v{{[0-9]+}}.h):sat
+define void @test4(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vpackhb.sat(<16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test5:
+; CHECK: v{{[0-9]+}}.uh = vpack(v{{[0-9]+}}.w,v{{[0-9]+}}.w):sat
+define void @test5(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vpackwuh.sat(<16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test6:
+; CHECK: v{{[0-9]+}}.h = vpack(v{{[0-9]+}}.w,v{{[0-9]+}}.w):sat
+define void @test6(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vpackwh.sat(<16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test7:
+; CHECK: v{{[0-9]+}}.b = vpacko(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define void @test7(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vpackob(<16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test8:
+; CHECK: v{{[0-9]+}}.h = vpacko(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
+define void @test8(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vpackoh(<16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test9:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uh = vunpack(v{{[0-9]+}}.ub)
+define void @test9(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vunpackub(<16 x i32> %a)
+  store <32 x i32> %0, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test10:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uw = vunpack(v{{[0-9]+}}.uh)
+define void @test10(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vunpackuh(<16 x i32> %a)
+  store <32 x i32> %0, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test11:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h = vunpack(v{{[0-9]+}}.b)
+define void @test11(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vunpackb(<16 x i32> %a)
+  store <32 x i32> %0, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test12:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w = vunpack(v{{[0-9]+}}.h)
+define void @test12(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vunpackh(<16 x i32> %a)
+  store <32 x i32> %0, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test13:
+; CHECK: v{{[0-9]+}}.h = vdeal(v{{[0-9]+}}.h)
+define void @test13(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vdealh(<16 x i32> %a)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test14:
+; CHECK: v{{[0-9]+}}.b = vdeal(v{{[0-9]+}}.b)
+define void @test14(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vdealb(<16 x i32> %a)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test15:
+; CHECK: v{{[0-9]+}}.h = vshuff(v{{[0-9]+}}.h)
+define void @test15(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vshuffh(<16 x i32> %a)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test16:
+; CHECK: v{{[0-9]+}}.b = vshuff(v{{[0-9]+}}.b)
+define void @test16(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32> %a)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+declare <16 x i32> @llvm.hexagon.V6.vpackeb(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vpackeh(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vpackhub.sat(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vpackhb.sat(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vpackwuh.sat(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vpackwh.sat(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vpackob(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vpackoh(<16 x i32>, <16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vunpackub(<16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vunpackuh(<16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vunpackb(<16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vunpackh(<16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vdealh(<16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vdealb(<16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vshuffh(<16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32>) #0
+
+attributes #0 = { nounwind readnone "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }

Added: llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-shift.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-shift.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-shift.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,56 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+ at d = external global <16 x i32>
+
+; CHECK-LABEL: test18:
+; CHECK: v{{[0-9]+}}.uw = vcl0(v{{[0-9]+}}.uw)
+define void @test18(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vcl0w(<16 x i32> %a)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test19:
+; CHECK: v{{[0-9]+}}.h = vpopcount(v{{[0-9]+}}.h)
+define void @test19(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vpopcounth(<16 x i32> %a)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test20:
+; CHECK: v{{[0-9]+}}.uh = vcl0(v{{[0-9]+}}.uh)
+define void @test20(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vcl0h(<16 x i32> %a)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test21:
+; CHECK: v{{[0-9]+}}.w = vnormamt(v{{[0-9]+}}.w)
+define void @test21(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vnormamtw(<16 x i32> %a)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test22:
+; CHECK: v{{[0-9]+}}.h = vnormamt(v{{[0-9]+}}.h)
+define void @test22(<16 x i32> %a) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vnormamth(<16 x i32> %a)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+declare <16 x i32> @llvm.hexagon.V6.vcl0w(<16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vpopcounth(<16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vcl0h(<16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vnormamtw(<16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vnormamth(<16 x i32>) #0
+
+attributes #0 = { nounwind readnone "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }

Added: llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-vcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-vcmp.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-vcmp.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-vcmp.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,330 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+ at d = external global <16 x i32>
+
+; CHECK-LABEL: test1:
+; CHECK: q{{[0-9]}} &= vcmp.eq(v{{[0-9]+}}.b,v{{[0-9]+}}.b)
+define void @test1(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.veqb.and(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test2:
+; CHECK: q{{[0-9]}} &= vcmp.eq(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define void @test2(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.veqh.and(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test3:
+; CHECK: q{{[0-9]}} &= vcmp.eq(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
+define void @test3(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.veqw.and(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test4:
+; CHECK: q{{[0-9]}} &= vcmp.gt(v{{[0-9]+}}.b,v{{[0-9]+}}.b)
+define void @test4(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.vgtb.and(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test5:
+; CHECK: q{{[0-9]}} &= vcmp.gt(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define void @test5(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.vgth.and(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test6:
+; CHECK: q{{[0-9]}} &= vcmp.gt(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
+define void @test6(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.vgtw.and(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test7:
+; CHECK: q{{[0-9]}} &= vcmp.gt(v{{[0-9]+}}.ub,v{{[0-9]+}}.ub)
+define void @test7(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.vgtub.and(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test8:
+; CHECK: q{{[0-9]}} &= vcmp.gt(v{{[0-9]+}}.uh,v{{[0-9]+}}.uh)
+define void @test8(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.vgtuh.and(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test9:
+; CHECK: q{{[0-9]}} &= vcmp.gt(v{{[0-9]+}}.uw,v{{[0-9]+}}.uw)
+define void @test9(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.vgtuw.and(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test10:
+; CHECK: q{{[0-9]}} |= vcmp.eq(v{{[0-9]+}}.b,v{{[0-9]+}}.b)
+define void @test10(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.veqb.or(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test11:
+; CHECK: q{{[0-9]}} |= vcmp.eq(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define void @test11(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.veqh.or(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test12:
+; CHECK: q{{[0-9]}} |= vcmp.eq(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
+define void @test12(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.veqw.or(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test13:
+; CHECK: q{{[0-9]}} |= vcmp.gt(v{{[0-9]+}}.b,v{{[0-9]+}}.b)
+define void @test13(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.vgtb.or(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test14:
+; CHECK: q{{[0-9]}} |= vcmp.gt(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define void @test14(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.vgth.or(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test15:
+; CHECK: q{{[0-9]}} |= vcmp.gt(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
+define void @test15(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.vgtw.or(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test16:
+; CHECK: q{{[0-9]}} |= vcmp.gt(v{{[0-9]+}}.ub,v{{[0-9]+}}.ub)
+define void @test16(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.vgtub.or(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test17:
+; CHECK: q{{[0-9]}} |= vcmp.gt(v{{[0-9]+}}.uh,v{{[0-9]+}}.uh)
+define void @test17(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.vgtuh.or(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test18:
+; CHECK: q{{[0-9]}} |= vcmp.gt(v{{[0-9]+}}.uw,v{{[0-9]+}}.uw)
+define void @test18(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.vgtuw.or(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test19:
+; CHECK: q{{[0-9]}} ^= vcmp.eq(v{{[0-9]+}}.b,v{{[0-9]+}}.b)
+define void @test19(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.veqb.xor(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test20:
+; CHECK: q{{[0-9]}} ^= vcmp.eq(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define void @test20(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.veqh.xor(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test21:
+; CHECK: q{{[0-9]}} ^= vcmp.eq(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
+define void @test21(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.veqw.xor(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test22:
+; CHECK: q{{[0-9]}} ^= vcmp.gt(v{{[0-9]+}}.b,v{{[0-9]+}}.b)
+define void @test22(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.vgtb.xor(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test23:
+; CHECK: q{{[0-9]}} ^= vcmp.gt(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define void @test23(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.vgth.xor(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test24:
+; CHECK: q{{[0-9]}} ^= vcmp.gt(v{{[0-9]+}}.w,v{{[0-9]+}}.w)
+define void @test24(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.vgtw.xor(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test25:
+; CHECK: q{{[0-9]}} ^= vcmp.gt(v{{[0-9]+}}.ub,v{{[0-9]+}}.ub)
+define void @test25(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.vgtub.xor(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test26:
+; CHECK: q{{[0-9]}} ^= vcmp.gt(v{{[0-9]+}}.uh,v{{[0-9]+}}.uh)
+define void @test26(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.vgtuh.xor(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test27:
+; CHECK: q{{[0-9]}} ^= vcmp.gt(v{{[0-9]+}}.uw,v{{[0-9]+}}.uw)
+define void @test27(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <512 x i1>, <512 x i1>* bitcast (<16 x i32>* @d to <512 x i1>*), align 64
+  %1 = tail call <512 x i1> @llvm.hexagon.V6.vgtuw.xor(<512 x i1> %0, <16 x i32> %a, <16 x i32> %b)
+  %2 = bitcast <512 x i1> %1 to <16 x i32>
+  store <16 x i32> %2, <16 x i32>* @d, align 64
+  ret void
+}
+
+declare <512 x i1> @llvm.hexagon.V6.veqb.and(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.veqh.and(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.veqw.and(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.vgtb.and(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.vgth.and(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.vgtw.and(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.vgtub.and(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.vgtuh.and(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.vgtuw.and(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.veqb.or(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.veqh.or(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.veqw.or(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.vgtb.or(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.vgth.or(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.vgtw.or(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.vgtub.or(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.vgtuh.or(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.vgtuw.or(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.veqb.xor(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.veqh.xor(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.veqw.xor(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.vgtb.xor(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.vgth.xor(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.vgtw.xor(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.vgtub.xor(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.vgtuh.xor(<512 x i1>, <16 x i32>, <16 x i32>) #0
+declare <512 x i1> @llvm.hexagon.V6.vgtuw.xor(<512 x i1>, <16 x i32>, <16 x i32>) #0
+
+attributes #0 = { nounwind readnone "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }

Added: llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-vmpy-acc-128B.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-vmpy-acc-128B.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-vmpy-acc-128B.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-vmpy-acc-128B.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,432 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+ at c = external global <64 x i32>
+ at d = external global <32 x i32>
+
+; CHECK-LABEL: test1:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h += vtmpy(v{{[0-9]+}}:{{[0-9]+}}.b,r{{[0-9]+}}.b)
+define void @test1(<64 x i32> %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca <64 x i32>, align 256
+  %b.addr = alloca i32, align 4
+  store <64 x i32> %a, <64 x i32>* %a.addr, align 256
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load <64 x i32>, <64 x i32>* @c, align 256
+  %1 = load <64 x i32>, <64 x i32>* %a.addr, align 256
+  %2 = load i32, i32* %b.addr, align 4
+  %3 = call <64 x i32> @llvm.hexagon.V6.vtmpyb.acc.128B(<64 x i32> %0, <64 x i32> %1, i32 %2)
+  store <64 x i32> %3, <64 x i32>* @c, align 256
+  ret void
+}
+
+; CHECK-LABEL: test2:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h += vtmpy(v{{[0-9]+}}:{{[0-9]+}}.ub,r{{[0-9]+}}.b)
+define void @test2(<64 x i32> %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca <64 x i32>, align 256
+  %b.addr = alloca i32, align 4
+  store <64 x i32> %a, <64 x i32>* %a.addr, align 256
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load <64 x i32>, <64 x i32>* @c, align 256
+  %1 = load <64 x i32>, <64 x i32>* %a.addr, align 256
+  %2 = load i32, i32* %b.addr, align 4
+  %3 = call <64 x i32> @llvm.hexagon.V6.vtmpybus.acc.128B(<64 x i32> %0, <64 x i32> %1, i32 %2)
+  store <64 x i32> %3, <64 x i32>* @c, align 256
+  ret void
+}
+
+; CHECK-LABEL: test3:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w += vtmpy(v{{[0-9]+}}:{{[0-9]+}}.h,r{{[0-9]+}}.b)
+define void @test3(<64 x i32> %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca <64 x i32>, align 256
+  %b.addr = alloca i32, align 4
+  store <64 x i32> %a, <64 x i32>* %a.addr, align 256
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load <64 x i32>, <64 x i32>* @c, align 256
+  %1 = load <64 x i32>, <64 x i32>* %a.addr, align 256
+  %2 = load i32, i32* %b.addr, align 4
+  %3 = call <64 x i32> @llvm.hexagon.V6.vtmpyhb.acc.128B(<64 x i32> %0, <64 x i32> %1, i32 %2)
+  store <64 x i32> %3, <64 x i32>* @c, align 256
+  ret void
+}
+
+; CHECK-LABEL: test4:
+; CHECK: v{{[0-9]+}}.w += vdmpy(v{{[0-9]+}}.h,r{{[0-9]+}}.b)
+define void @test4(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca <32 x i32>, align 128
+  %b.addr = alloca i32, align 4
+  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load <32 x i32>, <32 x i32>* @d, align 128
+  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
+  %2 = load i32, i32* %b.addr, align 4
+  %3 = call <32 x i32> @llvm.hexagon.V6.vdmpyhb.acc.128B(<32 x i32> %0, <32 x i32> %1, i32 %2)
+  store <32 x i32> %3, <32 x i32>* @d, align 128
+  ret void
+}
+
+; CHECK-LABEL: test5:
+; CHECK: v{{[0-9]+}}.uw += vrmpy(v{{[0-9]+}}.ub,r{{[0-9]+}}.ub)
+define void @test5(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca <32 x i32>, align 128
+  %b.addr = alloca i32, align 4
+  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load <32 x i32>, <32 x i32>* @d, align 128
+  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
+  %2 = load i32, i32* %b.addr, align 4
+  %3 = call <32 x i32> @llvm.hexagon.V6.vrmpyub.acc.128B(<32 x i32> %0, <32 x i32> %1, i32 %2)
+  store <32 x i32> %3, <32 x i32>* @d, align 128
+  ret void
+}
+
+; CHECK-LABEL: test6:
+; CHECK: v{{[0-9]+}}.w += vrmpy(v{{[0-9]+}}.ub,r{{[0-9]+}}.b)
+define void @test6(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca <32 x i32>, align 128
+  %b.addr = alloca i32, align 4
+  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load <32 x i32>, <32 x i32>* @d, align 128
+  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
+  %2 = load i32, i32* %b.addr, align 4
+  %3 = call <32 x i32> @llvm.hexagon.V6.vrmpybus.acc.128B(<32 x i32> %0, <32 x i32> %1, i32 %2)
+  store <32 x i32> %3, <32 x i32>* @d, align 128
+  ret void
+}
+
+; CHECK-LABEL: test7:
+; CHECK: v{{[0-9]+}}.h += vdmpy(v{{[0-9]+}}.ub,r{{[0-9]+}}.b)
+define void @test7(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca <32 x i32>, align 128
+  %b.addr = alloca i32, align 4
+  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load <32 x i32>, <32 x i32>* @d, align 128
+  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
+  %2 = load i32, i32* %b.addr, align 4
+  %3 = call <32 x i32> @llvm.hexagon.V6.vdmpybus.acc.128B(<32 x i32> %0, <32 x i32> %1, i32 %2)
+  store <32 x i32> %3, <32 x i32>* @d, align 128
+  ret void
+}
+
+; CHECK-LABEL: test8:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h += vdmpy(v{{[0-9]+}}:{{[0-9]+}}.ub,r{{[0-9]+}}.b)
+define void @test8(<64 x i32> %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca <64 x i32>, align 256
+  %b.addr = alloca i32, align 4
+  store <64 x i32> %a, <64 x i32>* %a.addr, align 256
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load <64 x i32>, <64 x i32>* @c, align 256
+  %1 = load <64 x i32>, <64 x i32>* %a.addr, align 256
+  %2 = load i32, i32* %b.addr, align 4
+  %3 = call <64 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc.128B(<64 x i32> %0, <64 x i32> %1, i32 %2)
+  store <64 x i32> %3, <64 x i32>* @c, align 256
+  ret void
+}
+
+; CHECK-LABEL: test9:
+; CHECK: v{{[0-9]+}}.w += vdmpy(v{{[0-9]+}}.h,r{{[0-9]+}}.uh):sat
+define void @test9(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca <32 x i32>, align 128
+  %b.addr = alloca i32, align 4
+  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load <32 x i32>, <32 x i32>* @d, align 128
+  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
+  %2 = load i32, i32* %b.addr, align 4
+  %3 = call <32 x i32> @llvm.hexagon.V6.vdmpyhsusat.acc.128B(<32 x i32> %0, <32 x i32> %1, i32 %2)
+  store <32 x i32> %3, <32 x i32>* @d, align 128
+  ret void
+}
+
+; CHECK-LABEL: test10:
+; CHECK: v{{[0-9]+}}.w += vdmpy(v{{[0-9]+}}:{{[0-9]+}}.h,r{{[0-9]+}}.uh,#1):sat
+define void @test10(<64 x i32> %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca <64 x i32>, align 256
+  %b.addr = alloca i32, align 4
+  store <64 x i32> %a, <64 x i32>* %a.addr, align 256
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load <32 x i32>, <32 x i32>* @d, align 128
+  %1 = load <64 x i32>, <64 x i32>* %a.addr, align 256
+  %2 = load i32, i32* %b.addr, align 4
+  %3 = call <32 x i32> @llvm.hexagon.V6.vdmpyhsuisat.acc.128B(<32 x i32> %0, <64 x i32> %1, i32 %2)
+  store <32 x i32> %3, <32 x i32>* @d, align 128
+  ret void
+}
+
+; CHECK-LABEL: test11:
+; CHECK: v{{[0-9]+}}.w += vdmpy(v{{[0-9]+}}:{{[0-9]+}}.h,r{{[0-9]+}}.h):sat
+define void @test11(<64 x i32> %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca <64 x i32>, align 256
+  %b.addr = alloca i32, align 4
+  store <64 x i32> %a, <64 x i32>* %a.addr, align 256
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load <32 x i32>, <32 x i32>* @d, align 128
+  %1 = load <64 x i32>, <64 x i32>* %a.addr, align 256
+  %2 = load i32, i32* %b.addr, align 4
+  %3 = call <32 x i32> @llvm.hexagon.V6.vdmpyhisat.acc.128B(<32 x i32> %0, <64 x i32> %1, i32 %2)
+  store <32 x i32> %3, <32 x i32>* @d, align 128
+  ret void
+}
+
+; CHECK-LABEL: test12:
+; CHECK: v{{[0-9]+}}.w += vdmpy(v{{[0-9]+}}.h,r{{[0-9]+}}.h):sat
+define void @test12(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca <32 x i32>, align 128
+  %b.addr = alloca i32, align 4
+  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load <32 x i32>, <32 x i32>* @d, align 128
+  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
+  %2 = load i32, i32* %b.addr, align 4
+  %3 = call <32 x i32> @llvm.hexagon.V6.vdmpyhsat.acc.128B(<32 x i32> %0, <32 x i32> %1, i32 %2)
+  store <32 x i32> %3, <32 x i32>* @d, align 128
+  ret void
+}
+
+; CHECK-LABEL: test13:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w += vdmpy(v{{[0-9]+}}:{{[0-9]+}}.h,r{{[0-9]+}}.b)
+define void @test13(<64 x i32> %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca <64 x i32>, align 256
+  %b.addr = alloca i32, align 4
+  store <64 x i32> %a, <64 x i32>* %a.addr, align 256
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load <64 x i32>, <64 x i32>* @c, align 256
+  %1 = load <64 x i32>, <64 x i32>* %a.addr, align 256
+  %2 = load i32, i32* %b.addr, align 4
+  %3 = call <64 x i32> @llvm.hexagon.V6.vdmpyhb.dv.acc.128B(<64 x i32> %0, <64 x i32> %1, i32 %2)
+  store <64 x i32> %3, <64 x i32>* @c, align 256
+  ret void
+}
+
+; CHECK-LABEL: test14:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h += vmpy(v{{[0-9]+}}.ub,r{{[0-9]+}}.b)
+define void @test14(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca <32 x i32>, align 128
+  %b.addr = alloca i32, align 4
+  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load <64 x i32>, <64 x i32>* @c, align 256
+  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
+  %2 = load i32, i32* %b.addr, align 4
+  %3 = call <64 x i32> @llvm.hexagon.V6.vmpybus.acc.128B(<64 x i32> %0, <32 x i32> %1, i32 %2)
+  store <64 x i32> %3, <64 x i32>* @c, align 256
+  ret void
+}
+
+; CHECK-LABEL: test15:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h += vmpa(v{{[0-9]+}}:{{[0-9]+}}.ub,r{{[0-9]+}}.b)
+define void @test15(<64 x i32> %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca <64 x i32>, align 256
+  %b.addr = alloca i32, align 4
+  store <64 x i32> %a, <64 x i32>* %a.addr, align 256
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load <64 x i32>, <64 x i32>* @c, align 256
+  %1 = load <64 x i32>, <64 x i32>* %a.addr, align 256
+  %2 = load i32, i32* %b.addr, align 4
+  %3 = call <64 x i32> @llvm.hexagon.V6.vmpabus.acc.128B(<64 x i32> %0, <64 x i32> %1, i32 %2)
+  store <64 x i32> %3, <64 x i32>* @c, align 256
+  ret void
+}
+
+; CHECK-LABEL: test16:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w += vmpa(v{{[0-9]+}}:{{[0-9]+}}.h,r{{[0-9]+}}.b)
+define void @test16(<64 x i32> %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca <64 x i32>, align 256
+  %b.addr = alloca i32, align 4
+  store <64 x i32> %a, <64 x i32>* %a.addr, align 256
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load <64 x i32>, <64 x i32>* @c, align 256
+  %1 = load <64 x i32>, <64 x i32>* %a.addr, align 256
+  %2 = load i32, i32* %b.addr, align 4
+  %3 = call <64 x i32> @llvm.hexagon.V6.vmpahb.acc.128B(<64 x i32> %0, <64 x i32> %1, i32 %2)
+  store <64 x i32> %3, <64 x i32>* @c, align 256
+  ret void
+}
+
+; CHECK-LABEL: test17:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w += vmpy(v{{[0-9]+}}.h,r{{[0-9]+}}.h):sat
+define void @test17(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca <32 x i32>, align 128
+  %b.addr = alloca i32, align 4
+  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load <64 x i32>, <64 x i32>* @c, align 256
+  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
+  %2 = load i32, i32* %b.addr, align 4
+  %3 = call <64 x i32> @llvm.hexagon.V6.vmpyhsat.acc.128B(<64 x i32> %0, <32 x i32> %1, i32 %2)
+  store <64 x i32> %3, <64 x i32>* @c, align 256
+  ret void
+}
+
+; CHECK-LABEL: test18:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uw += vmpy(v{{[0-9]+}}.uh,r{{[0-9]+}}.uh)
+define void @test18(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca <32 x i32>, align 128
+  %b.addr = alloca i32, align 4
+  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load <64 x i32>, <64 x i32>* @c, align 256
+  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
+  %2 = load i32, i32* %b.addr, align 4
+  %3 = call <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32> %0, <32 x i32> %1, i32 %2)
+  store <64 x i32> %3, <64 x i32>* @c, align 256
+  ret void
+}
+
+; CHECK-LABEL: test19:
+; CHECK: v{{[0-9]+}}.w += vmpyi(v{{[0-9]+}}.w,r{{[0-9]+}}.b)
+define void @test19(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca <32 x i32>, align 128
+  %b.addr = alloca i32, align 4
+  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load <32 x i32>, <32 x i32>* @d, align 128
+  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
+  %2 = load i32, i32* %b.addr, align 4
+  %3 = call <32 x i32> @llvm.hexagon.V6.vmpyiwb.acc.128B(<32 x i32> %0, <32 x i32> %1, i32 %2)
+  store <32 x i32> %3, <32 x i32>* @d, align 128
+  ret void
+}
+
+; CHECK-LABEL: test20:
+; CHECK: v{{[0-9]+}}.w += vmpyi(v{{[0-9]+}}.w,r{{[0-9]+}}.h)
+define void @test20(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca <32 x i32>, align 128
+  %b.addr = alloca i32, align 4
+  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load <32 x i32>, <32 x i32>* @d, align 128
+  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
+  %2 = load i32, i32* %b.addr, align 4
+  %3 = call <32 x i32> @llvm.hexagon.V6.vmpyiwh.acc.128B(<32 x i32> %0, <32 x i32> %1, i32 %2)
+  store <32 x i32> %3, <32 x i32>* @d, align 128
+  ret void
+}
+
+; CHECK-LABEL: test21:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uw += vdsad(v{{[0-9]+}}:{{[0-9]+}}.uh,r{{[0-9]+}}.uh)
+define void @test21(<64 x i32> %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca <64 x i32>, align 256
+  %b.addr = alloca i32, align 4
+  store <64 x i32> %a, <64 x i32>* %a.addr, align 256
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load <64 x i32>, <64 x i32>* @c, align 256
+  %1 = load <64 x i32>, <64 x i32>* %a.addr, align 256
+  %2 = load i32, i32* %b.addr, align 4
+  %3 = call <64 x i32> @llvm.hexagon.V6.vdsaduh.acc.128B(<64 x i32> %0, <64 x i32> %1, i32 %2)
+  store <64 x i32> %3, <64 x i32>* @c, align 256
+  ret void
+}
+
+; CHECK-LABEL: test22:
+; CHECK: v{{[0-9]+}}.h += vmpyi(v{{[0-9]+}}.h,r{{[0-9]+}}.b)
+define void @test22(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca <32 x i32>, align 128
+  %b.addr = alloca i32, align 4
+  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load <32 x i32>, <32 x i32>* @d, align 128
+  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
+  %2 = load i32, i32* %b.addr, align 4
+  %3 = call <32 x i32> @llvm.hexagon.V6.vmpyihb.acc.128B(<32 x i32> %0, <32 x i32> %1, i32 %2)
+  store <32 x i32> %3, <32 x i32>* @d, align 128
+  ret void
+}
+
+; CHECK-LABEL: test23:
+; CHECK: v{{[0-9]+}}.w += vasl(v{{[0-9]+}}.w,r{{[0-9]+}})
+define void @test23(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca <32 x i32>, align 128
+  %b.addr = alloca i32, align 4
+  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load <32 x i32>, <32 x i32>* @d, align 128
+  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
+  %2 = load i32, i32* %b.addr, align 4
+  %3 = call <32 x i32> @llvm.hexagon.V6.vaslw.acc.128B(<32 x i32> %0, <32 x i32> %1, i32 %2)
+  store <32 x i32> %3, <32 x i32>* @d, align 128
+  ret void
+}
+
+; CHECK-LABEL: test24:
+; CHECK: v{{[0-9]+}}.w += vasr(v{{[0-9]+}}.w,r{{[0-9]+}})
+define void @test24(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca <32 x i32>, align 128
+  %b.addr = alloca i32, align 4
+  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load <32 x i32>, <32 x i32>* @d, align 128
+  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
+  %2 = load i32, i32* %b.addr, align 4
+  %3 = call <32 x i32> @llvm.hexagon.V6.vasrw.acc.128B(<32 x i32> %0, <32 x i32> %1, i32 %2)
+  store <32 x i32> %3, <32 x i32>* @d, align 128
+  ret void
+}
+
+; CHECK-LABEL: test25:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uh += vmpy(v{{[0-9]+}}.ub,r{{[0-9]+}}.ub)
+define void @test25(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %a.addr = alloca <32 x i32>, align 128
+  %b.addr = alloca i32, align 4
+  store <32 x i32> %a, <32 x i32>* %a.addr, align 128
+  store i32 %b, i32* %b.addr, align 4
+  %0 = load <64 x i32>, <64 x i32>* @c, align 256
+  %1 = load <32 x i32>, <32 x i32>* %a.addr, align 128
+  %2 = load i32, i32* %b.addr, align 4
+  %3 = call <64 x i32> @llvm.hexagon.V6.vmpyub.acc.128B(<64 x i32> %0, <32 x i32> %1, i32 %2)
+  store <64 x i32> %3, <64 x i32>* @c, align 256
+  ret void
+}
+
+declare <64 x i32> @llvm.hexagon.V6.vtmpyb.acc.128B(<64 x i32>, <64 x i32>, i32) #0
+declare <64 x i32> @llvm.hexagon.V6.vtmpybus.acc.128B(<64 x i32>, <64 x i32>, i32) #0
+declare <64 x i32> @llvm.hexagon.V6.vtmpyhb.acc.128B(<64 x i32>, <64 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vdmpyhb.acc.128B(<32 x i32>, <32 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vrmpyub.acc.128B(<32 x i32>, <32 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vrmpybus.acc.128B(<32 x i32>, <32 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vdmpybus.acc.128B(<32 x i32>, <32 x i32>, i32) #0
+declare <64 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc.128B(<64 x i32>, <64 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vdmpyhsusat.acc.128B(<32 x i32>, <32 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vdmpyhsuisat.acc.128B(<32 x i32>, <64 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vdmpyhisat.acc.128B(<32 x i32>, <64 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vdmpyhsat.acc.128B(<32 x i32>, <32 x i32>, i32) #0
+declare <64 x i32> @llvm.hexagon.V6.vdmpyhb.dv.acc.128B(<64 x i32>, <64 x i32>, i32) #0
+declare <64 x i32> @llvm.hexagon.V6.vmpybus.acc.128B(<64 x i32>, <32 x i32>, i32) #0
+declare <64 x i32> @llvm.hexagon.V6.vmpabus.acc.128B(<64 x i32>, <64 x i32>, i32) #0
+declare <64 x i32> @llvm.hexagon.V6.vmpahb.acc.128B(<64 x i32>, <64 x i32>, i32) #0
+declare <64 x i32> @llvm.hexagon.V6.vmpyhsat.acc.128B(<64 x i32>, <32 x i32>, i32) #0
+declare <64 x i32> @llvm.hexagon.V6.vmpyuh.acc.128B(<64 x i32>, <32 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpyiwb.acc.128B(<32 x i32>, <32 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpyiwh.acc.128B(<32 x i32>, <32 x i32>, i32) #0
+declare <64 x i32> @llvm.hexagon.V6.vdsaduh.acc.128B(<64 x i32>, <64 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpyihb.acc.128B(<32 x i32>, <32 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vaslw.acc.128B(<32 x i32>, <32 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vasrw.acc.128B(<32 x i32>, <32 x i32>, i32) #0
+declare <64 x i32> @llvm.hexagon.V6.vmpyub.acc.128B(<64 x i32>, <32 x i32>, i32) #0
+
+attributes #0 = { nounwind readnone "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length128b" }
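
Each test in this 128-byte-mode file has the same shape: the vector and scalar arguments are spilled to allocas, the running accumulator is loaded from @c (a vector pair) or @d (a single vector), one accumulating HVX intrinsic is applied, and the result is stored back, which is what forces selection of the accumulating form of the instruction. As a rough illustration only (the register numbers below are arbitrary, not taken from actual llc output), the pattern checked for test13 would match an assembly line such as

    v1:0.w += vdmpy(v3:2.h,r0.b)

emitted for the llvm.hexagon.V6.vdmpyhb.dv.acc.128B call.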

Added: llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-vmpy-acc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-vmpy-acc.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-vmpy-acc.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-vmpy-acc.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,447 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+ at c = external global <32 x i32>
+ at d = external global <16 x i32>
+
+; CHECK-LABEL: test1:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h += vtmpy(v{{[0-9]+}}:{{[0-9]+}}.b,r{{[0-9]+}}.b)
+define void @test1(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %1 = tail call <32 x i32> @llvm.hexagon.V6.vtmpyb.acc(<32 x i32> %0, <32 x i32> %a, i32 %b)
+  store <32 x i32> %1, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test2:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h += vtmpy(v{{[0-9]+}}:{{[0-9]+}}.ub,r{{[0-9]+}}.b)
+define void @test2(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %1 = tail call <32 x i32> @llvm.hexagon.V6.vtmpybus.acc(<32 x i32> %0, <32 x i32> %a, i32 %b)
+  store <32 x i32> %1, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test3:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w += vtmpy(v{{[0-9]+}}:{{[0-9]+}}.h,r{{[0-9]+}}.b)
+define void @test3(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %1 = tail call <32 x i32> @llvm.hexagon.V6.vtmpyhb.acc(<32 x i32> %0, <32 x i32> %a, i32 %b)
+  store <32 x i32> %1, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test4:
+; CHECK: v{{[0-9]+}}.w += vdmpy(v{{[0-9]+}}.h,r{{[0-9]+}}.b)
+define void @test4(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhb.acc(<16 x i32> %0, <16 x i32> %a, i32 %b)
+  store <16 x i32> %1, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test5:
+; CHECK: v{{[0-9]+}}.uw += vrmpy(v{{[0-9]+}}.ub,r{{[0-9]+}}.ub)
+define void @test5(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vrmpyub.acc(<16 x i32> %0, <16 x i32> %a, i32 %b)
+  store <16 x i32> %1, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test6:
+; CHECK: v{{[0-9]+}}.w += vrmpy(v{{[0-9]+}}.ub,r{{[0-9]+}}.b)
+define void @test6(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vrmpybus.acc(<16 x i32> %0, <16 x i32> %a, i32 %b)
+  store <16 x i32> %1, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test7:
+; CHECK: v{{[0-9]+}}.h += vdmpy(v{{[0-9]+}}.ub,r{{[0-9]+}}.b)
+define void @test7(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vdmpybus.acc(<16 x i32> %0, <16 x i32> %a, i32 %b)
+  store <16 x i32> %1, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test8:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h += vdmpy(v{{[0-9]+}}:{{[0-9]+}}.ub,r{{[0-9]+}}.b)
+define void @test8(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %1 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %0, <32 x i32> %a, i32 %b)
+  store <32 x i32> %1, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test9:
+; CHECK: v{{[0-9]+}}.w += vdmpy(v{{[0-9]+}}.h,r{{[0-9]+}}.uh):sat
+define void @test9(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhsusat.acc(<16 x i32> %0, <16 x i32> %a, i32 %b)
+  store <16 x i32> %1, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test10:
+; CHECK: v{{[0-9]+}}.w += vdmpy(v{{[0-9]+}}:{{[0-9]+}}.h,r{{[0-9]+}}.uh,#1):sat
+define void @test10(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhsuisat.acc(<16 x i32> %0, <32 x i32> %a, i32 %b)
+  store <16 x i32> %1, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test11:
+; CHECK: v{{[0-9]+}}.w += vdmpy(v{{[0-9]+}}:{{[0-9]+}}.h,r{{[0-9]+}}.h):sat
+define void @test11(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhisat.acc(<16 x i32> %0, <32 x i32> %a, i32 %b)
+  store <16 x i32> %1, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test12:
+; CHECK: v{{[0-9]+}}.w += vdmpy(v{{[0-9]+}}.h,r{{[0-9]+}}.h):sat
+define void @test12(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhsat.acc(<16 x i32> %0, <16 x i32> %a, i32 %b)
+  store <16 x i32> %1, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test13:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w += vdmpy(v{{[0-9]+}}:{{[0-9]+}}.h,r{{[0-9]+}}.b)
+define void @test13(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %1 = tail call <32 x i32> @llvm.hexagon.V6.vdmpyhb.dv.acc(<32 x i32> %0, <32 x i32> %a, i32 %b)
+  store <32 x i32> %1, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test14:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h += vmpy(v{{[0-9]+}}.ub,r{{[0-9]+}}.b)
+define void @test14(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %1 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %0, <16 x i32> %a, i32 %b)
+  store <32 x i32> %1, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test15:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h += vmpa(v{{[0-9]+}}:{{[0-9]+}}.ub,r{{[0-9]+}}.b)
+define void @test15(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %1 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %0, <32 x i32> %a, i32 %b)
+  store <32 x i32> %1, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test16:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w += vmpa(v{{[0-9]+}}:{{[0-9]+}}.h,r{{[0-9]+}}.b)
+define void @test16(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %1 = tail call <32 x i32> @llvm.hexagon.V6.vmpahb.acc(<32 x i32> %0, <32 x i32> %a, i32 %b)
+  store <32 x i32> %1, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test17:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w += vmpy(v{{[0-9]+}}.h,r{{[0-9]+}}.h):sat
+define void @test17(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %1 = tail call <32 x i32> @llvm.hexagon.V6.vmpyhsat.acc(<32 x i32> %0, <16 x i32> %a, i32 %b)
+  store <32 x i32> %1, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test18:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uw += vmpy(v{{[0-9]+}}.uh,r{{[0-9]+}}.uh)
+define void @test18(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %1 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuh.acc(<32 x i32> %0, <16 x i32> %a, i32 %b)
+  store <32 x i32> %1, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test19:
+; CHECK: v{{[0-9]+}}.w += vmpyi(v{{[0-9]+}}.w,r{{[0-9]+}}.b)
+define void @test19(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32> %0, <16 x i32> %a, i32 %b)
+  store <16 x i32> %1, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test20:
+; CHECK: v{{[0-9]+}}.w += vmpyi(v{{[0-9]+}}.w,r{{[0-9]+}}.h)
+define void @test20(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %0, <16 x i32> %a, i32 %b)
+  store <16 x i32> %1, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test21:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uw += vdsad(v{{[0-9]+}}:{{[0-9]+}}.uh,r{{[0-9]+}}.uh)
+define void @test21(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %1 = tail call <32 x i32> @llvm.hexagon.V6.vdsaduh.acc(<32 x i32> %0, <32 x i32> %a, i32 %b)
+  store <32 x i32> %1, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test22:
+; CHECK: v{{[0-9]+}}.h += vmpyi(v{{[0-9]+}}.h,r{{[0-9]+}}.b)
+define void @test22(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vmpyihb.acc(<16 x i32> %0, <16 x i32> %a, i32 %b)
+  store <16 x i32> %1, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test23:
+; CHECK: v{{[0-9]+}}.w += vasl(v{{[0-9]+}}.w,r{{[0-9]+}})
+define void @test23(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32> %0, <16 x i32> %a, i32 %b)
+  store <16 x i32> %1, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test24:
+; CHECK: v{{[0-9]+}}.w += vasr(v{{[0-9]+}}.w,r{{[0-9]+}})
+define void @test24(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vasrw.acc(<16 x i32> %0, <16 x i32> %a, i32 %b)
+  store <16 x i32> %1, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test25:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uh += vmpy(v{{[0-9]+}}.ub,r{{[0-9]+}}.ub)
+define void @test25(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %1 = tail call <32 x i32> @llvm.hexagon.V6.vmpyub.acc(<32 x i32> %0, <16 x i32> %a, i32 %b)
+  store <32 x i32> %1, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test26:
+; CHECK: v{{[0-9]+}}.w += vdmpy(v{{[0-9]+}}.h,v{{[0-9]+}}.h):sat
+define void @test26(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhvsat.acc(<16 x i32> %0, <16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %1, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test27:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h += vmpy(v{{[0-9]+}}.ub,v{{[0-9]+}}.b)
+define void @test27(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %1 = tail call <32 x i32> @llvm.hexagon.V6.vmpybusv.acc(<32 x i32> %0, <16 x i32> %a, <16 x i32> %b)
+  store <32 x i32> %1, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test28:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h += vmpy(v{{[0-9]+}}.b,v{{[0-9]+}}.b)
+define void @test28(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %1 = tail call <32 x i32> @llvm.hexagon.V6.vmpybv.acc(<32 x i32> %0, <16 x i32> %a, <16 x i32> %b)
+  store <32 x i32> %1, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test29:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w += vmpy(v{{[0-9]+}}.h,v{{[0-9]+}}.uh)
+define void @test29(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %1 = tail call <32 x i32> @llvm.hexagon.V6.vmpyhus.acc(<32 x i32> %0, <16 x i32> %a, <16 x i32> %b)
+  store <32 x i32> %1, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test30:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w += vmpy(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define void @test30(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %1 = tail call <32 x i32> @llvm.hexagon.V6.vmpyhv.acc(<32 x i32> %0, <16 x i32> %a, <16 x i32> %b)
+  store <32 x i32> %1, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test31:
+; CHECK: v{{[0-9]+}}.w += vmpyie(v{{[0-9]+}}.w,v{{[0-9]+}}.h)
+define void @test31(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiewh.acc(<16 x i32> %0, <16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %1, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test32:
+; CHECK: v{{[0-9]+}}.w += vmpyie(v{{[0-9]+}}.w,v{{[0-9]+}}.uh)
+define void @test32(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiewuh.acc(<16 x i32> %0, <16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %1, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test33:
+; CHECK: v{{[0-9]+}}.h += vmpyi(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define void @test33(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vmpyih.acc(<16 x i32> %0, <16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %1, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test34:
+; CHECK: v{{[0-9]+}}.w += vmpyo(v{{[0-9]+}}.w,v{{[0-9]+}}.h):<<1:rnd:sat:shift
+define void @test34(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vmpyowh.rnd.sacc(<16 x i32> %0, <16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %1, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test35:
+; CHECK: v{{[0-9]+}}.w += vmpyo(v{{[0-9]+}}.w,v{{[0-9]+}}.h):<<1:sat:shift
+define void @test35(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vmpyowh.sacc(<16 x i32> %0, <16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %1, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test36:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uh += vmpy(v{{[0-9]+}}.ub,v{{[0-9]+}}.ub)
+define void @test36(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %1 = tail call <32 x i32> @llvm.hexagon.V6.vmpyubv.acc(<32 x i32> %0, <16 x i32> %a, <16 x i32> %b)
+  store <32 x i32> %1, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test37:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uw += vmpy(v{{[0-9]+}}.uh,v{{[0-9]+}}.uh)
+define void @test37(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <32 x i32>, <32 x i32>* @c, align 128
+  %1 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv.acc(<32 x i32> %0, <16 x i32> %a, <16 x i32> %b)
+  store <32 x i32> %1, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test38:
+; CHECK: v{{[0-9]+}}.w += vrmpy(v{{[0-9]+}}.ub,v{{[0-9]+}}.b)
+define void @test38(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vrmpybusv.acc(<16 x i32> %0, <16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %1, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test39:
+; CHECK: v{{[0-9]+}}.w += vrmpy(v{{[0-9]+}}.b,v{{[0-9]+}}.b)
+define void @test39(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vrmpybv.acc(<16 x i32> %0, <16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %1, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test40:
+; CHECK: v{{[0-9]+}}.uw += vrmpy(v{{[0-9]+}}.ub,v{{[0-9]+}}.ub)
+define void @test40(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = load <16 x i32>, <16 x i32>* @d, align 64
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.vrmpyubv.acc(<16 x i32> %0, <16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %1, <16 x i32>* @d, align 64
+  ret void
+}
+
+declare <32 x i32> @llvm.hexagon.V6.vtmpyb.acc(<32 x i32>, <32 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vtmpybus.acc(<32 x i32>, <32 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vtmpyhb.acc(<32 x i32>, <32 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vdmpyhb.acc(<16 x i32>, <16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vrmpyub.acc(<16 x i32>, <16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vrmpybus.acc(<16 x i32>, <16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vdmpybus.acc(<16 x i32>, <16 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32>, <32 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vdmpyhsusat.acc(<16 x i32>, <16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vdmpyhsuisat.acc(<16 x i32>, <32 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vdmpyhisat.acc(<16 x i32>, <32 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vdmpyhsat.acc(<16 x i32>, <16 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vdmpyhb.dv.acc(<32 x i32>, <32 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32>, <16 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32>, <32 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpahb.acc(<32 x i32>, <32 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpyhsat.acc(<32 x i32>, <16 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpyuh.acc(<32 x i32>, <16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vmpyiwb.acc(<16 x i32>, <16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32>, <16 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vdsaduh.acc(<32 x i32>, <32 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vmpyihb.acc(<16 x i32>, <16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vaslw.acc(<16 x i32>, <16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vasrw.acc(<16 x i32>, <16 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpyub.acc(<32 x i32>, <16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vdmpyhvsat.acc(<16 x i32>, <16 x i32>, <16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpybusv.acc(<32 x i32>, <16 x i32>, <16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpybv.acc(<32 x i32>, <16 x i32>, <16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpyhus.acc(<32 x i32>, <16 x i32>, <16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpyhv.acc(<32 x i32>, <16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vmpyiewh.acc(<16 x i32>, <16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vmpyiewuh.acc(<16 x i32>, <16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vmpyih.acc(<16 x i32>, <16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vmpyowh.rnd.sacc(<16 x i32>, <16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vmpyowh.sacc(<16 x i32>, <16 x i32>, <16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpyubv.acc(<32 x i32>, <16 x i32>, <16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpyuhv.acc(<32 x i32>, <16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vrmpybusv.acc(<16 x i32>, <16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vrmpybv.acc(<16 x i32>, <16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vrmpyubv.acc(<16 x i32>, <16 x i32>, <16 x i32>) #0
+
+attributes #0 = { nounwind readnone "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
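
This file repeats the accumulate-intrinsic coverage above in 64-byte HVX mode: single vectors become <16 x i32> and vector pairs <32 x i32>, while the expected accumulating instruction forms are unchanged. As an illustration only (register numbers are arbitrary), the test34 pattern would accept a line such as

    v0.w += vmpyo(v1.w,v2.h):<<1:rnd:sat:shift

for the llvm.hexagon.V6.vmpyowh.rnd.sacc call.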

Added: llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-vmpy.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-vmpy.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-vmpy.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/intrinsics-v60-vmpy.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,517 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+ at c = external global <32 x i32>
+ at d = external global <16 x i32>
+
+; CHECK-LABEL: test1:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h = vtmpy(v{{[0-9]+}}:{{[0-9]+}}.b,r{{[0-9]+}}.b)
+define void @test1(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vtmpyb(<32 x i32> %a, i32 %b)
+  store <32 x i32> %0, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test2:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h = vtmpy(v{{[0-9]+}}:{{[0-9]+}}.ub,r{{[0-9]+}}.b)
+define void @test2(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vtmpybus(<32 x i32> %a, i32 %b)
+  store <32 x i32> %0, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test3:
+; CHECK: v{{[0-9]+}}.w = vdmpy(v{{[0-9]+}}.h,r{{[0-9]+}}.b)
+define void @test3(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhb(<16 x i32> %a, i32 %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test4:
+; CHECK: v{{[0-9]+}}.uw = vrmpy(v{{[0-9]+}}.ub,r{{[0-9]+}}.ub)
+define void @test4(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vrmpyub(<16 x i32> %a, i32 %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test5:
+; CHECK: v{{[0-9]+}}.w = vrmpy(v{{[0-9]+}}.ub,r{{[0-9]+}}.b)
+define void @test5(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vrmpybus(<16 x i32> %a, i32 %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test6:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uw = vdsad(v{{[0-9]+}}:{{[0-9]+}}.uh,r{{[0-9]+}}.uh)
+define void @test6(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vdsaduh(<32 x i32> %a, i32 %b)
+  store <32 x i32> %0, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test7:
+; CHECK: v{{[0-9]+}}.h = vdmpy(v{{[0-9]+}}.ub,r{{[0-9]+}}.b)
+define void @test7(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vdmpybus(<16 x i32> %a, i32 %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test8:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h = vdmpy(v{{[0-9]+}}:{{[0-9]+}}.ub,r{{[0-9]+}}.b)
+define void @test8(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv(<32 x i32> %a, i32 %b)
+  store <32 x i32> %0, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test9:
+; CHECK: v{{[0-9]+}}.w = vdmpy(v{{[0-9]+}}.h,r{{[0-9]+}}.uh):sat
+define void @test9(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhsusat(<16 x i32> %a, i32 %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test10:
+; CHECK: v{{[0-9]+}}.w = vdmpy(v{{[0-9]+}}:{{[0-9]+}}.h,r{{[0-9]+}}.uh,#1):sat
+define void @test10(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhsuisat(<32 x i32> %a, i32 %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test11:
+; CHECK: v{{[0-9]+}}.w = vdmpy(v{{[0-9]+}}.h,r{{[0-9]+}}.h):sat
+define void @test11(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhsat(<16 x i32> %a, i32 %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test12:
+; CHECK: v{{[0-9]+}}.w = vdmpy(v{{[0-9]+}}:{{[0-9]+}}.h,r{{[0-9]+}}.h):sat
+define void @test12(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhisat(<32 x i32> %a, i32 %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test13:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w = vdmpy(v{{[0-9]+}}:{{[0-9]+}}.h,r{{[0-9]+}}.b)
+define void @test13(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vdmpyhb.dv(<32 x i32> %a, i32 %b)
+  store <32 x i32> %0, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test14:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h = vmpy(v{{[0-9]+}}.ub,r{{[0-9]+}}.b)
+define void @test14(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus(<16 x i32> %a, i32 %b)
+  store <32 x i32> %0, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test15:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h = vmpa(v{{[0-9]+}}:{{[0-9]+}}.ub,r{{[0-9]+}}.b)
+define void @test15(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus(<32 x i32> %a, i32 %b)
+  store <32 x i32> %0, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test16:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w = vmpa(v{{[0-9]+}}:{{[0-9]+}}.h,r{{[0-9]+}}.b)
+define void @test16(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpahb(<32 x i32> %a, i32 %b)
+  store <32 x i32> %0, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test17:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w = vmpy(v{{[0-9]+}}.h,r{{[0-9]+}}.h)
+define void @test17(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpyh(<16 x i32> %a, i32 %b)
+  store <32 x i32> %0, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test18:
+; CHECK: v{{[0-9]+}}.h = vmpy(v{{[0-9]+}}.h,r{{[0-9]+}}.h):<<1:sat
+define void @test18(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyhss(<16 x i32> %a, i32 %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test19:
+; CHECK: v{{[0-9]+}}.h = vmpy(v{{[0-9]+}}.h,r{{[0-9]+}}.h):<<1:rnd:sat
+define void @test19(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyhsrs(<16 x i32> %a, i32 %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test20:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uw = vmpy(v{{[0-9]+}}.uh,r{{[0-9]+}}.uh)
+define void @test20(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuh(<16 x i32> %a, i32 %b)
+  store <32 x i32> %0, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test21:
+; CHECK: v{{[0-9]+}}.h = vmpyi(v{{[0-9]+}}.h,r{{[0-9]+}}.b)
+define void @test21(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyihb(<16 x i32> %a, i32 %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test22:
+; CHECK: v{{[0-9]+}} = vror(v{{[0-9]+}},r{{[0-9]+}})
+define void @test22(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vror(<16 x i32> %a, i32 %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test23:
+; CHECK: v{{[0-9]+}}.w = vasr(v{{[0-9]+}}.w,r{{[0-9]+}})
+define void @test23(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vasrw(<16 x i32> %a, i32 %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test24:
+; CHECK: v{{[0-9]+}}.h = vasr(v{{[0-9]+}}.h,r{{[0-9]+}})
+define void @test24(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vasrh(<16 x i32> %a, i32 %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test25:
+; CHECK: v{{[0-9]+}}.w = vasl(v{{[0-9]+}}.w,r{{[0-9]+}})
+define void @test25(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vaslw(<16 x i32> %a, i32 %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test26:
+; CHECK: v{{[0-9]+}}.h = vasl(v{{[0-9]+}}.h,r{{[0-9]+}})
+define void @test26(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vaslh(<16 x i32> %a, i32 %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test27:
+; CHECK: v{{[0-9]+}}.uw = vlsr(v{{[0-9]+}}.uw,r{{[0-9]+}})
+define void @test27(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vlsrw(<16 x i32> %a, i32 %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test28:
+; CHECK: v{{[0-9]+}}.uh = vlsr(v{{[0-9]+}}.uh,r{{[0-9]+}})
+define void @test28(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vlsrh(<16 x i32> %a, i32 %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test29:
+; CHECK: v{{[0-9]+}}.w = vmpyi(v{{[0-9]+}}.w,r{{[0-9]+}}.h)
+define void @test29(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh(<16 x i32> %a, i32 %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test30:
+; CHECK: v{{[0-9]+}}.w = vmpyi(v{{[0-9]+}}.w,r{{[0-9]+}}.b)
+define void @test30(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32> %a, i32 %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test31:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w = vtmpy(v{{[0-9]+}}:{{[0-9]+}}.h,r{{[0-9]+}}.b)
+define void @test31(<32 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vtmpyhb(<32 x i32> %a, i32 %b)
+  store <32 x i32> %0, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test32:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uh = vmpy(v{{[0-9]+}}.ub,r{{[0-9]+}}.ub)
+define void @test32(<16 x i32> %a, i32 %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpyub(<16 x i32> %a, i32 %b)
+  store <32 x i32> %0, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test33:
+; CHECK: v{{[0-9]+}}.uw = vrmpy(v{{[0-9]+}}.ub,v{{[0-9]+}}.ub)
+define void @test33(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vrmpyubv(<16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test34:
+; CHECK: v{{[0-9]+}}.w = vrmpy(v{{[0-9]+}}.b,v{{[0-9]+}}.b)
+define void @test34(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vrmpybv(<16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test35:
+; CHECK: v{{[0-9]+}}.w = vrmpy(v{{[0-9]+}}.ub,v{{[0-9]+}}.b)
+define void @test35(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vrmpybusv(<16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test36:
+; CHECK: v{{[0-9]+}}.w = vdmpy(v{{[0-9]+}}.h,v{{[0-9]+}}.h):sat
+define void @test36(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vdmpyhvsat(<16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test37:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h = vmpy(v{{[0-9]+}}.b,v{{[0-9]+}}.b)
+define void @test37(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpybv(<16 x i32> %a, <16 x i32> %b)
+  store <32 x i32> %0, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test38:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uh = vmpy(v{{[0-9]+}}.ub,v{{[0-9]+}}.ub)
+define void @test38(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32> %a, <16 x i32> %b)
+  store <32 x i32> %0, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test39:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h = vmpy(v{{[0-9]+}}.ub,v{{[0-9]+}}.b)
+define void @test39(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpybusv(<16 x i32> %a, <16 x i32> %b)
+  store <32 x i32> %0, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test40:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w = vmpy(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define void @test40(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpyhv(<16 x i32> %a, <16 x i32> %b)
+  store <32 x i32> %0, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test41:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.uw = vmpy(v{{[0-9]+}}.uh,v{{[0-9]+}}.uh)
+define void @test41(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32> %a, <16 x i32> %b)
+  store <32 x i32> %0, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test42:
+; CHECK: v{{[0-9]+}}.h = vmpy(v{{[0-9]+}}.h,v{{[0-9]+}}.h):<<1:rnd:sat
+define void @test42(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyhvsrs(<16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test43:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.w = vmpy(v{{[0-9]+}}.h,v{{[0-9]+}}.uh)
+define void @test43(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpyhus(<16 x i32> %a, <16 x i32> %b)
+  store <32 x i32> %0, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test44:
+; CHECK: v{{[0-9]+}}:{{[0-9]+}}.h = vmpa(v{{[0-9]+}}:{{[0-9]+}}.ub,v{{[0-9]+}}:{{[0-9]+}}.b)
+define void @test44(<32 x i32> %a, <32 x i32> %b) #0 {
+entry:
+  %0 = tail call <32 x i32> @llvm.hexagon.V6.vmpabusv(<32 x i32> %a, <32 x i32> %b)
+  store <32 x i32> %0, <32 x i32>* @c, align 128
+  ret void
+}
+
+; CHECK-LABEL: test45:
+; CHECK: v{{[0-9]+}}.h = vmpyi(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define void @test45(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyih(<16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test46:
+; CHECK: v{{[0-9]+}}.w = vmpye(v{{[0-9]+}}.w,v{{[0-9]+}}.uh)
+define void @test46(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyewuh(<16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test47:
+; CHECK: v{{[0-9]+}}.w = vmpyo(v{{[0-9]+}}.w,v{{[0-9]+}}.h):<<1:sat
+define void @test47(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyowh(<16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test48:
+; CHECK: v{{[0-9]+}}.w = vmpyie(v{{[0-9]+}}.w,v{{[0-9]+}}.uh)
+define void @test48(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiewuh(<16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test49:
+; CHECK: v{{[0-9]+}}.w = vmpyio(v{{[0-9]+}}.w,v{{[0-9]+}}.h)
+define void @test49(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiowh(<16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test50:
+; CHECK: v{{[0-9]+}}.w = vmpyo(v{{[0-9]+}}.w,v{{[0-9]+}}.h):<<1:rnd:sat
+define void @test50(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyowh.rnd(<16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+; CHECK-LABEL: test51:
+; CHECK: v{{[0-9]+}}.w = vmpyieo(v{{[0-9]+}}.h,v{{[0-9]+}}.h)
+define void @test51(<16 x i32> %a, <16 x i32> %b) #0 {
+entry:
+  %0 = tail call <16 x i32> @llvm.hexagon.V6.vmpyieoh(<16 x i32> %a, <16 x i32> %b)
+  store <16 x i32> %0, <16 x i32>* @d, align 64
+  ret void
+}
+
+declare <32 x i32> @llvm.hexagon.V6.vtmpyb(<32 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vtmpybus(<32 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vdmpyhb(<16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vrmpyub(<16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vrmpybus(<16 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vdsaduh(<32 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vdmpybus(<16 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vdmpybus.dv(<32 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vdmpyhsusat(<16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vdmpyhsuisat(<32 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vdmpyhsat(<16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vdmpyhisat(<32 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vdmpyhb.dv(<32 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpybus(<16 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpabus(<32 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpahb(<32 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpyh(<16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vmpyhss(<16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vmpyhsrs(<16 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpyuh(<16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vmpyihb(<16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vror(<16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vasrw(<16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vasrh(<16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vaslw(<16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vaslh(<16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vlsrw(<16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vlsrh(<16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vmpyiwh(<16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vmpyiwb(<16 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vtmpyhb(<32 x i32>, i32) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpyub(<16 x i32>, i32) #0
+declare <16 x i32> @llvm.hexagon.V6.vrmpyubv(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vrmpybv(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vrmpybusv(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vdmpyhvsat(<16 x i32>, <16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpybv(<16 x i32>, <16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32>, <16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpybusv(<16 x i32>, <16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpyhv(<16 x i32>, <16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpyuhv(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vmpyhvsrs(<16 x i32>, <16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpyhus(<16 x i32>, <16 x i32>) #0
+declare <32 x i32> @llvm.hexagon.V6.vmpabusv(<32 x i32>, <32 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vmpyih(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vmpyewuh(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vmpyowh(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vmpyiewuh(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vmpyiowh(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vmpyowh.rnd(<16 x i32>, <16 x i32>) #0
+declare <16 x i32> @llvm.hexagon.V6.vmpyieoh(<16 x i32>, <16 x i32>) #0
+
+attributes #0 = { nounwind readnone "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
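
This file covers the non-accumulating variants of the same multiply intrinsics, so no accumulator is loaded first and the CHECK patterns use a plain assignment rather than +=. As an illustration only (register numbers are arbitrary), the test1 pattern would match a line such as

    v1:0.h = vtmpy(v3:2.b,r0.b)

produced for the llvm.hexagon.V6.vtmpyb call.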

Added: llvm/trunk/test/CodeGen/Hexagon/invalid-memrefs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/invalid-memrefs.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/invalid-memrefs.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/invalid-memrefs.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,103 @@
+; RUN: llc -O2 -march=hexagon -hexagon-expand-condsets=0 < %s
+; REQUIRES: asserts
+; Disable expand-condsets because it will assert on undefined registers.
+
+target triple = "hexagon-unknown--elf"
+
+%s.0 = type { %s.0*, %s.0* }
+%s.1 = type { %s.1*, %s.1** }
+
+ at g0 = external global %s.0, align 4
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  br i1 undef, label %b2, label %b1
+
+b1:                                               ; preds = %b0
+  unreachable
+
+b2:                                               ; preds = %b0
+  br i1 undef, label %b26, label %b3
+
+b3:                                               ; preds = %b2
+  br i1 undef, label %b6, label %b4
+
+b4:                                               ; preds = %b3
+  br i1 undef, label %b5, label %b26
+
+b5:                                               ; preds = %b4
+  br i1 undef, label %b7, label %b26
+
+b6:                                               ; preds = %b3
+  br label %b7
+
+b7:                                               ; preds = %b6, %b5
+  br i1 undef, label %b11, label %b8
+
+b8:                                               ; preds = %b7
+  br i1 undef, label %b10, label %b9
+
+b9:                                               ; preds = %b8
+  unreachable
+
+b10:                                              ; preds = %b8
+  unreachable
+
+b11:                                              ; preds = %b7
+  br i1 undef, label %b25, label %b12
+
+b12:                                              ; preds = %b11
+  br i1 undef, label %b14, label %b13
+
+b13:                                              ; preds = %b12
+  br label %b14
+
+b14:                                              ; preds = %b13, %b12
+  br i1 undef, label %b15, label %b16
+
+b15:                                              ; preds = %b14
+  br label %b16
+
+b16:                                              ; preds = %b15, %b14
+  br i1 undef, label %b18, label %b17
+
+b17:                                              ; preds = %b16
+  unreachable
+
+b18:                                              ; preds = %b16
+  %v0 = load %s.0*, %s.0** getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 1), align 4
+  %v1 = load %s.0*, %s.0** getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 0), align 4
+  %v2 = select i1 undef, %s.0* %v0, %s.0* %v1
+  br i1 undef, label %b22, label %b19
+
+b19:                                              ; preds = %b18
+  %v3 = load %s.1*, %s.1** undef, align 4
+  %v4 = icmp eq %s.1* %v3, null
+  br i1 %v4, label %b21, label %b20
+
+b20:                                              ; preds = %b19
+  store %s.1** undef, %s.1*** undef, align 4
+  br label %b21
+
+b21:                                              ; preds = %b20, %b19
+  br label %b22
+
+b22:                                              ; preds = %b21, %b18
+  br i1 undef, label %b24, label %b23
+
+b23:                                              ; preds = %b22
+  store %s.0* %v2, %s.0** undef, align 4
+  br label %b24
+
+b24:                                              ; preds = %b23, %b22
+  unreachable
+
+b25:                                              ; preds = %b11
+  unreachable
+
+b26:                                              ; preds = %b5, %b4, %b2
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/jump-table-g0.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/jump-table-g0.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/jump-table-g0.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/jump-table-g0.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,45 @@
+; RUN: llc -march=hexagon -hexagon-small-data-threshold=0 < %s
+; REQUIRES: asserts
+
+; Check for successful compilation.
+
+; Function Attrs: nounwind
+declare void @f0(i32) #0
+
+; Function Attrs: nounwind
+define i32 @f1(i32 %a0) #0 {
+b0:
+  switch i32 %a0, label %b1 [
+    i32 1, label %b2
+    i32 2, label %b3
+    i32 3, label %b4
+    i32 4, label %b5
+    i32 5, label %b6
+  ]
+
+b1:                                               ; preds = %b0
+  ret i32 0
+
+b2:                                               ; preds = %b0
+  call void @f0(i32 4)
+  ret i32 4
+
+b3:                                               ; preds = %b0
+  call void @f0(i32 2)
+  call void @f0(i32 42)
+  ret i32 42
+
+b4:                                               ; preds = %b0
+  call void @f0(i32 -1)
+  ret i32 -1
+
+b5:                                               ; preds = %b0
+  call void @f0(i32 123)
+  ret i32 123
+
+b6:                                               ; preds = %b0
+  call void @f0(i32 88)
+  ret i32 4
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/jump-table-isel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/jump-table-isel.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/jump-table-isel.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/jump-table-isel.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,97 @@
+; RUN: llc -march=hexagon -hexagon-emit-jump-tables=0 < %s
+; REQUIRES: asserts
+; Check for successful compilation.
+
+target triple = "hexagon"
+
+%s.0 = type opaque
+%s.1 = type { i32, i32, i32 }
+
+ at g0 = external global %s.0
+ at g1 = external global %s.0
+ at g2 = external global %s.0
+ at g3 = external global %s.0
+ at g4 = external global %s.0
+
+; Function Attrs: nounwind optsize
+define zeroext i8 @f0(%s.1* %a0, %s.0** nocapture %a1) #0 {
+b0:
+  store %s.0* null, %s.0** %a1, align 4, !tbaa !0
+  %v0 = getelementptr inbounds %s.1, %s.1* %a0, i32 0, i32 1
+  %v1 = load i32, i32* %v0, align 4, !tbaa !4
+  %v2 = icmp eq i32 %v1, 0
+  br i1 %v2, label %b1, label %b8
+
+b1:                                               ; preds = %b0
+  %v3 = getelementptr inbounds %s.1, %s.1* %a0, i32 0, i32 0
+  %v4 = load i32, i32* %v3, align 4, !tbaa !7
+  switch i32 %v4, label %b8 [
+    i32 0, label %b2
+    i32 1, label %b4
+    i32 4, label %b5
+    i32 5, label %b6
+    i32 2, label %b7
+  ]
+
+b2:                                               ; preds = %b1
+  %v5 = getelementptr inbounds %s.1, %s.1* %a0, i32 0, i32 2
+  %v6 = load i32, i32* %v5, align 4, !tbaa !8
+  switch i32 %v6, label %b8 [
+    i32 27, label %b3
+    i32 44, label %b3
+  ]
+
+b3:                                               ; preds = %b7, %b7, %b7, %b6, %b6, %b5, %b5, %b4, %b4, %b2, %b2
+  %v7 = phi %s.0* [ @g0, %b2 ], [ @g0, %b2 ], [ @g1, %b4 ], [ @g1, %b4 ], [ @g2, %b5 ], [ @g2, %b5 ], [ @g3, %b6 ], [ @g3, %b6 ], [ @g4, %b7 ], [ @g4, %b7 ], [ @g4, %b7 ]
+  store %s.0* %v7, %s.0** %a1, align 4, !tbaa !0
+  br label %b8
+
+b4:                                               ; preds = %b1
+  %v8 = getelementptr inbounds %s.1, %s.1* %a0, i32 0, i32 2
+  %v9 = load i32, i32* %v8, align 4, !tbaa !8
+  switch i32 %v9, label %b8 [
+    i32 27, label %b3
+    i32 44, label %b3
+  ]
+
+b5:                                               ; preds = %b1
+  %v10 = getelementptr inbounds %s.1, %s.1* %a0, i32 0, i32 2
+  %v11 = load i32, i32* %v10, align 4, !tbaa !8
+  switch i32 %v11, label %b8 [
+    i32 27, label %b3
+    i32 44, label %b3
+  ]
+
+b6:                                               ; preds = %b1
+  %v12 = getelementptr inbounds %s.1, %s.1* %a0, i32 0, i32 2
+  %v13 = load i32, i32* %v12, align 4, !tbaa !8
+  switch i32 %v13, label %b8 [
+    i32 27, label %b3
+    i32 44, label %b3
+  ]
+
+b7:                                               ; preds = %b1
+  %v14 = getelementptr inbounds %s.1, %s.1* %a0, i32 0, i32 2
+  %v15 = load i32, i32* %v14, align 4, !tbaa !8
+  switch i32 %v15, label %b8 [
+    i32 40, label %b3
+    i32 46, label %b3
+    i32 47, label %b3
+  ]
+
+b8:                                               ; preds = %b7, %b6, %b5, %b4, %b3, %b2, %b1, %b0
+  %v16 = phi i8 [ 1, %b3 ], [ 0, %b0 ], [ 0, %b2 ], [ 0, %b4 ], [ 0, %b5 ], [ 0, %b6 ], [ 0, %b1 ], [ 0, %b7 ]
+  ret i8 %v16
+}
+
+attributes #0 = { nounwind optsize }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"any pointer", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!5, !6, i64 4}
+!5 = !{!"_ZTS3bar", !6, i64 0, !6, i64 4, !6, i64 8}
+!6 = !{!"int", !2, i64 0}
+!7 = !{!5, !6, i64 0}
+!8 = !{!5, !6, i64 8}

Added: llvm/trunk/test/CodeGen/Hexagon/large-number-of-preds.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/large-number-of-preds.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/large-number-of-preds.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/large-number-of-preds.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,245 @@
+; RUN: llc -O3 -march=hexagon < %s
+; REQUIRES: asserts
+
+target triple = "hexagon-unknown--elf"
+
+ at g0 = external global void (float*, i32, i32, float*, float*)**
+
+; Function Attrs: nounwind
+define void @f0(float* nocapture %a0, float* nocapture %a1, float* %a2) #0 {
+b0:
+  %v0 = alloca [64 x float], align 16
+  %v1 = alloca [8 x float], align 8
+  %v2 = bitcast [64 x float]* %v0 to i8*
+  call void @llvm.lifetime.start.p0i8(i64 256, i8* %v2) #2
+  %v3 = load float, float* %a0, align 4, !tbaa !0
+  %v4 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 35
+  store float %v3, float* %v4, align 4, !tbaa !0
+  %v5 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 0
+  store float %v3, float* %v5, align 16, !tbaa !0
+  %v6 = getelementptr inbounds float, float* %a0, i32 1
+  %v7 = load float, float* %v6, align 4, !tbaa !0
+  %v8 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 36
+  store float %v7, float* %v8, align 16, !tbaa !0
+  %v9 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 1
+  store float %v7, float* %v9, align 4, !tbaa !0
+  %v10 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 37
+  store float 1.000000e+00, float* %v10, align 4, !tbaa !0
+  %v11 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 2
+  store float 1.000000e+00, float* %v11, align 8, !tbaa !0
+  %v12 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 34
+  store float 0.000000e+00, float* %v12, align 8, !tbaa !0
+  %v13 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 33
+  store float 0.000000e+00, float* %v13, align 4, !tbaa !0
+  %v14 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 32
+  store float 0.000000e+00, float* %v14, align 16, !tbaa !0
+  %v15 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 5
+  store float 0.000000e+00, float* %v15, align 4, !tbaa !0
+  %v16 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 4
+  store float 0.000000e+00, float* %v16, align 16, !tbaa !0
+  %v17 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 3
+  store float 0.000000e+00, float* %v17, align 4, !tbaa !0
+  %v18 = load float, float* %a1, align 4, !tbaa !0
+  %v19 = fmul float %v3, %v18
+  %v20 = fsub float -0.000000e+00, %v19
+  %v21 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 6
+  store float %v20, float* %v21, align 8, !tbaa !0
+  %v22 = fmul float %v7, %v18
+  %v23 = fsub float -0.000000e+00, %v22
+  %v24 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 7
+  store float %v23, float* %v24, align 4, !tbaa !0
+  %v25 = getelementptr inbounds float, float* %a1, i32 1
+  %v26 = load float, float* %v25, align 4, !tbaa !0
+  %v27 = fmul float %v3, %v26
+  %v28 = fsub float -0.000000e+00, %v27
+  %v29 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 38
+  store float %v28, float* %v29, align 8, !tbaa !0
+  %v30 = fmul float %v7, %v26
+  %v31 = fsub float -0.000000e+00, %v30
+  %v32 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 39
+  store float %v31, float* %v32, align 4, !tbaa !0
+  %v33 = getelementptr inbounds [8 x float], [8 x float]* %v1, i32 0, i32 0
+  store float %v18, float* %v33, align 8, !tbaa !0
+  %v34 = getelementptr inbounds [8 x float], [8 x float]* %v1, i32 0, i32 4
+  store float %v26, float* %v34, align 8, !tbaa !0
+  %v35 = getelementptr float, float* %a0, i32 2
+  %v36 = getelementptr float, float* %a1, i32 2
+  %v37 = load float, float* %v35, align 4, !tbaa !0
+  %v38 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 43
+  store float %v37, float* %v38, align 4, !tbaa !0
+  %v39 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 8
+  store float %v37, float* %v39, align 16, !tbaa !0
+  %v40 = getelementptr inbounds float, float* %a0, i32 3
+  %v41 = load float, float* %v40, align 4, !tbaa !0
+  %v42 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 44
+  store float %v41, float* %v42, align 16, !tbaa !0
+  %v43 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 9
+  store float %v41, float* %v43, align 4, !tbaa !0
+  %v44 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 45
+  store float 1.000000e+00, float* %v44, align 4, !tbaa !0
+  %v45 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 10
+  store float 1.000000e+00, float* %v45, align 8, !tbaa !0
+  %v46 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 42
+  store float 0.000000e+00, float* %v46, align 8, !tbaa !0
+  %v47 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 41
+  store float 0.000000e+00, float* %v47, align 4, !tbaa !0
+  %v48 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 40
+  store float 0.000000e+00, float* %v48, align 16, !tbaa !0
+  %v49 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 13
+  store float 0.000000e+00, float* %v49, align 4, !tbaa !0
+  %v50 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 12
+  store float 0.000000e+00, float* %v50, align 16, !tbaa !0
+  %v51 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 11
+  store float 0.000000e+00, float* %v51, align 4, !tbaa !0
+  %v52 = load float, float* %v36, align 4, !tbaa !0
+  %v53 = fmul float %v37, %v52
+  %v54 = fsub float -0.000000e+00, %v53
+  %v55 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 14
+  store float %v54, float* %v55, align 8, !tbaa !0
+  %v56 = fmul float %v41, %v52
+  %v57 = fsub float -0.000000e+00, %v56
+  %v58 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 15
+  store float %v57, float* %v58, align 4, !tbaa !0
+  %v59 = getelementptr inbounds float, float* %a1, i32 3
+  %v60 = load float, float* %v59, align 4, !tbaa !0
+  %v61 = fmul float %v37, %v60
+  %v62 = fsub float -0.000000e+00, %v61
+  %v63 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 46
+  store float %v62, float* %v63, align 8, !tbaa !0
+  %v64 = fmul float %v41, %v60
+  %v65 = fsub float -0.000000e+00, %v64
+  %v66 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 47
+  store float %v65, float* %v66, align 4, !tbaa !0
+  %v67 = getelementptr inbounds [8 x float], [8 x float]* %v1, i32 0, i32 1
+  store float %v52, float* %v67, align 4, !tbaa !0
+  %v68 = getelementptr inbounds [8 x float], [8 x float]* %v1, i32 0, i32 5
+  store float %v60, float* %v68, align 4, !tbaa !0
+  %v69 = getelementptr float, float* %a0, i32 4
+  %v70 = getelementptr float, float* %a1, i32 4
+  %v71 = load float, float* %v69, align 4, !tbaa !0
+  %v72 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 51
+  store float %v71, float* %v72, align 4, !tbaa !0
+  %v73 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 16
+  store float %v71, float* %v73, align 16, !tbaa !0
+  %v74 = getelementptr inbounds float, float* %a0, i32 5
+  %v75 = load float, float* %v74, align 4, !tbaa !0
+  %v76 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 52
+  store float %v75, float* %v76, align 16, !tbaa !0
+  %v77 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 17
+  store float %v75, float* %v77, align 4, !tbaa !0
+  %v78 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 53
+  store float 1.000000e+00, float* %v78, align 4, !tbaa !0
+  %v79 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 18
+  store float 1.000000e+00, float* %v79, align 8, !tbaa !0
+  %v80 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 50
+  store float 0.000000e+00, float* %v80, align 8, !tbaa !0
+  %v81 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 49
+  store float 0.000000e+00, float* %v81, align 4, !tbaa !0
+  %v82 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 48
+  store float 0.000000e+00, float* %v82, align 16, !tbaa !0
+  %v83 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 21
+  store float 0.000000e+00, float* %v83, align 4, !tbaa !0
+  %v84 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 20
+  store float 0.000000e+00, float* %v84, align 16, !tbaa !0
+  %v85 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 19
+  store float 0.000000e+00, float* %v85, align 4, !tbaa !0
+  %v86 = load float, float* %v70, align 4, !tbaa !0
+  %v87 = fmul float %v71, %v86
+  %v88 = fsub float -0.000000e+00, %v87
+  %v89 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 22
+  store float %v88, float* %v89, align 8, !tbaa !0
+  %v90 = fmul float %v75, %v86
+  %v91 = fsub float -0.000000e+00, %v90
+  %v92 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 23
+  store float %v91, float* %v92, align 4, !tbaa !0
+  %v93 = getelementptr inbounds float, float* %a1, i32 5
+  %v94 = load float, float* %v93, align 4, !tbaa !0
+  %v95 = fmul float %v71, %v94
+  %v96 = fsub float -0.000000e+00, %v95
+  %v97 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 54
+  store float %v96, float* %v97, align 8, !tbaa !0
+  %v98 = fmul float %v75, %v94
+  %v99 = fsub float -0.000000e+00, %v98
+  %v100 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 55
+  store float %v99, float* %v100, align 4, !tbaa !0
+  %v101 = getelementptr inbounds [8 x float], [8 x float]* %v1, i32 0, i32 2
+  store float %v86, float* %v101, align 8, !tbaa !0
+  %v102 = getelementptr inbounds [8 x float], [8 x float]* %v1, i32 0, i32 6
+  store float %v94, float* %v102, align 8, !tbaa !0
+  %v103 = getelementptr float, float* %a0, i32 6
+  %v104 = getelementptr float, float* %a1, i32 6
+  %v105 = load float, float* %v103, align 4, !tbaa !0
+  %v106 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 59
+  store float %v105, float* %v106, align 4, !tbaa !0
+  %v107 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 24
+  store float %v105, float* %v107, align 16, !tbaa !0
+  %v108 = getelementptr inbounds float, float* %a0, i32 7
+  %v109 = load float, float* %v108, align 4, !tbaa !0
+  %v110 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 60
+  store float %v109, float* %v110, align 16, !tbaa !0
+  %v111 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 25
+  store float %v109, float* %v111, align 4, !tbaa !0
+  %v112 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 61
+  store float 1.000000e+00, float* %v112, align 4, !tbaa !0
+  %v113 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 26
+  store float 1.000000e+00, float* %v113, align 8, !tbaa !0
+  %v114 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 58
+  store float 0.000000e+00, float* %v114, align 8, !tbaa !0
+  %v115 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 57
+  store float 0.000000e+00, float* %v115, align 4, !tbaa !0
+  %v116 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 56
+  store float 0.000000e+00, float* %v116, align 16, !tbaa !0
+  %v117 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 29
+  store float 0.000000e+00, float* %v117, align 4, !tbaa !0
+  %v118 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 28
+  store float 0.000000e+00, float* %v118, align 16, !tbaa !0
+  %v119 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 27
+  store float 0.000000e+00, float* %v119, align 4, !tbaa !0
+  %v120 = load float, float* %v104, align 4, !tbaa !0
+  %v121 = fmul float %v105, %v120
+  %v122 = fsub float -0.000000e+00, %v121
+  %v123 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 30
+  store float %v122, float* %v123, align 8, !tbaa !0
+  %v124 = fmul float %v109, %v120
+  %v125 = fsub float -0.000000e+00, %v124
+  %v126 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 31
+  store float %v125, float* %v126, align 4, !tbaa !0
+  %v127 = getelementptr inbounds float, float* %a1, i32 7
+  %v128 = load float, float* %v127, align 4, !tbaa !0
+  %v129 = fmul float %v105, %v128
+  %v130 = fsub float -0.000000e+00, %v129
+  %v131 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 62
+  store float %v130, float* %v131, align 8, !tbaa !0
+  %v132 = fmul float %v109, %v128
+  %v133 = fsub float -0.000000e+00, %v132
+  %v134 = getelementptr inbounds [64 x float], [64 x float]* %v0, i32 0, i32 63
+  store float %v133, float* %v134, align 4, !tbaa !0
+  %v135 = getelementptr inbounds [8 x float], [8 x float]* %v1, i32 0, i32 3
+  store float %v120, float* %v135, align 4, !tbaa !0
+  %v136 = getelementptr inbounds [8 x float], [8 x float]* %v1, i32 0, i32 7
+  store float %v128, float* %v136, align 4, !tbaa !0
+  %v137 = load void (float*, i32, i32, float*, float*)**, void (float*, i32, i32, float*, float*)*** @g0, align 4, !tbaa !4
+  %v138 = load void (float*, i32, i32, float*, float*)*, void (float*, i32, i32, float*, float*)** %v137, align 4, !tbaa !4
+  call void %v138(float* %v5, i32 8, i32 8, float* %v33, float* %a2) #2
+  %v139 = getelementptr inbounds float, float* %a2, i32 8
+  store float 1.000000e+00, float* %v139, align 4, !tbaa !0
+  call void @llvm.lifetime.end.p0i8(i64 256, i8* %v2) #2
+  ret void
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture) #1
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { argmemonly nounwind }
+attributes #2 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"float", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"any pointer", !2}

Added: llvm/trunk/test/CodeGen/Hexagon/lcomm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/lcomm.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/lcomm.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/lcomm.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,15 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK-NOT: .lcomm  g0,4,4,4
+
+target triple = "hexagon"
+
+ at g0 = internal global i32 0, align 4
+
+; Function Attrs: nounwind
+define i32 @f0() #0 {
+b0:
+  %v0 = load i32, i32* @g0, align 4
+  ret i32 %v0
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/load-abs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/load-abs.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/load-abs.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/load-abs.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,135 @@
+; RUN: llc -march=hexagon -O3 -hexagon-small-data-threshold=0 < %s | FileCheck %s
+; Check that absolute loads are generated for 64-bit values.
+
+target triple = "hexagon-unknown--elf"
+
+ at g0 = external global i8, align 8
+ at g1 = external global i16, align 8
+ at g2 = external global i32, align 8
+ at g3 = external global i64, align 8
+
+; CHECK-LABEL: f0:
+; CHECK: = memd(##441656)
+define i64 @f0() #0 {
+b0:
+  %v0 = load volatile i64, i64* inttoptr (i32 441656 to i64*)
+  ret i64 %v0
+}
+
+; CHECK-LABEL: f1:
+; CHECK: = memw(##441656)
+define i64 @f1() #0 {
+b0:
+  %v0 = load volatile i32, i32* inttoptr (i32 441656 to i32*)
+  %v1 = sext i32 %v0 to i64
+  ret i64 %v1
+}
+
+; CHECK-LABEL: f2:
+; CHECK: = memw(##441656)
+define i64 @f2() #0 {
+b0:
+  %v0 = load volatile i32, i32* inttoptr (i32 441656 to i32*)
+  %v1 = zext i32 %v0 to i64
+  ret i64 %v1
+}
+
+; CHECK-LABEL: f3:
+; CHECK: = memh(##441656)
+define i64 @f3() #0 {
+b0:
+  %v0 = load volatile i16, i16* inttoptr (i32 441656 to i16*)
+  %v1 = sext i16 %v0 to i64
+  ret i64 %v1
+}
+
+; CHECK-LABEL: f4:
+; CHECK: = memuh(##441656)
+define i64 @f4() #0 {
+b0:
+  %v0 = load volatile i16, i16* inttoptr (i32 441656 to i16*)
+  %v1 = zext i16 %v0 to i64
+  ret i64 %v1
+}
+
+; CHECK-LABEL: f5:
+; CHECK: = memb(##441656)
+define i64 @f5() #0 {
+b0:
+  %v0 = load volatile i8, i8* inttoptr (i32 441656 to i8*)
+  %v1 = sext i8 %v0 to i64
+  ret i64 %v1
+}
+
+; CHECK-LABEL: f6:
+; CHECK: = memub(##441656)
+define i64 @f6() #0 {
+b0:
+  %v0 = load volatile i8, i8* inttoptr (i32 441656 to i8*)
+  %v1 = zext i8 %v0 to i64
+  ret i64 %v1
+}
+
+; CHECK-LABEL: f7:
+; CHECK: = memd(##g3)
+define i64 @f7() #0 {
+b0:
+  %v0 = load volatile i64, i64* @g3
+  ret i64 %v0
+}
+
+; CHECK-LABEL: f8:
+; CHECK: = memw(##g2)
+define i64 @f8() #0 {
+b0:
+  %v0 = load volatile i32, i32* @g2
+  %v1 = sext i32 %v0 to i64
+  ret i64 %v1
+}
+
+; CHECK-LABEL: f9:
+; CHECK: = memw(##g2)
+define i64 @f9() #0 {
+b0:
+  %v0 = load volatile i32, i32* @g2
+  %v1 = zext i32 %v0 to i64
+  ret i64 %v1
+}
+
+; CHECK-LABEL: f10:
+; CHECK: = memh(##g1)
+define i64 @f10() #0 {
+b0:
+  %v0 = load volatile i16, i16* @g1
+  %v1 = sext i16 %v0 to i64
+  ret i64 %v1
+}
+
+; CHECK-LABEL: f11:
+; CHECK: = memuh(##g1)
+define i64 @f11() #0 {
+b0:
+  %v0 = load volatile i16, i16* @g1
+  %v1 = zext i16 %v0 to i64
+  ret i64 %v1
+}
+
+; CHECK-LABEL: f12:
+; CHECK: = memb(##g0)
+define i64 @f12() #0 {
+b0:
+  %v0 = load volatile i8, i8* @g0
+  %v1 = sext i8 %v0 to i64
+  ret i64 %v1
+}
+
+; CHECK-LABEL: f13:
+; CHECK: = memub(##g0)
+define i64 @f13() #0 {
+b0:
+  %v0 = load volatile i8, i8* @g0
+  %v1 = zext i8 %v0 to i64
+  ret i64 %v1
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/local-exec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/local-exec.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/local-exec.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/local-exec.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,23 @@
+; RUN: llc -march=hexagon -O0 < %s | FileCheck %s
+
+target triple = "hexagon-unknown--elf"
+
+ at g0 = internal thread_local(localexec) global i32 0, align 4
+ at g1 = internal thread_local(localexec) global i32 0, align 4
+; CHECK: ##g0@{{TPREL|tprel}}
+; CHECK: ##g1@{{TPREL|tprel}}
+
+; Function Attrs: nounwind
+define i32 @f0() #0 {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = alloca i32*, align 4
+  store i32 0, i32* %v0
+  store i32* @g0, i32** %v1, align 4
+  %v2 = load i32, i32* @g1, align 4
+  %v3 = load i32*, i32** %v1, align 4
+  store i32 %v2, i32* %v3, align 4
+  ret i32 0
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/loop-rotate-bug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/loop-rotate-bug.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/loop-rotate-bug.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/loop-rotate-bug.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,79 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; CHECK: cmp.eq
+; CHECK: cmp.eq
+; CHECK: cmp.eq
+; CHECK: cmp.eq
+
+%s.0 = type { i8*, i32, %s.0* }
+
+ at g0 = external global %s.0**, align 4
+ at g1 = private global [4 x i64] zeroinitializer, section "__llvm_prf_cnts", align 8
+
+declare void @f0(%s.0*)
+
+define i32 @f1() #0 {
+b0:
+  %v0 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @g1, i32 0, i32 0), align 8
+  %v1 = add i64 %v0, 1
+  store i64 %v1, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @g1, i32 0, i32 0), align 8
+  br label %b1
+
+b1:                                               ; preds = %b6, %b0
+  %v2 = phi i32 [ 0, %b0 ], [ %v27, %b6 ]
+  %v3 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @g1, i32 0, i32 1), align 8
+  %v4 = add i64 %v3, 1
+  store i64 %v4, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @g1, i32 0, i32 1), align 8
+  %v5 = load %s.0**, %s.0*** @g0, align 4
+  %v6 = getelementptr inbounds %s.0*, %s.0** %v5, i32 %v2
+  %v7 = load %s.0*, %s.0** %v6, align 4
+  %v8 = icmp eq %s.0* %v7, null
+  br i1 %v8, label %b6, label %b2
+
+b2:                                               ; preds = %b1
+  %v9 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @g1, i32 0, i32 2), align 8
+  %v10 = add i64 %v9, 1
+  store i64 %v10, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @g1, i32 0, i32 2), align 8
+  %v11 = bitcast %s.0* %v7 to %s.0*
+  %v12 = getelementptr inbounds %s.0, %s.0* %v11, i32 0, i32 2
+  %v13 = load %s.0*, %s.0** %v12, align 4
+  %v14 = icmp eq %s.0* %v13, null
+  %v15 = getelementptr inbounds %s.0, %s.0* %v11, i32 0, i32 2
+  br i1 %v14, label %b5, label %b3
+
+b3:                                               ; preds = %b2
+  br label %b4
+
+b4:                                               ; preds = %b4, %b3
+  %v16 = phi %s.0** [ %v25, %b4 ], [ %v15, %b3 ]
+  %v17 = phi %s.0* [ %v20, %b4 ], [ %v7, %b3 ]
+  %v18 = load i64, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @g1, i32 0, i32 3), align 8
+  %v19 = add i64 %v18, 1
+  store i64 %v19, i64* getelementptr inbounds ([4 x i64], [4 x i64]* @g1, i32 0, i32 3), align 8
+  %v20 = load %s.0*, %s.0** %v16, align 4
+  tail call void @f0(%s.0* %v17)
+  %v21 = bitcast %s.0* %v20 to %s.0*
+  %v22 = getelementptr inbounds %s.0, %s.0* %v21, i32 0, i32 2
+  %v23 = load %s.0*, %s.0** %v22, align 4
+  %v24 = icmp eq %s.0* %v23, null
+  %v25 = getelementptr inbounds %s.0, %s.0* %v21, i32 0, i32 2
+  br i1 %v24, label %b5, label %b4
+
+b5:                                               ; preds = %b4, %b2
+  %v26 = phi %s.0* [ %v7, %b2 ], [ %v20, %b4 ]
+  tail call void @f0(%s.0* %v26)
+  br label %b6
+
+b6:                                               ; preds = %b5, %b1
+  %v27 = add nuw nsw i32 %v2, 1
+  %v28 = icmp eq i32 %v27, 3001
+  br i1 %v28, label %b7, label %b1
+
+b7:                                               ; preds = %b6
+  %v29 = load %s.0*, %s.0** bitcast (%s.0*** @g0 to %s.0**), align 4
+  tail call void @f0(%s.0* %v29)
+  ret i32 undef
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" }
+

Added: llvm/trunk/test/CodeGen/Hexagon/loop-rotate-liveins.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/loop-rotate-liveins.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/loop-rotate-liveins.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/loop-rotate-liveins.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,98 @@
+; RUN: llc -march=hexagon -O3 -verify-machineinstrs < %s | FileCheck %s
+;
+; Make sure that this testcase passes the verifier.
+; CHECK: call f1
+
+target triple = "hexagon"
+
+%s.0 = type { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, [2 x i32], i32, i32, i32, i32, i32, i32, i32, i32, i64, i32, i32, i64, i32, i32, i64, i32, i32, i64, i64, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 }
+
+ at g0 = external global %s.0, align 8
+ at g1 = external hidden unnamed_addr constant [3 x i8], align 1
+
+; Function Attrs: nounwind
+define void @f0() local_unnamed_addr #0 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b0
+  switch i8 undef, label %b3 [
+    i8 35, label %b2
+    i8 10, label %b2
+  ]
+
+b2:                                               ; preds = %b1, %b1
+  unreachable
+
+b3:                                               ; preds = %b1
+  br label %b4
+
+b4:                                               ; preds = %b3
+  switch i8 undef, label %b6 [
+    i8 35, label %b5
+    i8 10, label %b5
+  ]
+
+b5:                                               ; preds = %b4, %b4
+  unreachable
+
+b6:                                               ; preds = %b4
+  call void (i8*, i8*, ...) @f1(i8* nonnull undef, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @g1, i32 0, i32 0), i32* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 45)) #0
+  br label %b7
+
+b7:                                               ; preds = %b6
+  switch i8 undef, label %b9 [
+    i8 35, label %b8
+    i8 10, label %b8
+  ]
+
+b8:                                               ; preds = %b7, %b7
+  unreachable
+
+b9:                                               ; preds = %b7
+  br label %b10
+
+b10:                                              ; preds = %b9
+  switch i8 undef, label %b12 [
+    i8 35, label %b11
+    i8 10, label %b11
+  ]
+
+b11:                                              ; preds = %b10, %b10
+  unreachable
+
+b12:                                              ; preds = %b10
+  br label %b13
+
+b13:                                              ; preds = %b12
+  switch i8 undef, label %b14 [
+    i8 35, label %b15
+    i8 10, label %b15
+  ]
+
+b14:                                              ; preds = %b13
+  br label %b16
+
+b15:                                              ; preds = %b13, %b13
+  unreachable
+
+b16:                                              ; preds = %b17, %b14
+  %v0 = phi i8* [ %v2, %b17 ], [ undef, %b14 ]
+  %v1 = load i8, i8* %v0, align 1
+  switch i8 %v1, label %b17 [
+    i8 32, label %b18
+    i8 9, label %b18
+  ]
+
+b17:                                              ; preds = %b16
+  %v2 = getelementptr inbounds i8, i8* %v0, i32 1
+  br label %b16
+
+b18:                                              ; preds = %b16, %b16
+  unreachable
+}
+
+; Function Attrs: nounwind
+declare void @f1(i8* nocapture readonly, i8* nocapture readonly, ...) local_unnamed_addr #0
+
+attributes #0 = { nounwind "target-cpu"="hexagonv62" }

Added: llvm/trunk/test/CodeGen/Hexagon/loop_correctness.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/loop_correctness.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/loop_correctness.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/loop_correctness.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,123 @@
+; RUN: llc -march=hexagon -O3 < %s | FileCheck %s
+
+; CHECK-LABEL: f0:
+; CHECK: loop0(.LBB{{[0-9]+}}_{{[0-9]+}},#3)
+; CHECK: endloop0
+define void @f0(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b0
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v0 = phi i32 [ 0, %b1 ], [ %v1, %b2 ]
+  %v1 = add nsw i32 %v0, 1
+  %v2 = icmp slt i32 %v1, 3
+  br i1 %v2, label %b2, label %b3
+
+b3:                                               ; preds = %b2
+  ret void
+}
+
+; CHECK-LABEL: f1:
+; CHECK: loop0(.LBB{{[0-9]+}}_{{[0-9]+}},#2)
+; CHECK: endloop0
+define void @f1(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b0
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v0 = phi i32 [ 0, %b1 ], [ %v1, %b2 ]
+  %v1 = add nsw i32 %v0, 2
+  %v2 = icmp slt i32 %v1, 3
+  br i1 %v2, label %b2, label %b3
+
+b3:                                               ; preds = %b2
+  ret void
+}
+
+; CHECK-LABEL: f2:
+; CHECK: loop0(.LBB{{[0-9]+}}_{{[0-9]+}},#1)
+; CHECK: endloop0
+define void @f2(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b0
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v0 = phi i32 [ 0, %b1 ], [ %v1, %b2 ]
+  %v1 = add nsw i32 %v0, 3
+  %v2 = icmp slt i32 %v1, 3
+  br i1 %v2, label %b2, label %b3
+
+b3:                                               ; preds = %b2
+  ret void
+}
+
+; CHECK-LABEL: f3:
+; CHECK: loop0(.LBB{{[0-9]+}}_{{[0-9]+}},#4)
+; CHECK: endloop0
+define void @f3(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b0
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v0 = phi i32 [ 0, %b1 ], [ %v1, %b2 ]
+  %v1 = add nsw i32 %v0, 1
+  %v2 = icmp sle i32 %v1, 3
+  br i1 %v2, label %b2, label %b3
+
+b3:                                               ; preds = %b2
+  ret void
+}
+
+; CHECK-LABEL: f4:
+; CHECK: loop0(.LBB{{[0-9]+}}_{{[0-9]+}},#2)
+; CHECK: endloop0
+define void @f4(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b0
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v0 = phi i32 [ 0, %b1 ], [ %v1, %b2 ]
+  %v1 = add nsw i32 %v0, 2
+  %v2 = icmp sle i32 %v1, 3
+  br i1 %v2, label %b2, label %b3
+
+b3:                                               ; preds = %b2
+  ret void
+}
+
+; CHECK-LABEL: f5:
+; CHECK: loop0(.LBB{{[0-9]+}}_{{[0-9]+}},#2)
+; CHECK: endloop0
+define void @f5(i8* nocapture %a0, i32 %a1, i32 %a2) #0 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b0
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v0 = phi i32 [ 0, %b1 ], [ %v1, %b2 ]
+  %v1 = add nsw i32 %v0, 3
+  %v2 = icmp sle i32 %v1, 3
+  br i1 %v2, label %b2, label %b3
+
+b3:                                               ; preds = %b2
+  ret void
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/lower-i1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/lower-i1.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/lower-i1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/lower-i1.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,13 @@
+; RUN: llc -march=hexagon -debug < %s
+; REQUIRES: asserts
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind readnone
+define i32 @f0(i1 zeroext %a0) #0 {
+b0:
+  %v0 = select i1 %a0, i32 1, i32 2
+  ret i32 %v0
+}
+
+attributes #0 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/machine-sink.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/machine-sink.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/machine-sink.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/machine-sink.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,76 @@
+; RUN: llc -march=hexagon -machine-sink-split < %s
+; REQUIRES: asserts
+; MachineSink should not sink an MI which is used in a non-phi instruction
+; in an MBB with multiple predecessors.
+
+target triple = "hexagon-unknown--elf"
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  br i1 undef, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  unreachable
+
+b2:                                               ; preds = %b0
+  %v0 = load i8*, i8** undef, align 4
+  %v1 = getelementptr inbounds i8, i8* %v0, i32 1
+  %v2 = load i8, i8* %v0, align 1, !tbaa !0
+  %v3 = zext i8 %v2 to i32
+  %v4 = shl nuw nsw i32 %v3, 8
+  br i1 undef, label %b3, label %b5
+
+b3:                                               ; preds = %b2
+  br i1 undef, label %b15, label %b4
+
+b4:                                               ; preds = %b3
+  br label %b5
+
+b5:                                               ; preds = %b4, %b2
+  %v5 = phi i8* [ undef, %b4 ], [ %v1, %b2 ]
+  %v6 = load i8, i8* %v5, align 1, !tbaa !0
+  %v7 = zext i8 %v6 to i32
+  %v8 = add nsw i32 %v7, %v4
+  %v9 = add nsw i32 %v8, -2
+  br label %b6
+
+b6:                                               ; preds = %b8, %b5
+  br i1 false, label %b7, label %b8
+
+b7:                                               ; preds = %b6
+  unreachable
+
+b8:                                               ; preds = %b6
+  br i1 undef, label %b6, label %b9
+
+b9:                                               ; preds = %b8
+  br i1 undef, label %b10, label %b14
+
+b10:                                              ; preds = %b9
+  br i1 undef, label %b11, label %b13
+
+b11:                                              ; preds = %b10
+  br i1 undef, label %b12, label %b13
+
+b12:                                              ; preds = %b11
+  unreachable
+
+b13:                                              ; preds = %b11, %b10
+  store i32 %v9, i32* undef, align 4, !tbaa !3
+  unreachable
+
+b14:                                              ; preds = %b9
+  unreachable
+
+b15:                                              ; preds = %b3
+  ret void
+}
+
+attributes #0 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}
+!3 = !{!4, !4, i64 0}
+!4 = !{!"int", !1, i64 0}

Added: llvm/trunk/test/CodeGen/Hexagon/maddsubu.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/maddsubu.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/maddsubu.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/maddsubu.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,27 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Test that we generate 64-bit multiply accumulate/subtract.
+
+; CHECK-LABEL: f0:
+; CHECK: r{{[0-9]+}}:{{[0-9]+}} += mpyu
+define i64 @f0(i64 %a0, i32 %a1, i32 %a2) #0 {
+b0:
+  %v0 = zext i32 %a1 to i64
+  %v1 = zext i32 %a2 to i64
+  %v2 = mul nsw i64 %v1, %v0
+  %v3 = add nsw i64 %v2, %a0
+  ret i64 %v3
+}
+
+; CHECK-LABEL: f1:
+; CHECK: r{{[0-9]+}}:{{[0-9]+}} -= mpyu
+define i64 @f1(i64 %a0, i32 %a1, i32 %a2) #0 {
+b0:
+  %v0 = zext i32 %a1 to i64
+  %v1 = zext i32 %a2 to i64
+  %v2 = mul nsw i64 %v1, %v0
+  %v3 = sub nsw i64 %a0, %v2
+  ret i64 %v3
+}
+
+attributes #0 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/mapped_intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/mapped_intrinsics.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/mapped_intrinsics.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/mapped_intrinsics.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,113 @@
+; RUN: llc -march=hexagon -debug-only=isel < %s 2>&1 | FileCheck %s
+; REQUIRES: asserts
+
+; This test validates that ISel picks the correct equivalents of the intrinsics mentioned below:
+; For S2_asr_i_r_rnd_goodsyntax:
+;   if (#u5 == 0) Assembler mapped to: Rd = Rs
+;   else Rd = asr(Rs,#u5-1):rnd
+; For S2_asr_i_p_rnd_goodsyntax:
+;   if (#u6 == 0) Assembler mapped to: Rdd = combine(Rss.H32,Rss.L32)
+;   else Rdd = asr(Rss,#u6-1):rnd
+; For S5_vasrhrnd_goodsyntax:
+;   if (#u4 == 0) Assembler mapped to: Rdd = combine(Rss.H32,Rss.L32)
+;   else Rdd = vasrh(Rss,#u4-1):raw
+; For S5_asrhub_rnd_sat_goodsyntax:
+;   if (#u4 == 0) Assembler mapped to: Rd = vsathub(Rss)
+;   else Rd = vasrhub(Rss,#u4-1):raw
+
+target triple = "hexagon-unknown--elf"
+
+; CHECK-LABEL: f0
+; CHECK: ISEL: Starting selection on{{.*}}llvm.hexagon.S2.asr.i.r.rnd.goodsyntax
+; CHECK: Morphed node{{.*}}A2_tfr
+define i32 @f0(i32 %a0, i32 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = tail call i32 @llvm.hexagon.S2.asr.i.r.rnd.goodsyntax(i32 %a0, i32 0)
+  %v1 = add i32 %v0, %a1
+  ret i32 %v1
+}
+
+declare i32 @llvm.hexagon.S2.asr.i.r.rnd.goodsyntax(i32, i32) #1
+
+; CHECK-LABEL: f1
+; CHECK: ISEL: Starting selection on{{.*}}llvm.hexagon.S2.asr.i.r.rnd.goodsyntax
+; CHECK: Morphed node{{.*}}S2_asr_i_r_rnd
+define i32 @f1(i32 %a0, i32 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = tail call i32 @llvm.hexagon.S2.asr.i.r.rnd.goodsyntax(i32 %a0, i32 9)
+  %v1 = add i32 %v0, %a1
+  ret i32 %v1
+}
+
+; CHECK-LABEL: f2
+; CHECK: ISEL: Starting selection on{{.*}}llvm.hexagon.S2.asr.i.p.rnd.goodsyntax
+; CHECK: Morphed node{{.*}}A2_combinew
+define i64 @f2(i64 %a0, i32 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = zext i32 %a1 to i64
+  %v1 = tail call i64 @llvm.hexagon.S2.asr.i.p.rnd.goodsyntax(i64 %a0, i32 0)
+  %v2 = add nsw i64 %v1, %v0
+  ret i64 %v2
+}
+
+declare i64 @llvm.hexagon.S2.asr.i.p.rnd.goodsyntax(i64, i32) #1
+
+; CHECK-LABEL: f3
+; CHECK: ISEL: Starting selection on{{.*}}llvm.hexagon.S2.asr.i.p.rnd.goodsyntax
+; CHECK: Morphed node{{.*}}S2_asr_i_p_rnd
+define i64 @f3(i64 %a0, i32 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = zext i32 %a1 to i64
+  %v1 = tail call i64 @llvm.hexagon.S2.asr.i.p.rnd.goodsyntax(i64 %a0, i32 9)
+  %v2 = add nsw i64 %v1, %v0
+  ret i64 %v2
+}
+
+; CHECK-LABEL: f4
+; CHECK: ISEL: Starting selection on{{.*}}llvm.hexagon.S5.asrhub.rnd.sat.goodsyntax
+; CHECK: Morphed node{{.*}}S2_vsathub
+define i32 @f4(i64 %a0, i32 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = tail call i32 @llvm.hexagon.S5.asrhub.rnd.sat.goodsyntax(i64 %a0, i32 0)
+  %v1 = add i32 %v0, %a1
+  ret i32 %v1
+}
+
+declare i32 @llvm.hexagon.S5.asrhub.rnd.sat.goodsyntax(i64, i32) #1
+
+; CHECK-LABEL: f5
+; CHECK: ISEL: Starting selection on{{.*}}llvm.hexagon.S5.asrhub.rnd.sat.goodsyntax
+; CHECK: Morphed node{{.*}}S5_asrhub_rnd_sat
+define i32 @f5(i64 %a0, i32 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = tail call i32 @llvm.hexagon.S5.asrhub.rnd.sat.goodsyntax(i64 %a0, i32 9)
+  %v1 = add i32 %v0, %a1
+  ret i32 %v1
+}
+
+; CHECK-LABEL: f6
+; CHECK: ISEL: Starting selection on{{.*}}llvm.hexagon.S5.vasrhrnd.goodsyntax
+; CHECK: Morphed node{{.*}}A2_combinew
+define i64 @f6(i64 %a0, i32 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = zext i32 %a1 to i64
+  %v1 = tail call i64 @llvm.hexagon.S5.vasrhrnd.goodsyntax(i64 %a0, i32 0)
+  %v2 = add nsw i64 %v1, %v0
+  ret i64 %v2
+}
+
+declare i64 @llvm.hexagon.S5.vasrhrnd.goodsyntax(i64, i32) #1
+
+; CHECK-LABEL: f7
+; CHECK: ISEL: Starting selection on{{.*}}llvm.hexagon.S5.vasrhrnd.goodsyntax
+; CHECK: Morphed node{{.*}}S5_vasrhrnd
+define i64 @f7(i64 %a0, i32 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = zext i32 %a1 to i64
+  %v1 = tail call i64 @llvm.hexagon.S5.vasrhrnd.goodsyntax(i64 %a0, i32 9)
+  %v2 = add nsw i64 %v1, %v0
+  ret i64 %v2
+}
+
+attributes #0 = { nounwind readnone "target-cpu"="hexagonv60" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/mem-load-circ.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/mem-load-circ.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/mem-load-circ.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/mem-load-circ.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,111 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; CHECK-LABEL: f0:
+; CHECK: r{{[1-9]+:[0-9]+}} = memd(r{{[0-9]*}}++#{{[0-9]}}:circ(m{{[01]}}))
+define i64 @f0(i64* %a0) {
+b0:
+  %v0 = alloca i64, align 8
+  %v1 = getelementptr inbounds i64, i64* %a0, i32 1
+  store i64 0, i64* %v0, align 8, !tbaa !0
+  %v2 = bitcast i64* %v1 to i8*
+  %v3 = bitcast i64* %v0 to i8*
+  %v4 = call i8* @llvm.hexagon.circ.ldd(i8* %v2, i8* %v3, i32 150996984, i32 8)
+  %v5 = load i64, i64* %v0, align 8, !tbaa !0
+  ret i64 %v5
+}
+
+; Function Attrs: argmemonly nounwind
+declare i8* @llvm.hexagon.circ.ldd(i8*, i8*, i32, i32) #0
+
+; CHECK-LABEL: f1:
+; CHECK: r{{[0-9]*}} = memb(r{{[0-9]*}}++#{{[0-9]}}:circ(m{{[01]}}))
+define signext i8 @f1(i8* %a0) {
+b0:
+  %v0 = alloca i8, align 1
+  %v1 = getelementptr inbounds i8, i8* %a0, i32 1
+  store i8 0, i8* %v0, align 1, !tbaa !4
+  %v2 = call i8* @llvm.hexagon.circ.ldb(i8* %v1, i8* %v0, i32 16777471, i32 1)
+  %v3 = load i8, i8* %v0, align 1, !tbaa !4
+  ret i8 %v3
+}
+
+; Function Attrs: argmemonly nounwind
+declare i8* @llvm.hexagon.circ.ldb(i8*, i8*, i32, i32) #0
+
+; CHECK-LABEL: f2:
+; CHECK: r{{[0-9]*}} = memub(r{{[0-9]*}}++#{{[0-9]}}:circ(m{{[01]}}))
+define signext i8 @f2(i8* %a0) {
+b0:
+  %v0 = alloca i8, align 1
+  %v1 = getelementptr inbounds i8, i8* %a0, i32 1
+  store i8 0, i8* %v0, align 1, !tbaa !4
+  %v2 = call i8* @llvm.hexagon.circ.ldub(i8* %v1, i8* %v0, i32 16777471, i32 1)
+  %v3 = load i8, i8* %v0, align 1, !tbaa !4
+  ret i8 %v3
+}
+
+; Function Attrs: argmemonly nounwind
+declare i8* @llvm.hexagon.circ.ldub(i8*, i8*, i32, i32) #0
+
+; CHECK-LABEL: f3:
+; CHECK: r{{[0-9]*}} = memh(r{{[0-9]*}}++#{{[0-9]}}:circ(m{{[01]}}))
+define signext i16 @f3(i16* %a0) {
+b0:
+  %v0 = alloca i16, align 2
+  %v1 = getelementptr inbounds i16, i16* %a0, i32 1
+  store i16 0, i16* %v0, align 2, !tbaa !5
+  %v2 = bitcast i16* %v1 to i8*
+  %v3 = bitcast i16* %v0 to i8*
+  %v4 = call i8* @llvm.hexagon.circ.ldh(i8* %v2, i8* %v3, i32 33554942, i32 2)
+  %v5 = load i16, i16* %v0, align 2, !tbaa !5
+  ret i16 %v5
+}
+
+; Function Attrs: argmemonly nounwind
+declare i8* @llvm.hexagon.circ.ldh(i8*, i8*, i32, i32) #0
+
+; CHECK-LABEL: f4:
+; CHECK: r{{[0-9]*}} = memuh(r{{[0-9]*}}++#{{[0-9]}}:circ(m{{[01]}}))
+define signext i16 @f4(i16* %a0) {
+b0:
+  %v0 = alloca i16, align 2
+  %v1 = getelementptr inbounds i16, i16* %a0, i32 1
+  store i16 0, i16* %v0, align 2, !tbaa !5
+  %v2 = bitcast i16* %v1 to i8*
+  %v3 = bitcast i16* %v0 to i8*
+  %v4 = call i8* @llvm.hexagon.circ.lduh(i8* %v2, i8* %v3, i32 33554942, i32 2)
+  %v5 = load i16, i16* %v0, align 2, !tbaa !5
+  ret i16 %v5
+}
+
+; Function Attrs: argmemonly nounwind
+declare i8* @llvm.hexagon.circ.lduh(i8*, i8*, i32, i32) #0
+
+; CHECK-LABEL: f5:
+; CHECK: r{{[0-9]*}} = memw(r{{[0-9]*}}++#{{[0-9]}}:circ(m{{[01]}}))
+define i32 @f5(i32* %a0) {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = getelementptr inbounds i32, i32* %a0, i32 1
+  store i32 0, i32* %v0, align 4, !tbaa !7
+  %v2 = bitcast i32* %v1 to i8*
+  %v3 = bitcast i32* %v0 to i8*
+  %v4 = call i8* @llvm.hexagon.circ.ldw(i8* %v2, i8* %v3, i32 50332668, i32 4)
+  %v5 = load i32, i32* %v0, align 4, !tbaa !7
+  ret i32 %v5
+}
+
+; Function Attrs: argmemonly nounwind
+declare i8* @llvm.hexagon.circ.ldw(i8*, i8*, i32, i32) #0
+
+attributes #0 = { argmemonly nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"long long", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!2, !2, i64 0}
+!5 = !{!6, !6, i64 0}
+!6 = !{!"short", !2, i64 0}
+!7 = !{!8, !8, i64 0}
+!8 = !{!"long", !2, i64 0}

Added: llvm/trunk/test/CodeGen/Hexagon/mem-ops-sub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/mem-ops-sub.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/mem-ops-sub.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/mem-ops-sub.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,44 @@
+; RUN: llc -O2 -march=hexagon < %s | FileCheck %s
+; Test that we do not exceed #u5 in memops.
+; CHECK-NOT: memb(r2+#0) -= #32
+
+ at g0 = unnamed_addr global i8 112, align 1
+
+; Function Attrs: norecurse nounwind
+define fastcc void @f0() unnamed_addr #0 {
+b0:
+  %v0 = load i8, i8* @g0, align 1, !tbaa !4
+  %v1 = zext i8 %v0 to i32
+  %v2 = mul nuw nsw i32 %v1, 9625
+  %v3 = and i32 %v2, 255
+  %v4 = mul nuw nsw i32 %v3, 9625
+  %v5 = and i32 %v4, 255
+  %v6 = trunc i32 %v5 to i8
+  store i8 %v6, i8* @g0, align 1, !tbaa !4
+  ret void
+}
+
+define i32 @f1() {
+b0:
+  %v0 = load i8, i8* @g0, align 1, !tbaa !4
+  %v1 = zext i8 %v0 to i32
+  %v2 = add nuw nsw i32 %v1, 224
+  %v3 = trunc i32 %v2 to i8
+  store i8 %v3, i8* @g0, align 1, !tbaa !4
+  tail call fastcc void @f0()
+  %v4 = load i8, i8* @g0, align 1, !tbaa !4
+  %v5 = zext i8 %v4 to i32
+  ret i32 %v5
+}
+
+attributes #0 = { norecurse nounwind }
+
+!llvm.module.flags = !{!0, !2}
+
+!0 = !{i32 6, !"Target CPU", !1}
+!1 = !{!"hexagonv55"}
+!2 = !{i32 6, !"Target Features", !3}
+!3 = !{!"-hvx"}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"omnipotent char", !6, i64 0}
+!6 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/mem-ops-sub_i16_01.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/mem-ops-sub_i16_01.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/mem-ops-sub_i16_01.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/mem-ops-sub_i16_01.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,44 @@
+; RUN: llc -O2 -march=hexagon < %s | FileCheck %s
+; Test that we do not exceed #u5 in memops.
+; CHECK-NOT: memh(r2+#0) -= #32
+
+ at g0 = unnamed_addr global i16 -32, align 2
+
+; Function Attrs: norecurse nounwind
+define fastcc void @f0() unnamed_addr #0 {
+b0:
+  %v0 = load i16, i16* @g0, align 1, !tbaa !4
+  %v1 = zext i16 %v0 to i32
+  %v2 = mul nuw nsw i32 %v1, 9625
+  %v3 = and i32 %v2, 255
+  %v4 = mul nuw nsw i32 %v3, 9625
+  %v5 = and i32 %v4, 255
+  %v6 = trunc i32 %v5 to i16
+  store i16 %v6, i16* @g0, align 2, !tbaa !4
+  ret void
+}
+
+define i32 @f1() {
+b0:
+  %v0 = load i16, i16* @g0, align 2, !tbaa !4
+  %v1 = zext i16 %v0 to i32
+  %v2 = add nuw nsw i32 %v1, 65504
+  %v3 = trunc i32 %v2 to i16
+  store i16 %v3, i16* @g0, align 2, !tbaa !4
+  tail call fastcc void @f0()
+  %v4 = load i16, i16* @g0, align 2, !tbaa !4
+  %v5 = zext i16 %v4 to i32
+  ret i32 %v5
+}
+
+attributes #0 = { norecurse nounwind }
+
+!llvm.module.flags = !{!0, !2}
+
+!0 = !{i32 6, !"Target CPU", !1}
+!1 = !{!"hexagonv55"}
+!2 = !{i32 6, !"Target Features", !3}
+!3 = !{!"-hvx"}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"omnipotent char", !6, i64 0}
+!6 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/memcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/memcmp.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/memcmp.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/memcmp.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,48 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: loop0
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind readonly
+define i32 @f0(i8* nocapture %a0, i8* nocapture %a1, i32 %a2) #0 {
+b0:
+  %v0 = icmp eq i32 %a2, 0
+  br i1 %v0, label %b6, label %b1
+
+b1:                                               ; preds = %b0
+  br label %b2
+
+b2:                                               ; preds = %b4, %b1
+  %v1 = phi i8* [ %v10, %b4 ], [ %a1, %b1 ]
+  %v2 = phi i8* [ %v9, %b4 ], [ %a0, %b1 ]
+  %v3 = phi i32 [ %v11, %b4 ], [ %a2, %b1 ]
+  %v4 = load i8, i8* %v2, align 1, !tbaa !0
+  %v5 = load i8, i8* %v1, align 1, !tbaa !0
+  %v6 = icmp eq i8 %v4, %v5
+  br i1 %v6, label %b4, label %b3
+
+b3:                                               ; preds = %b2
+  %v7 = icmp ult i8 %v4, %v5
+  %v8 = select i1 %v7, i32 -1, i32 1
+  br label %b6
+
+b4:                                               ; preds = %b2
+  %v9 = getelementptr inbounds i8, i8* %v2, i32 1
+  %v10 = getelementptr inbounds i8, i8* %v1, i32 1
+  %v11 = add i32 %v3, -1
+  %v12 = icmp eq i32 %v11, 0
+  br i1 %v12, label %b5, label %b2
+
+b5:                                               ; preds = %b4
+  br label %b6
+
+b6:                                               ; preds = %b5, %b3, %b0
+  %v13 = phi i32 [ %v8, %b3 ], [ 0, %b0 ], [ 0, %b5 ]
+  ret i32 %v13
+}
+
+attributes #0 = { nounwind readonly "target-cpu"="hexagonv55" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/memcpy-memmove-inline.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/memcpy-memmove-inline.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/memcpy-memmove-inline.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/memcpy-memmove-inline.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,50 @@
+; RUN: llc -march=hexagon -O2 -mno-pairing -mno-compound < %s | FileCheck %s
+
+; Test that we inline calls to memcpy/memmove when
+; the array size is small.
+
+target triple = "hexagon-unknown--elf"
+
+; CHECK-LABEL: f0:
+; CHECK-DAG: [[REG1:r[0-9]*]] = memw(r{{[0-9]*}}+#0)
+; CHECK-DAG: [[REG2:r[0-9]*]] = memuh(r{{[0-9]*}}+#4)
+; CHECK-DAG: [[REG3:r[0-9]*]] = memub(r{{[0-9]*}}+#6)
+; CHECK-DAG: memw(r{{[0-9]*}}+#0) = [[REG1]]
+; CHECK-DAG: memh(r{{[0-9]*}}+#4) = [[REG2]]
+; CHECK-DAG: memb(r{{[0-9]*}}+#6) = [[REG3]]
+
+define i32 @f0(i32* %a0) #0 {
+b0:
+  %v0 = alloca [10 x i32], align 8
+  %v1 = bitcast [10 x i32]* %v0 to i8*
+  %v2 = bitcast i32* %a0 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 %v1, i8* align 4 %v2, i32 7, i1 false)
+  %v3 = getelementptr inbounds [10 x i32], [10 x i32]* %v0, i32 0, i32 0
+  call void @f1(i32* %v3, i32* %a0) #0
+  ret i32 0
+}
+
+declare void @f1(i32*, i32*)
+
+; CHECK-LABEL: f2:
+; CHECK-DAG: [[REG4:r[0-9]*]] = memub(r{{[0-9]*}}+#6)
+; CHECK-DAG: [[REG5:r[0-9]*]] = memuh(r{{[0-9]*}}+#4)
+; CHECK-DAG: [[REG6:r[0-9]*]] = memw(r{{[0-9]*}}+#0)
+; CHECK-DAG: memw(r{{[0-9]*}}+#0) = [[REG6]]
+; CHECK-DAG: memh(r{{[0-9]*}}+#4) = [[REG5]]
+; CHECK-DAG: memb(r{{[0-9]*}}+#6) = [[REG4]]
+
+define i32 @f2(i32* %a0, i32* %a1) #0 {
+b0:
+  %v0 = bitcast i32* %a1 to i8*
+  %v1 = bitcast i32* %a0 to i8*
+  call void @llvm.memmove.p0i8.p0i8.i32(i8* align 4 %v0, i8* align 4 %v1, i32 7, i1 false)
+  tail call void @f1(i32* %a1, i32* %a0) #0
+  ret i32 0
+}
+
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #1
+declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { argmemonly nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/memop-bit18.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/memop-bit18.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/memop-bit18.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/memop-bit18.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,20 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+target triple = "hexagon"
+
+; CHECK-LABEL: f0:
+; CHECK: memw({{.*}}) = clrbit(#18)
+define void @f0(i32* nocapture %a0) #0 {
+b0:
+  %v0 = load i32, i32* %a0, align 4, !tbaa !0
+  %v1 = and i32 %v0, -262145
+  store i32 %v1, i32* %a0, align 4, !tbaa !0
+  ret void
+}
+
+attributes #0 = { norecurse nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length64b" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/memops_global.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/memops_global.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/memops_global.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/memops_global.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,810 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+ at g0 = common global i8 0, align 1
+ at g1 = common global i8 0, align 1
+ at g2 = common global i16 0, align 2
+ at g3 = common global i16 0, align 2
+ at g4 = common global i32 0, align 4
+ at g5 = common global i32 0, align 4
+
+; CHECK-LABEL: f0:
+; CHECK: memb(r{{[0-9]+}}+#0) += #1
+define void @f0() #0 {
+b0:
+  %v0 = load i8, i8* @g0, align 1, !tbaa !0
+  %v1 = add i8 %v0, 1
+  store i8 %v1, i8* @g0, align 1, !tbaa !0
+  ret void
+}
+
+; CHECK-LABEL: f1:
+; CHECK: memb(r{{[0-9]+}}+#0) -= #1
+define void @f1() #0 {
+b0:
+  %v0 = load i8, i8* @g0, align 1, !tbaa !0
+  %v1 = add i8 %v0, -1
+  store i8 %v1, i8* @g0, align 1, !tbaa !0
+  ret void
+}
+
+; CHECK-LABEL: f2:
+; CHECK: memb(r{{[0-9]+}}+#0) += #5
+define void @f2() #0 {
+b0:
+  %v0 = load i8, i8* @g0, align 1, !tbaa !0
+  %v1 = zext i8 %v0 to i32
+  %v2 = add nsw i32 %v1, 5
+  %v3 = trunc i32 %v2 to i8
+  store i8 %v3, i8* @g0, align 1, !tbaa !0
+  ret void
+}
+
+; CHECK-LABEL: f3:
+; CHECK: memb(r{{[0-9]+}}+#0) -= #5
+define void @f3() #0 {
+b0:
+  %v0 = load i8, i8* @g0, align 1, !tbaa !0
+  %v1 = zext i8 %v0 to i32
+  %v2 = add nsw i32 %v1, 251
+  %v3 = trunc i32 %v2 to i8
+  store i8 %v3, i8* @g0, align 1, !tbaa !0
+  ret void
+}
+
+; CHECK-LABEL: f4:
+; CHECK: memb(r{{[0-9]+}}+#0) -= #5
+define void @f4() #0 {
+b0:
+  %v0 = load i8, i8* @g0, align 1, !tbaa !0
+  %v1 = zext i8 %v0 to i32
+  %v2 = add nsw i32 %v1, 251
+  %v3 = trunc i32 %v2 to i8
+  store i8 %v3, i8* @g0, align 1, !tbaa !0
+  ret void
+}
+
+; CHECK-LABEL: f5:
+; CHECK: memb(r{{[0-9]+}}+#0) += #5
+define void @f5() #0 {
+b0:
+  %v0 = load i8, i8* @g0, align 1, !tbaa !0
+  %v1 = zext i8 %v0 to i32
+  %v2 = add nsw i32 %v1, 5
+  %v3 = trunc i32 %v2 to i8
+  store i8 %v3, i8* @g0, align 1, !tbaa !0
+  ret void
+}
+
+; CHECK-LABEL: f6:
+; CHECK: memb(r{{[0-9]+}}+#0) += r{{[0-9]+}}
+define void @f6(i8 zeroext %a0) #0 {
+b0:
+  %v0 = zext i8 %a0 to i32
+  %v1 = load i8, i8* @g0, align 1, !tbaa !0
+  %v2 = zext i8 %v1 to i32
+  %v3 = add nsw i32 %v2, %v0
+  %v4 = trunc i32 %v3 to i8
+  store i8 %v4, i8* @g0, align 1, !tbaa !0
+  ret void
+}
+
+; CHECK-LABEL: f7:
+; CHECK: memb(r{{[0-9]+}}+#0) -= r{{[0-9]+}}
+define void @f7(i8 zeroext %a0) #0 {
+b0:
+  %v0 = zext i8 %a0 to i32
+  %v1 = load i8, i8* @g0, align 1, !tbaa !0
+  %v2 = zext i8 %v1 to i32
+  %v3 = sub nsw i32 %v2, %v0
+  %v4 = trunc i32 %v3 to i8
+  store i8 %v4, i8* @g0, align 1, !tbaa !0
+  ret void
+}
+
+; CHECK-LABEL: f8:
+; CHECK: memb(r{{[0-9]+}}+#0) |= r{{[0-9]+}}
+define void @f8(i8 zeroext %a0) #0 {
+b0:
+  %v0 = load i8, i8* @g0, align 1, !tbaa !0
+  %v1 = or i8 %v0, %a0
+  store i8 %v1, i8* @g0, align 1, !tbaa !0
+  ret void
+}
+
+; CHECK-LABEL: f9:
+; CHECK: memb(r{{[0-9]+}}+#0) &= r{{[0-9]+}}
+define void @f9(i8 zeroext %a0) #0 {
+b0:
+  %v0 = load i8, i8* @g0, align 1, !tbaa !0
+  %v1 = and i8 %v0, %a0
+  store i8 %v1, i8* @g0, align 1, !tbaa !0
+  ret void
+}
+
+; CHECK-LABEL: f10:
+; CHECK: memb(r{{[0-9]+}}+#0) = clrbit(#5)
+define void @f10() #0 {
+b0:
+  %v0 = load i8, i8* @g0, align 1, !tbaa !0
+  %v1 = zext i8 %v0 to i32
+  %v2 = and i32 %v1, 223
+  %v3 = trunc i32 %v2 to i8
+  store i8 %v3, i8* @g0, align 1, !tbaa !0
+  ret void
+}
+
+; CHECK-LABEL: f11:
+; CHECK: memb(r{{[0-9]+}}+#0) = setbit(#7)
+define void @f11() #0 {
+b0:
+  %v0 = load i8, i8* @g0, align 1, !tbaa !0
+  %v1 = zext i8 %v0 to i32
+  %v2 = or i32 %v1, 128
+  %v3 = trunc i32 %v2 to i8
+  store i8 %v3, i8* @g0, align 1, !tbaa !0
+  ret void
+}
+
+; CHECK-LABEL: f12:
+; CHECK: memb(r{{[0-9]+}}+#0) += #1
+define void @f12() #0 {
+b0:
+  %v0 = load i8, i8* @g1, align 1, !tbaa !0
+  %v1 = add i8 %v0, 1
+  store i8 %v1, i8* @g1, align 1, !tbaa !0
+  ret void
+}
+
+; CHECK-LABEL: f13:
+; CHECK: memb(r{{[0-9]+}}+#0) -= #1
+define void @f13() #0 {
+b0:
+  %v0 = load i8, i8* @g1, align 1, !tbaa !0
+  %v1 = add i8 %v0, -1
+  store i8 %v1, i8* @g1, align 1, !tbaa !0
+  ret void
+}
+
+; CHECK-LABEL: f14:
+; CHECK: memb(r{{[0-9]+}}+#0) += #5
+define void @f14() #0 {
+b0:
+  %v0 = load i8, i8* @g1, align 1, !tbaa !0
+  %v1 = zext i8 %v0 to i32
+  %v2 = add nsw i32 %v1, 5
+  %v3 = trunc i32 %v2 to i8
+  store i8 %v3, i8* @g1, align 1, !tbaa !0
+  ret void
+}
+
+; CHECK-LABEL: f15:
+; CHECK: memb(r{{[0-9]+}}+#0) -= #5
+define void @f15() #0 {
+b0:
+  %v0 = load i8, i8* @g1, align 1, !tbaa !0
+  %v1 = zext i8 %v0 to i32
+  %v2 = add nsw i32 %v1, 251
+  %v3 = trunc i32 %v2 to i8
+  store i8 %v3, i8* @g1, align 1, !tbaa !0
+  ret void
+}
+
+; CHECK-LABEL: f16:
+; CHECK: memb(r{{[0-9]+}}+#0) -= #5
+define void @f16() #0 {
+b0:
+  %v0 = load i8, i8* @g1, align 1, !tbaa !0
+  %v1 = zext i8 %v0 to i32
+  %v2 = add nsw i32 %v1, 251
+  %v3 = trunc i32 %v2 to i8
+  store i8 %v3, i8* @g1, align 1, !tbaa !0
+  ret void
+}
+
+; CHECK-LABEL: f17:
+; CHECK: memb(r{{[0-9]+}}+#0) += #5
+define void @f17() #0 {
+b0:
+  %v0 = load i8, i8* @g1, align 1, !tbaa !0
+  %v1 = zext i8 %v0 to i32
+  %v2 = add nsw i32 %v1, 5
+  %v3 = trunc i32 %v2 to i8
+  store i8 %v3, i8* @g1, align 1, !tbaa !0
+  ret void
+}
+
+; CHECK-LABEL: f18:
+; CHECK: memb(r{{[0-9]+}}+#0) += r{{[0-9]+}}
+define void @f18(i8 signext %a0) #0 {
+b0:
+  %v0 = zext i8 %a0 to i32
+  %v1 = load i8, i8* @g1, align 1, !tbaa !0
+  %v2 = zext i8 %v1 to i32
+  %v3 = add nsw i32 %v2, %v0
+  %v4 = trunc i32 %v3 to i8
+  store i8 %v4, i8* @g1, align 1, !tbaa !0
+  ret void
+}
+
+; CHECK-LABEL: f19:
+; CHECK: memb(r{{[0-9]+}}+#0) -= r{{[0-9]+}}
+define void @f19(i8 signext %a0) #0 {
+b0:
+  %v0 = zext i8 %a0 to i32
+  %v1 = load i8, i8* @g1, align 1, !tbaa !0
+  %v2 = zext i8 %v1 to i32
+  %v3 = sub nsw i32 %v2, %v0
+  %v4 = trunc i32 %v3 to i8
+  store i8 %v4, i8* @g1, align 1, !tbaa !0
+  ret void
+}
+
+; CHECK-LABEL: f20:
+; CHECK: memb(r{{[0-9]+}}+#0) |= r{{[0-9]+}}
+define void @f20(i8 signext %a0) #0 {
+b0:
+  %v0 = load i8, i8* @g1, align 1, !tbaa !0
+  %v1 = or i8 %v0, %a0
+  store i8 %v1, i8* @g1, align 1, !tbaa !0
+  ret void
+}
+
+; CHECK-LABEL: f21:
+; CHECK: memb(r{{[0-9]+}}+#0) &= r{{[0-9]+}}
+define void @f21(i8 signext %a0) #0 {
+b0:
+  %v0 = load i8, i8* @g1, align 1, !tbaa !0
+  %v1 = and i8 %v0, %a0
+  store i8 %v1, i8* @g1, align 1, !tbaa !0
+  ret void
+}
+
+; CHECK-LABEL: f22:
+; CHECK: memb(r{{[0-9]+}}+#0) = clrbit(#5)
+define void @f22() #0 {
+b0:
+  %v0 = load i8, i8* @g1, align 1, !tbaa !0
+  %v1 = zext i8 %v0 to i32
+  %v2 = and i32 %v1, 223
+  %v3 = trunc i32 %v2 to i8
+  store i8 %v3, i8* @g1, align 1, !tbaa !0
+  ret void
+}
+
+; CHECK-LABEL: f23:
+; CHECK: memb(r{{[0-9]+}}+#0) = setbit(#7)
+define void @f23() #0 {
+b0:
+  %v0 = load i8, i8* @g1, align 1, !tbaa !0
+  %v1 = zext i8 %v0 to i32
+  %v2 = or i32 %v1, 128
+  %v3 = trunc i32 %v2 to i8
+  store i8 %v3, i8* @g1, align 1, !tbaa !0
+  ret void
+}
+
+; CHECK-LABEL: f24:
+; CHECK: memh(r{{[0-9]+}}+#0) += #1
+define void @f24() #0 {
+b0:
+  %v0 = load i16, i16* @g2, align 2, !tbaa !3
+  %v1 = add i16 %v0, 1
+  store i16 %v1, i16* @g2, align 2, !tbaa !3
+  ret void
+}
+
+; CHECK-LABEL: f25:
+; CHECK: memh(r{{[0-9]+}}+#0) -= #1
+define void @f25() #0 {
+b0:
+  %v0 = load i16, i16* @g2, align 2, !tbaa !3
+  %v1 = add i16 %v0, -1
+  store i16 %v1, i16* @g2, align 2, !tbaa !3
+  ret void
+}
+
+; CHECK-LABEL: f26:
+; CHECK: memh(r{{[0-9]+}}+#0) += #5
+define void @f26() #0 {
+b0:
+  %v0 = load i16, i16* @g2, align 2, !tbaa !3
+  %v1 = zext i16 %v0 to i32
+  %v2 = add nsw i32 %v1, 5
+  %v3 = trunc i32 %v2 to i16
+  store i16 %v3, i16* @g2, align 2, !tbaa !3
+  ret void
+}
+
+; CHECK-LABEL: f27:
+; CHECK: memh(r{{[0-9]+}}+#0) -= #5
+define void @f27() #0 {
+b0:
+  %v0 = load i16, i16* @g2, align 2, !tbaa !3
+  %v1 = zext i16 %v0 to i32
+  %v2 = add nsw i32 %v1, 65531
+  %v3 = trunc i32 %v2 to i16
+  store i16 %v3, i16* @g2, align 2, !tbaa !3
+  ret void
+}
+
+; CHECK-LABEL: f28:
+; CHECK: memh(r{{[0-9]+}}+#0) -= #5
+define void @f28() #0 {
+b0:
+  %v0 = load i16, i16* @g2, align 2, !tbaa !3
+  %v1 = zext i16 %v0 to i32
+  %v2 = add nsw i32 %v1, 65531
+  %v3 = trunc i32 %v2 to i16
+  store i16 %v3, i16* @g2, align 2, !tbaa !3
+  ret void
+}
+
+; CHECK-LABEL: f29:
+; CHECK: memh(r{{[0-9]+}}+#0) += #5
+define void @f29() #0 {
+b0:
+  %v0 = load i16, i16* @g2, align 2, !tbaa !3
+  %v1 = zext i16 %v0 to i32
+  %v2 = add nsw i32 %v1, 5
+  %v3 = trunc i32 %v2 to i16
+  store i16 %v3, i16* @g2, align 2, !tbaa !3
+  ret void
+}
+
+; CHECK-LABEL: f30:
+; CHECK: memh(r{{[0-9]+}}+#0) += r{{[0-9]+}}
+define void @f30(i16 zeroext %a0) #0 {
+b0:
+  %v0 = zext i16 %a0 to i32
+  %v1 = load i16, i16* @g2, align 2, !tbaa !3
+  %v2 = zext i16 %v1 to i32
+  %v3 = add nsw i32 %v2, %v0
+  %v4 = trunc i32 %v3 to i16
+  store i16 %v4, i16* @g2, align 2, !tbaa !3
+  ret void
+}
+
+; CHECK-LABEL: f31:
+; CHECK: memh(r{{[0-9]+}}+#0) -= r{{[0-9]+}}
+define void @f31(i16 zeroext %a0) #0 {
+b0:
+  %v0 = zext i16 %a0 to i32
+  %v1 = load i16, i16* @g2, align 2, !tbaa !3
+  %v2 = zext i16 %v1 to i32
+  %v3 = sub nsw i32 %v2, %v0
+  %v4 = trunc i32 %v3 to i16
+  store i16 %v4, i16* @g2, align 2, !tbaa !3
+  ret void
+}
+
+; CHECK-LABEL: f32:
+; CHECK: memh(r{{[0-9]+}}+#0) |= r{{[0-9]+}}
+define void @f32(i16 zeroext %a0) #0 {
+b0:
+  %v0 = load i16, i16* @g2, align 2, !tbaa !3
+  %v1 = or i16 %v0, %a0
+  store i16 %v1, i16* @g2, align 2, !tbaa !3
+  ret void
+}
+
+; CHECK-LABEL: f33:
+; CHECK: memh(r{{[0-9]+}}+#0) &= r{{[0-9]+}}
+define void @f33(i16 zeroext %a0) #0 {
+b0:
+  %v0 = load i16, i16* @g2, align 2, !tbaa !3
+  %v1 = and i16 %v0, %a0
+  store i16 %v1, i16* @g2, align 2, !tbaa !3
+  ret void
+}
+
+; CHECK-LABEL: f34:
+; CHECK: memh(r{{[0-9]+}}+#0) = clrbit(#5)
+define void @f34() #0 {
+b0:
+  %v0 = load i16, i16* @g2, align 2, !tbaa !3
+  %v1 = zext i16 %v0 to i32
+  %v2 = and i32 %v1, 65503
+  %v3 = trunc i32 %v2 to i16
+  store i16 %v3, i16* @g2, align 2, !tbaa !3
+  ret void
+}
+
+; CHECK-LABEL: f35:
+; CHECK: memh(r{{[0-9]+}}+#0) = setbit(#7)
+define void @f35() #0 {
+b0:
+  %v0 = load i16, i16* @g2, align 2, !tbaa !3
+  %v1 = zext i16 %v0 to i32
+  %v2 = or i32 %v1, 128
+  %v3 = trunc i32 %v2 to i16
+  store i16 %v3, i16* @g2, align 2, !tbaa !3
+  ret void
+}
+
+; CHECK-LABEL: f36:
+; CHECK: memh(r{{[0-9]+}}+#0) += #1
+define void @f36() #0 {
+b0:
+  %v0 = load i16, i16* @g3, align 2, !tbaa !3
+  %v1 = add i16 %v0, 1
+  store i16 %v1, i16* @g3, align 2, !tbaa !3
+  ret void
+}
+
+; CHECK-LABEL: f37:
+; CHECK: memh(r{{[0-9]+}}+#0) -= #1
+define void @f37() #0 {
+b0:
+  %v0 = load i16, i16* @g3, align 2, !tbaa !3
+  %v1 = add i16 %v0, -1
+  store i16 %v1, i16* @g3, align 2, !tbaa !3
+  ret void
+}
+
+; CHECK-LABEL: f38:
+; CHECK: memh(r{{[0-9]+}}+#0) += #5
+define void @f38() #0 {
+b0:
+  %v0 = load i16, i16* @g3, align 2, !tbaa !3
+  %v1 = zext i16 %v0 to i32
+  %v2 = add nsw i32 %v1, 5
+  %v3 = trunc i32 %v2 to i16
+  store i16 %v3, i16* @g3, align 2, !tbaa !3
+  ret void
+}
+
+; CHECK-LABEL: f39:
+; CHECK: memh(r{{[0-9]+}}+#0) -= #5
+define void @f39() #0 {
+b0:
+  %v0 = load i16, i16* @g3, align 2, !tbaa !3
+  %v1 = zext i16 %v0 to i32
+  %v2 = add nsw i32 %v1, 65531
+  %v3 = trunc i32 %v2 to i16
+  store i16 %v3, i16* @g3, align 2, !tbaa !3
+  ret void
+}
+
+; CHECK-LABEL: f40:
+; CHECK: memh(r{{[0-9]+}}+#0) -= #5
+define void @f40() #0 {
+b0:
+  %v0 = load i16, i16* @g3, align 2, !tbaa !3
+  %v1 = zext i16 %v0 to i32
+  %v2 = add nsw i32 %v1, 65531
+  %v3 = trunc i32 %v2 to i16
+  store i16 %v3, i16* @g3, align 2, !tbaa !3
+  ret void
+}
+
+; CHECK-LABEL: f41:
+; CHECK: memh(r{{[0-9]+}}+#0) += #5
+define void @f41() #0 {
+b0:
+  %v0 = load i16, i16* @g3, align 2, !tbaa !3
+  %v1 = zext i16 %v0 to i32
+  %v2 = add nsw i32 %v1, 5
+  %v3 = trunc i32 %v2 to i16
+  store i16 %v3, i16* @g3, align 2, !tbaa !3
+  ret void
+}
+
+; CHECK-LABEL: f42:
+; CHECK: memh(r{{[0-9]+}}+#0) += r{{[0-9]+}}
+define void @f42(i16 signext %a0) #0 {
+b0:
+  %v0 = zext i16 %a0 to i32
+  %v1 = load i16, i16* @g3, align 2, !tbaa !3
+  %v2 = zext i16 %v1 to i32
+  %v3 = add nsw i32 %v2, %v0
+  %v4 = trunc i32 %v3 to i16
+  store i16 %v4, i16* @g3, align 2, !tbaa !3
+  ret void
+}
+
+; CHECK-LABEL: f43:
+; CHECK: memh(r{{[0-9]+}}+#0) -= r{{[0-9]+}}
+define void @f43(i16 signext %a0) #0 {
+b0:
+  %v0 = zext i16 %a0 to i32
+  %v1 = load i16, i16* @g3, align 2, !tbaa !3
+  %v2 = zext i16 %v1 to i32
+  %v3 = sub nsw i32 %v2, %v0
+  %v4 = trunc i32 %v3 to i16
+  store i16 %v4, i16* @g3, align 2, !tbaa !3
+  ret void
+}
+
+; CHECK-LABEL: f44:
+; CHECK: memh(r{{[0-9]+}}+#0) |= r{{[0-9]+}}
+define void @f44(i16 signext %a0) #0 {
+b0:
+  %v0 = load i16, i16* @g3, align 2, !tbaa !3
+  %v1 = or i16 %v0, %a0
+  store i16 %v1, i16* @g3, align 2, !tbaa !3
+  ret void
+}
+
+; CHECK-LABEL: f45:
+; CHECK: memh(r{{[0-9]+}}+#0) &= r{{[0-9]+}}
+define void @f45(i16 signext %a0) #0 {
+b0:
+  %v0 = load i16, i16* @g3, align 2, !tbaa !3
+  %v1 = and i16 %v0, %a0
+  store i16 %v1, i16* @g3, align 2, !tbaa !3
+  ret void
+}
+
+; CHECK-LABEL: f46:
+; CHECK: memh(r{{[0-9]+}}+#0) = clrbit(#5)
+define void @f46() #0 {
+b0:
+  %v0 = load i16, i16* @g3, align 2, !tbaa !3
+  %v1 = zext i16 %v0 to i32
+  %v2 = and i32 %v1, 65503
+  %v3 = trunc i32 %v2 to i16
+  store i16 %v3, i16* @g3, align 2, !tbaa !3
+  ret void
+}
+
+; CHECK-LABEL: f47:
+; CHECK: memh(r{{[0-9]+}}+#0) = setbit(#7)
+define void @f47() #0 {
+b0:
+  %v0 = load i16, i16* @g3, align 2, !tbaa !3
+  %v1 = zext i16 %v0 to i32
+  %v2 = or i32 %v1, 128
+  %v3 = trunc i32 %v2 to i16
+  store i16 %v3, i16* @g3, align 2, !tbaa !3
+  ret void
+}
+
+; CHECK-LABEL: f48:
+; CHECK: memw(r{{[0-9]+}}+#0) += #1
+define void @f48() #0 {
+b0:
+  %v0 = load i32, i32* @g4, align 4, !tbaa !5
+  %v1 = add nsw i32 %v0, 1
+  store i32 %v1, i32* @g4, align 4, !tbaa !5
+  ret void
+}
+
+; CHECK-LABEL: f49:
+; CHECK: memw(r{{[0-9]+}}+#0) -= #1
+define void @f49() #0 {
+b0:
+  %v0 = load i32, i32* @g4, align 4, !tbaa !5
+  %v1 = add nsw i32 %v0, -1
+  store i32 %v1, i32* @g4, align 4, !tbaa !5
+  ret void
+}
+
+; CHECK-LABEL: f50:
+; CHECK: memw(r{{[0-9]+}}+#0) += #5
+define void @f50() #0 {
+b0:
+  %v0 = load i32, i32* @g4, align 4, !tbaa !5
+  %v1 = add nsw i32 %v0, 5
+  store i32 %v1, i32* @g4, align 4, !tbaa !5
+  ret void
+}
+
+; CHECK-LABEL: f51:
+; CHECK: memw(r{{[0-9]+}}+#0) -= #5
+define void @f51() #0 {
+b0:
+  %v0 = load i32, i32* @g4, align 4, !tbaa !5
+  %v1 = add nsw i32 %v0, -5
+  store i32 %v1, i32* @g4, align 4, !tbaa !5
+  ret void
+}
+
+; CHECK-LABEL: f52:
+; CHECK: memw(r{{[0-9]+}}+#0) -= #5
+define void @f52() #0 {
+b0:
+  %v0 = load i32, i32* @g4, align 4, !tbaa !5
+  %v1 = add nsw i32 %v0, -5
+  store i32 %v1, i32* @g4, align 4, !tbaa !5
+  ret void
+}
+
+; CHECK-LABEL: f53:
+; CHECK: memw(r{{[0-9]+}}+#0) += #5
+define void @f53() #0 {
+b0:
+  %v0 = load i32, i32* @g4, align 4, !tbaa !5
+  %v1 = add nsw i32 %v0, 5
+  store i32 %v1, i32* @g4, align 4, !tbaa !5
+  ret void
+}
+
+; CHECK-LABEL: f54:
+; CHECK: memw(r{{[0-9]+}}+#0) += r{{[0-9]+}}
+define void @f54(i32 %a0) #0 {
+b0:
+  %v0 = load i32, i32* @g4, align 4, !tbaa !5
+  %v1 = add i32 %v0, %a0
+  store i32 %v1, i32* @g4, align 4, !tbaa !5
+  ret void
+}
+
+; CHECK-LABEL: f55:
+; CHECK: memw(r{{[0-9]+}}+#0) -= r{{[0-9]+}}
+define void @f55(i32 %a0) #0 {
+b0:
+  %v0 = load i32, i32* @g4, align 4, !tbaa !5
+  %v1 = sub i32 %v0, %a0
+  store i32 %v1, i32* @g4, align 4, !tbaa !5
+  ret void
+}
+
+; CHECK-LABEL: f56:
+; CHECK: memw(r{{[0-9]+}}+#0) |= r{{[0-9]+}}
+define void @f56(i32 %a0) #0 {
+b0:
+  %v0 = load i32, i32* @g4, align 4, !tbaa !5
+  %v1 = or i32 %v0, %a0
+  store i32 %v1, i32* @g4, align 4, !tbaa !5
+  ret void
+}
+
+; CHECK-LABEL: f57:
+; CHECK: memw(r{{[0-9]+}}+#0) &= r{{[0-9]+}}
+define void @f57(i32 %a0) #0 {
+b0:
+  %v0 = load i32, i32* @g4, align 4, !tbaa !5
+  %v1 = and i32 %v0, %a0
+  store i32 %v1, i32* @g4, align 4, !tbaa !5
+  ret void
+}
+
+; CHECK-LABEL: f58:
+; CHECK: memw(r{{[0-9]+}}+#0) = clrbit(#5)
+define void @f58() #0 {
+b0:
+  %v0 = load i32, i32* @g4, align 4, !tbaa !5
+  %v1 = and i32 %v0, -33
+  store i32 %v1, i32* @g4, align 4, !tbaa !5
+  ret void
+}
+
+; CHECK-LABEL: f59:
+; CHECK: memw(r{{[0-9]+}}+#0) = setbit(#7)
+define void @f59() #0 {
+b0:
+  %v0 = load i32, i32* @g4, align 4, !tbaa !5
+  %v1 = or i32 %v0, 128
+  store i32 %v1, i32* @g4, align 4, !tbaa !5
+  ret void
+}
+
+; CHECK-LABEL: f60:
+; CHECK: memw(r{{[0-9]+}}+#0) += #1
+define void @f60() #0 {
+b0:
+  %v0 = load i32, i32* @g5, align 4, !tbaa !5
+  %v1 = add i32 %v0, 1
+  store i32 %v1, i32* @g5, align 4, !tbaa !5
+  ret void
+}
+
+; CHECK-LABEL: f61:
+; CHECK: memw(r{{[0-9]+}}+#0) -= #1
+define void @f61() #0 {
+b0:
+  %v0 = load i32, i32* @g5, align 4, !tbaa !5
+  %v1 = add i32 %v0, -1
+  store i32 %v1, i32* @g5, align 4, !tbaa !5
+  ret void
+}
+
+; CHECK-LABEL: f62:
+; CHECK: memw(r{{[0-9]+}}+#0) += #5
+define void @f62() #0 {
+b0:
+  %v0 = load i32, i32* @g5, align 4, !tbaa !5
+  %v1 = add i32 %v0, 5
+  store i32 %v1, i32* @g5, align 4, !tbaa !5
+  ret void
+}
+
+; CHECK-LABEL: f63:
+; CHECK: memw(r{{[0-9]+}}+#0) -= #5
+define void @f63() #0 {
+b0:
+  %v0 = load i32, i32* @g5, align 4, !tbaa !5
+  %v1 = add i32 %v0, -5
+  store i32 %v1, i32* @g5, align 4, !tbaa !5
+  ret void
+}
+
+; CHECK-LABEL: f64:
+; CHECK: memw(r{{[0-9]+}}+#0) -= #5
+define void @f64() #0 {
+b0:
+  %v0 = load i32, i32* @g5, align 4, !tbaa !5
+  %v1 = add i32 %v0, -5
+  store i32 %v1, i32* @g5, align 4, !tbaa !5
+  ret void
+}
+
+; CHECK-LABEL: f65:
+; CHECK: memw(r{{[0-9]+}}+#0) += #5
+define void @f65() #0 {
+b0:
+  %v0 = load i32, i32* @g5, align 4, !tbaa !5
+  %v1 = add i32 %v0, 5
+  store i32 %v1, i32* @g5, align 4, !tbaa !5
+  ret void
+}
+
+; CHECK-LABEL: f66:
+; CHECK: memw(r{{[0-9]+}}+#0) += r{{[0-9]+}}
+define void @f66(i32 %a0) #0 {
+b0:
+  %v0 = load i32, i32* @g5, align 4, !tbaa !5
+  %v1 = add i32 %v0, %a0
+  store i32 %v1, i32* @g5, align 4, !tbaa !5
+  ret void
+}
+
+; CHECK-LABEL: f67:
+; CHECK: memw(r{{[0-9]+}}+#0) -= r{{[0-9]+}}
+define void @f67(i32 %a0) #0 {
+b0:
+  %v0 = load i32, i32* @g5, align 4, !tbaa !5
+  %v1 = sub i32 %v0, %a0
+  store i32 %v1, i32* @g5, align 4, !tbaa !5
+  ret void
+}
+
+; CHECK-LABEL: f68:
+; CHECK: memw(r{{[0-9]+}}+#0) |= r{{[0-9]+}}
+define void @f68(i32 %a0) #0 {
+b0:
+  %v0 = load i32, i32* @g5, align 4, !tbaa !5
+  %v1 = or i32 %v0, %a0
+  store i32 %v1, i32* @g5, align 4, !tbaa !5
+  ret void
+}
+
+; CHECK-LABEL: f69:
+; CHECK: memw(r{{[0-9]+}}+#0) &= r{{[0-9]+}}
+define void @f69(i32 %a0) #0 {
+b0:
+  %v0 = load i32, i32* @g5, align 4, !tbaa !5
+  %v1 = and i32 %v0, %a0
+  store i32 %v1, i32* @g5, align 4, !tbaa !5
+  ret void
+}
+
+; CHECK-LABEL: f70:
+; CHECK: memw(r{{[0-9]+}}+#0) = clrbit(#5)
+define void @f70() #0 {
+b0:
+  %v0 = load i32, i32* @g5, align 4, !tbaa !5
+  %v1 = and i32 %v0, -33
+  store i32 %v1, i32* @g5, align 4, !tbaa !5
+  ret void
+}
+
+; CHECK-LABEL: f71:
+; CHECK: memw(r{{[0-9]+}}+#0) = setbit(#7)
+define void @f71() #0 {
+b0:
+  %v0 = load i32, i32* @g5, align 4, !tbaa !5
+  %v1 = or i32 %v0, 128
+  store i32 %v1, i32* @g5, align 4, !tbaa !5
+  ret void
+}
+
+attributes #0 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2}
+!2 = !{!"Simple C/C++ TBAA"}
+!3 = !{!4, !4, i64 0}
+!4 = !{!"short", !1}
+!5 = !{!6, !6, i64 0}
+!6 = !{!"int", !1}
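
For reference, every function from f15 through f71 above has the same shape: load a global, apply a small arithmetic or bitwise update, truncate if necessary, and store the result back. The expected output is a single Hexagon memop (memb/memh/memw with +=, -=, |=, &=, clrbit or setbit) rather than separate load, ALU and store instructions. A rough C-level sketch of the idioms being exercised follows; the global names mirror the test, but the original source these cases were reduced from is not part of the commit, so treat it as an assumption:

    /* Illustrative read-modify-write updates of globals; each body is
       expected to fold into one memop instruction. */
    extern unsigned char  g1;
    extern unsigned short g2, g3;
    extern int            g4, g5;

    void byte_add_imm(void)             { g1 += 5; }          /* memb(...) += #5        */
    void byte_sub_imm(void)             { g1 -= 5; }          /* memb(...) -= #5        */
    void byte_add_reg(unsigned char x)  { g1 += x; }          /* memb(...) += r         */
    void byte_clrbit(void)              { g1 &= ~(1u << 5); } /* memb(...) = clrbit(#5) */
    void byte_setbit(void)              { g1 |= 1u << 7; }    /* memb(...) = setbit(#7) */
    void half_sub_reg(unsigned short x) { g2 -= x; }          /* memh(...) -= r         */
    void word_or_reg(int x)             { g4 |= x; }          /* memw(...) |= r         */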

Added: llvm/trunk/test/CodeGen/Hexagon/memset-inline.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/memset-inline.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/memset-inline.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/memset-inline.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,27 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+
+target triple = "hexagon-unknown--elf"
+
+; Test that we inline memsets when the length being set is small.
+; CHECK-LABEL: f0:
+; CHECK-DAG: memw
+; CHECK-DAG: memb
+; CHECK-DAG: memh
+define i32 @f0() #0 {
+b0:
+  %v0 = alloca [10 x i32], align 8
+  %v1 = bitcast [10 x i32]* %v0 to i8*
+  call void @llvm.memset.p0i8.i32(i8* align 8 %v1, i8 0, i32 7, i1 false)
+  %v2 = getelementptr inbounds [10 x i32], [10 x i32]* %v0, i32 0, i32 0
+  call void @f1(i32* %v2) #0
+  ret i32 0
+}
+
+; Function Attrs: nounwind
+declare void @f1(i32*) #0
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { argmemonly nounwind }
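
The 7-byte memset above is expected to decompose into one word, one halfword and one byte store, which is what the three CHECK-DAG lines look for. A plausible C shape, given as an assumption (the function name small_clear is made up and f1 stands in for the call that keeps the array alive):

    #include <string.h>

    void f1(int *);

    /* Clearing the first 7 bytes of a small local array: short enough to
       be expanded inline instead of calling memset. */
    int small_clear(void) {
      int a[10];
      memset(a, 0, 7);
      f1(a);
      return 0;
    }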

Added: llvm/trunk/test/CodeGen/Hexagon/mipi-double-small.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/mipi-double-small.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/mipi-double-small.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/mipi-double-small.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,27 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b0
+  br i1 undef, label %b2, label %b3
+
+b2:                                               ; preds = %b1
+  %v0 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> undef)
+  store <32 x i32> %v0, <32 x i32>* undef, align 128
+  unreachable
+
+b3:                                               ; preds = %b1
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length128b" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/mpysin-imm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/mpysin-imm.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/mpysin-imm.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/mpysin-imm.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,18 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; M2_mpysin takes 8-bit unsigned immediates and is not extendable.
+; CHECK-NOT: = -mpyi(r{{[0-9]*}},#1536)
+
+target triple = "hexagon-unknown--elf"
+
+@g0 = common global i32 0, align 4
+
+; Function Attrs: nounwind
+define i32 @f0(i32 %a0) #0 {
+b0:
+  %v0 = mul nsw i32 %a0, 1536
+  store i32 %v0, i32* @g0, align 4
+  %v1 = sub nsw i32 0, %v0
+  ret i32 %v1
+}
+
+attributes #0 = { nounwind }
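
M2_mpysin is the negated multiply-by-immediate form (Rd = -mpyi(Rs,#u8)); since its immediate is limited to 8 unsigned bits and cannot be constant-extended, 1536 is out of range and the backend must fall back to a regular multiply plus a negate. A hedged C sketch of the shape being compiled (assumed, not the original source):

    extern int g0;

    /* The negated product -(a0 * 1536) must not be selected as
       -mpyi(r,#1536): 1536 does not fit in the 8-bit immediate. */
    int f0(int a0) {
      int v = a0 * 1536;
      g0 = v;
      return -v;
    }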

Added: llvm/trunk/test/CodeGen/Hexagon/mul64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/mul64.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/mul64.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/mul64.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,292 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; This test checks for the generation of the 64-bit multiply instructions
+; (dpmpyss_s0 and dpmpyuu_s0).
+
+; Checks for unsigned multiplication.
+
+; 16 x 16 = 64
+; CHECK-LABEL: f0:
+; CHECK: r1:0 = mpyu(
+define i64 @f0(i16 zeroext %a0, i16 zeroext %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = zext i16 %a0 to i64
+  %v1 = zext i16 %a1 to i64
+  %v2 = mul nuw nsw i64 %v1, %v0
+  ret i64 %v2
+}
+
+; 32 x 32 = 64
+; CHECK-LABEL: f1:
+; CHECK: r1:0 = mpyu(
+define i64 @f1(i32 %a0, i32 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = zext i32 %a0 to i64
+  %v1 = zext i32 %a1 to i64
+  %v2 = mul nuw nsw i64 %v1, %v0
+  ret i64 %v2
+}
+
+; Given int w[2], short h[4], signed char c[8], the tests below check for the
+; generation of dpmpyuu_s0.
+; w[0] * h[0]
+; CHECK-LABEL: f2:
+; CHECK: = sxth
+; CHECK: r1:0 = mpyu(
+define i64 @f2(i64 %a0, i64 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = and i64 %a0, 4294967295
+  %v1 = trunc i64 %a1 to i32
+  %v2 = shl i32 %v1, 16
+  %v3 = ashr exact i32 %v2, 16
+  %v4 = zext i32 %v3 to i64
+  %v5 = mul nuw i64 %v0, %v4
+  ret i64 %v5
+}
+
+; w[0] * h[1]
+; CHECK-LABEL: f3:
+; CHECK: = asrh
+; CHECK: r1:0 = mpyu(
+define i64 @f3(i64 %a0, i64 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = and i64 %a0, 4294967295
+  %v1 = trunc i64 %a1 to i32
+  %v2 = ashr i32 %v1, 16
+  %v3 = zext i32 %v2 to i64
+  %v4 = mul nuw i64 %v0, %v3
+  ret i64 %v4
+}
+
+; w[0] * h[2]
+; CHECK-LABEL: f4:
+; CHECK: = extract(
+; CHECK: r1:0 = mpyu(
+define i64 @f4(i64 %a0, i64 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = and i64 %a0, 4294967295
+  %v1 = lshr i64 %a1, 32
+  %v2 = shl nuw nsw i64 %v1, 16
+  %v3 = trunc i64 %v2 to i32
+  %v4 = ashr exact i32 %v3, 16
+  %v5 = zext i32 %v4 to i64
+  %v6 = mul nuw i64 %v0, %v5
+  ret i64 %v6
+}
+
+; w[0] * h[3]
+; CHECK-LABEL: f5:
+; CHECK: = extractu(
+; CHECK: r1:0 = mpyu(
+define i64 @f5(i64 %a0, i64 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = and i64 %a0, 4294967295
+  %v1 = lshr i64 %a1, 48
+  %v2 = shl nuw nsw i64 %v1, 16
+  %v3 = trunc i64 %v2 to i32
+  %v4 = ashr exact i32 %v3, 16
+  %v5 = zext i32 %v4 to i64
+  %v6 = mul nuw i64 %v0, %v5
+  ret i64 %v6
+}
+
+; w[1] * h[0]
+; CHECK-LABEL: f6:
+; CHECK: = sxth(
+; CHECK: r1:0 = mpyu(
+define i64 @f6(i64 %a0, i64 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = lshr i64 %a0, 32
+  %v1 = trunc i64 %a1 to i32
+  %v2 = shl i32 %v1, 16
+  %v3 = ashr exact i32 %v2, 16
+  %v4 = zext i32 %v3 to i64
+  %v5 = mul nuw i64 %v0, %v4
+  ret i64 %v5
+}
+
+; w[0] * c[0]
+; CHECK-LABEL: f7:
+; CHECK: = and({{.*}}#255)
+; CHECK: r1:0 = mpyu(
+define i64 @f7(i64 %a0, i64 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = and i64 %a0, 4294967295
+  %v1 = and i64 %a1, 255
+  %v2 = mul nuw nsw i64 %v1, %v0
+  ret i64 %v2
+}
+
+; w[0] * c[2]
+; CHECK-LABEL: f8:
+; CHECK: = extractu(
+; CHECK: r1:0 = mpyu(
+define i64 @f8(i64 %a0, i64 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = and i64 %a0, 4294967295
+  %v1 = lshr i64 %a1, 16
+  %v2 = and i64 %v1, 255
+  %v3 = mul nuw nsw i64 %v2, %v0
+  ret i64 %v3
+}
+
+; w[0] * c[7]
+; CHECK-LABEL: f9:
+; CHECK: = lsr(
+; CHECK: r1:0 = mpyu(
+define i64 @f9(i64 %a0, i64 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = and i64 %a0, 4294967295
+  %v1 = lshr i64 %a1, 56
+  %v2 = mul nuw nsw i64 %v1, %v0
+  ret i64 %v2
+}
+
+
+; Checks for signed multiplication.
+
+; 16 x 16 = 64
+; CHECK-LABEL: f10:
+; CHECK: r1:0 = mpy(
+define i64 @f10(i16 signext %a0, i16 signext %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = sext i16 %a0 to i64
+  %v1 = sext i16 %a1 to i64
+  %v2 = mul nsw i64 %v1, %v0
+  ret i64 %v2
+}
+
+; 32 x 32 = 64
+; CHECK-LABEL: f11:
+; CHECK: r1:0 = mpy(
+define i64 @f11(i32 %a0, i32 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = sext i32 %a0 to i64
+  %v1 = sext i32 %a1 to i64
+  %v2 = mul nsw i64 %v1, %v0
+  ret i64 %v2
+}
+
+; Given unsigned int w[2], unsigned short h[4], unsigned char c[8], the tests
+; below check for the generation of dpmpyss_s0.
+; w[0] * h[0]
+; CHECK-LABEL: f12:
+; CHECK: = sxth
+; CHECK: r1:0 = mpy(
+define i64 @f12(i64 %a0, i64 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = shl i64 %a0, 32
+  %v1 = ashr exact i64 %v0, 32
+  %v2 = shl i64 %a1, 48
+  %v3 = ashr exact i64 %v2, 48
+  %v4 = mul nsw i64 %v3, %v1
+  ret i64 %v4
+}
+
+; w[0] * h[1]
+; CHECK-LABEL: f13:
+; CHECK: = asrh
+; CHECK: r1:0 = mpy(
+define i64 @f13(i64 %a0, i64 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = shl i64 %a0, 32
+  %v1 = ashr exact i64 %v0, 32
+  %v2 = trunc i64 %a1 to i32
+  %v3 = ashr i32 %v2, 16
+  %v4 = sext i32 %v3 to i64
+  %v5 = mul nsw i64 %v1, %v4
+  ret i64 %v5
+}
+
+; w[0] * h[2]
+; CHECK-LABEL: f14:
+; CHECK: = extract(
+; CHECK: r1:0 = mpy(
+define i64 @f14(i64 %a0, i64 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = shl i64 %a0, 32
+  %v1 = ashr exact i64 %v0, 32
+  %v2 = lshr i64 %a1, 32
+  %v3 = shl nuw nsw i64 %v2, 16
+  %v4 = trunc i64 %v3 to i32
+  %v5 = ashr exact i32 %v4, 16
+  %v6 = sext i32 %v5 to i64
+  %v7 = mul nsw i64 %v1, %v6
+  ret i64 %v7
+}
+
+; w[0] * h[3]
+; CHECK-LABEL: f15:
+; CHECK: = sxth(
+; CHECK: r1:0 = mpy(
+define i64 @f15(i64 %a0, i64 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = ashr i64 %a0, 32
+  %v1 = shl i64 %a1, 48
+  %v2 = ashr exact i64 %v1, 48
+  %v3 = mul nsw i64 %v2, %v0
+  ret i64 %v3
+}
+
+; w[1] * h[0]
+; CHECK-LABEL: f16:
+; CHECK: = asrh(
+; CHECK: r1:0 = mpy(
+define i64 @f16(i64 %a0, i64 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = ashr i64 %a0, 32
+  %v1 = trunc i64 %a1 to i32
+  %v2 = ashr i32 %v1, 16
+  %v3 = sext i32 %v2 to i64
+  %v4 = mul nsw i64 %v0, %v3
+  ret i64 %v4
+}
+
+; w[0] * c[0]
+; CHECK-LABEL: f17:
+; CHECK: = sxtb(
+; CHECK: r1:0 = mpy(
+define i64 @f17(i64 %a0, i64 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = shl i64 %a0, 32
+  %v1 = ashr exact i64 %v0, 32
+  %v2 = shl i64 %a1, 56
+  %v3 = ashr exact i64 %v2, 56
+  %v4 = mul nsw i64 %v3, %v1
+  ret i64 %v4
+}
+
+; w[0] * c[2]
+; CHECK-LABEL: f18:
+; CHECK: = extract(
+; CHECK: r1:0 = mpy(
+define i64 @f18(i64 %a0, i64 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = shl i64 %a0, 32
+  %v1 = ashr exact i64 %v0, 32
+  %v2 = lshr i64 %a1, 16
+  %v3 = shl i64 %v2, 24
+  %v4 = trunc i64 %v3 to i32
+  %v5 = ashr exact i32 %v4, 24
+  %v6 = sext i32 %v5 to i64
+  %v7 = mul nsw i64 %v1, %v6
+  ret i64 %v7
+}
+
+; w[0] * c[7]
+; CHECK-LABEL: f19:
+; CHECK: = sxtb(
+; CHECK: r1:0 = mpy(
+define i64 @f19(i64 %a0, i64 %a1) local_unnamed_addr #0 {
+b0:
+  %v0 = shl i64 %a0, 32
+  %v1 = ashr exact i64 %v0, 32
+  %v2 = lshr i64 %a1, 56
+  %v3 = shl nuw nsw i64 %v2, 24
+  %v4 = trunc i64 %v3 to i32
+  %v5 = ashr exact i32 %v4, 24
+  %v6 = sext i32 %v5 to i64
+  %v7 = mul nsw i64 %v1, %v6
+  ret i64 %v7
+}
+
+attributes #0 = { norecurse nounwind readnone "target-cpu"="hexagonv60" "target-features"="-hvx" }
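
The first pair of functions in each group (f0/f1 unsigned, f10/f11 signed) checks that a full 64-bit product of 32-bit or narrower operands is selected as a single double-register multiply rather than a 64x64 sequence or a libcall. A hedged C sketch of those shapes (function names here are illustrative):

    /* Widening multiplies: the whole 64-bit product comes from one
       dpmpyuu_s0 / dpmpyss_s0, i.e. r1:0 = mpyu(r,r) or r1:0 = mpy(r,r). */
    unsigned long long umul_32x32(unsigned a, unsigned b) {
      return (unsigned long long)a * b;   /* r1:0 = mpyu(r,r) */
    }

    long long smul_32x32(int a, int b) {
      return (long long)a * b;            /* r1:0 = mpy(r,r) */
    }

    unsigned long long umul_16x16(unsigned short a, unsigned short b) {
      return (unsigned long long)a * b;   /* still a single mpyu */
    }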

Added: llvm/trunk/test/CodeGen/Hexagon/muxii-crash.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/muxii-crash.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/muxii-crash.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/muxii-crash.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,21 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; REQUIRES: asserts
+
+; Make sure this doesn't crash.
+; CHECK: jumpr r31
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+declare void @f0() #0
+
+; Function Attrs: nounwind
+define i32 @f1(i32 %a0) #0 {
+b0:
+  %v0 = icmp slt i32 %a0, 3
+  %v1 = select i1 %v0, void ()* @f0, void ()* null
+  %v2 = ptrtoint void ()* %v1 to i32
+  ret i32 %v2
+}
+
+attributes #0 = { nounwind }
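
The interesting part of the reproducer is the select between a function address and a null pointer followed by a pointer-to-integer conversion; judging by the file name, that is the shape that used to crash mux formation. A hedged C sketch (signatures simplified, and the intptr_t cast is only there to keep the sketch portable):

    #include <stdint.h>

    void f0(void);

    /* Select between a symbol address and 0, then hand the result back
       as an integer. */
    int f1(int a0) {
      void (*p)(void) = a0 < 3 ? &f0 : (void (*)(void))0;
      return (int)(intptr_t)p;
    }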

Added: llvm/trunk/test/CodeGen/Hexagon/neg-op.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/neg-op.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/neg-op.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/neg-op.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,14 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: r{{[0-9]+}} = sub(#0,r{{[0-9]+}})
+
+; Function Attrs: nounwind
+define i32 @f0(i32 %a0) #0 {
+b0:
+  %v0 = alloca i32, align 4
+  store i32 %a0, i32* %v0, align 4
+  %v1 = load i32, i32* %v0, align 4
+  %v2 = sub nsw i32 0, %v1
+  ret i32 %v2
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/newvaluejump-postinc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/newvaluejump-postinc.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/newvaluejump-postinc.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/newvaluejump-postinc.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,111 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+; CHECK-NOT: if {{.*}} cmp{{.*}}jump
+
+%s.0 = type opaque
+%s.1 = type { i8*, i8*, %s.2*, i32, [0 x i8] }
+%s.2 = type opaque
+
+@g0 = private unnamed_addr constant [29 x i8] c"BUG: failure at %s:%d/%s()!\0A\00", align 1
+@g1 = private unnamed_addr constant [11 x i8] c"fs/namei.c\00", align 1
+@g2 = private unnamed_addr constant [8 x i8] c"putname\00", align 1
+@g3 = private unnamed_addr constant [5 x i8] c"BUG!\00", align 1
+@g4 = external global %s.0*, align 4
+
+; Function Attrs: nounwind
+define void @f0(%s.1* %a0) #0 {
+b0:
+  %v0 = alloca %s.1*, align 4
+  store %s.1* %a0, %s.1** %v0, align 4
+  br label %b1, !llvm.loop !0
+
+b1:                                               ; preds = %b0
+  %v1 = load %s.1*, %s.1** %v0, align 4
+  %v2 = getelementptr inbounds %s.1, %s.1* %v1, i32 0, i32 3
+  %v3 = load i32, i32* %v2, align 4
+  %v4 = icmp sle i32 %v3, 0
+  %v5 = xor i1 %v4, true
+  %v6 = xor i1 %v5, true
+  %v7 = zext i1 %v6 to i32
+  %v8 = call i32 @llvm.expect.i32(i32 %v7, i32 0)
+  %v9 = icmp ne i32 %v8, 0
+  br i1 %v9, label %b2, label %b5
+
+b2:                                               ; preds = %b1
+  br label %b3
+
+b3:                                               ; preds = %b2
+  %v10 = call i32 (i8*, ...) @f1(i8* getelementptr inbounds ([29 x i8], [29 x i8]* @g0, i32 0, i32 0), i8* getelementptr inbounds ([11 x i8], [11 x i8]* @g1, i32 0, i32 0), i32 246, i8* getelementptr inbounds ([8 x i8], [8 x i8]* @g2, i32 0, i32 0))
+  call void (i8*, ...) @f2(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @g3, i32 0, i32 0))
+  unreachable
+
+b4:                                               ; No predecessors!
+  br label %b5
+
+b5:                                               ; preds = %b4, %b1
+  br label %b6
+
+b6:                                               ; preds = %b5
+  %v11 = load %s.1*, %s.1** %v0, align 4
+  %v12 = getelementptr inbounds %s.1, %s.1* %v11, i32 0, i32 3
+  %v13 = load i32, i32* %v12, align 4
+  %v14 = add i32 %v13, -1
+  store i32 %v14, i32* %v12, align 4
+  %v15 = icmp sgt i32 %v14, 0
+  br i1 %v15, label %b7, label %b8
+
+b7:                                               ; preds = %b6
+  br label %b11
+
+b8:                                               ; preds = %b6
+  %v16 = load %s.1*, %s.1** %v0, align 4
+  %v17 = getelementptr inbounds %s.1, %s.1* %v16, i32 0, i32 0
+  %v18 = load i8*, i8** %v17, align 4
+  %v19 = load %s.1*, %s.1** %v0, align 4
+  %v20 = getelementptr inbounds %s.1, %s.1* %v19, i32 0, i32 4
+  %v21 = getelementptr inbounds [0 x i8], [0 x i8]* %v20, i32 0, i32 0
+  %v22 = icmp ne i8* %v18, %v21
+  br i1 %v22, label %b9, label %b10
+
+b9:                                               ; preds = %b8
+  %v23 = load %s.0*, %s.0** @g4, align 4
+  %v24 = load %s.1*, %s.1** %v0, align 4
+  %v25 = getelementptr inbounds %s.1, %s.1* %v24, i32 0, i32 0
+  %v26 = load i8*, i8** %v25, align 4
+  call void @f3(%s.0* %v23, i8* %v26)
+  %v27 = load %s.1*, %s.1** %v0, align 4
+  %v28 = bitcast %s.1* %v27 to i8*
+  call void @f4(i8* %v28)
+  br label %b11
+
+b10:                                              ; preds = %b8
+  %v29 = load %s.0*, %s.0** @g4, align 4
+  %v30 = load %s.1*, %s.1** %v0, align 4
+  %v31 = bitcast %s.1* %v30 to i8*
+  call void @f3(%s.0* %v29, i8* %v31)
+  br label %b11
+
+b11:                                              ; preds = %b10, %b9, %b7
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.expect.i32(i32, i32) #1
+
+; Function Attrs: nounwind
+declare i32 @f1(i8*, ...) #0
+
+; Function Attrs: noreturn
+declare void @f2(i8*, ...) #2
+
+; Function Attrs: nounwind
+declare void @f3(%s.0*, i8*) #0
+
+; Function Attrs: nounwind
+declare void @f4(i8*) #0
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
+attributes #2 = { noreturn }
+
+!0 = distinct !{!0, !1}
+!1 = !{!"llvm.loop.threadify", i32 101214632}

Added: llvm/trunk/test/CodeGen/Hexagon/newvaluestore2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/newvaluestore2.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/newvaluestore2.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/newvaluestore2.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,23 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; Check that we generate new value stores.
+
+; CHECK: r[[REG:[0-9]+]] = sfadd(r{{[0-9]+}},r{{[0-9]+}})
+; CHECK-NOT: }
+; CHECK: memw({{.*}}) = r[[REG]].new
+define void @f0(float %a0, float %a1) #0 {
+b0:
+  %v0 = alloca float, align 4
+  %v1 = alloca float, align 4
+  %v2 = alloca float*, align 4
+  %v3 = alloca i32, align 4
+  %v4 = load float, float* %v0, align 4
+  %v5 = load float, float* %v1, align 4
+  %v6 = fadd float %v5, %v4
+  %v7 = load i32, i32* %v3, align 4
+  %v8 = load float*, float** %v2, align 4
+  %v9 = getelementptr inbounds float, float* %v8, i32 %v7
+  store float %v6, float* %v9, align 4
+  ret void
+}
+
+attributes #0 = { nounwind }
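
The CHECK lines require the sfadd result to be stored with the .new register form inside the same packet (the CHECK-NOT: } forbids a packet boundary between the add and the store). The test itself is reduced and reads its operands from uninitialized locals; a hedged C sketch of the underlying shape is simply a computed value stored through a pointer:

    /* The float sum is produced and then stored; on Hexagon the store can
       use the .new form of the register defined earlier in the packet. */
    void store_sum(float a, float b, float *p, int i) {
      p[i] = a + b;
    }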

Added: llvm/trunk/test/CodeGen/Hexagon/no-falign-function-for-size.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/no-falign-function-for-size.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/no-falign-function-for-size.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/no-falign-function-for-size.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,11 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Don't output falign for function entries when optimizing for size.
+; CHECK-NOT: falign
+
+define i32 @f0() #0 {
+b0:
+  ret i32 0
+}
+
+attributes #0 = { optsize }

Added: llvm/trunk/test/CodeGen/Hexagon/noFalignAfterCallAtO2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/noFalignAfterCallAtO2.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/noFalignAfterCallAtO2.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/noFalignAfterCallAtO2.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,31 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+
+; Check that we don't generate .falign directives after function calls at O2.
+; We need more than one basic block for this test because MachineBlockPlacement
+; will not run on single basic block functions.
+
+declare i32 @f0()
+
+; We don't want faligns after the calls to f0.
+; CHECK:     call f0
+; CHECK-NOT: falign
+; CHECK:     call f0
+; CHECK-NOT: falign
+; CHECK:     dealloc_return
+define i32 @f1(i32 %a0) #0 {
+b0:
+  %v0 = icmp eq i32 %a0, 0
+  br i1 %v0, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  %v1 = call i32 @f0()
+  %v2 = call i32 @f0()
+  %v3 = add i32 %v1, %v2
+  ret i32 %v3
+
+b2:                                               ; preds = %b0
+  %v4 = add i32 %a0, 5
+  ret i32 %v4
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/no_struct_element.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/no_struct_element.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/no_struct_element.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/no_struct_element.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,36 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+;
+; CHECK-NOT: 4294967295
+
+target triple = "hexagon"
+
+%s.0 = type {}
+%s.1 = type { %s.2, %s.6 }
+%s.2 = type { %s.3 }
+%s.3 = type { %s.4 }
+%s.4 = type { %s.5 }
+%s.5 = type { i32 }
+%s.6 = type { %s.6*, %s.6* }
+
+@g0 = internal global %s.0 zeroinitializer, align 1
+@g1 = private unnamed_addr constant [23 x i8] c"......................\00", align 1
+
+; Function Attrs: nounwind
+define void @f0(i8* %a0) #0 {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = getelementptr inbounds i8, i8* %a0, i32 1028
+  store volatile i32 0, i32* %v0, align 4
+  %v2 = bitcast i8* %v1 to i32*
+  %v3 = load volatile i32, i32* %v0, align 4
+  store volatile i32 %v3, i32* %v2, align 4
+  %v4 = getelementptr inbounds i8, i8* %a0, i32 1032
+  %v5 = bitcast i8* %v4 to %s.1*
+  call void @f1(%s.1* %v5, i8* getelementptr inbounds ([23 x i8], [23 x i8]* @g1, i32 0, i32 0), %s.0* @g0) #0
+  ret void
+}
+
+; Function Attrs: nounwind
+declare void @f1(%s.1*, i8*, %s.0*) #0
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/noreturn-noepilog.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/noreturn-noepilog.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/noreturn-noepilog.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/noreturn-noepilog.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,32 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; Check that no epilogue is inserted after a noreturn call.
+;
+; CHECK-LABEL: f1:
+; CHECK: allocframe(r29,#0):raw
+; CHECK-NOT: deallocframe
+
+target triple = "hexagon"
+
+%s.0 = type <{ i16, i8, i8, i8 }>
+
+@g0 = internal constant %s.0 <{ i16 1, i8 2, i8 3, i8 4 }>, align 4
+
+; Function Attrs: noreturn
+declare void @f0(%s.0*, i32) #0
+
+define i64 @f1(i32 %a0, i32 %a1) {
+b0:
+  %v0 = icmp ugt i32 %a0, 3
+  br i1 %v0, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  call void @f0(%s.0* nonnull @g0, i32 %a0) #0
+  unreachable
+
+b2:                                               ; preds = %b0
+  %v1 = mul i32 %a1, 7
+  %v2 = zext i32 %v1 to i64
+  ret i64 %v2
+}
+
+attributes #0 = { noreturn }
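
Because f0 is marked noreturn, control never comes back on that path, so no deallocframe or other epilogue code needs to follow the call. A hedged C sketch with simplified signatures (the real f0 takes a struct pointer and an integer):

    /* The call in the taken branch never returns, so that branch needs no
       epilogue after the call. */
    _Noreturn void f0(unsigned);

    unsigned long long f1(unsigned a0, unsigned a1) {
      if (a0 > 3)
        f0(a0);
      return (unsigned long long)(a1 * 7);
    }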

Added: llvm/trunk/test/CodeGen/Hexagon/noreturn-notail.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/noreturn-notail.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/noreturn-notail.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/noreturn-notail.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,33 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+;
+; Check that we emit a regular call instead of a tail call for a noreturn call
+; in a function with a non-empty frame: a tail call would have to deallocate
+; the frame first, so the plain call saves those instructions.
+;
+; CHECK: call f0
+; CHECK-NOT: deallocframe
+
+target triple = "hexagon"
+
+; Function Attrs: noreturn
+declare void @f0(i32, i32*) #0
+
+declare void @f1(i32*)
+
+define i64 @f2(i32 %a0, i32 %a1) {
+b0:
+  %v0 = alloca i32
+  call void @f1(i32* %v0)
+  %v1 = icmp ugt i32 %a0, 3
+  br i1 %v1, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  tail call void @f0(i32 %a0, i32* %v0) #0
+  unreachable
+
+b2:                                               ; preds = %b0
+  %v2 = mul i32 %a1, 7
+  %v3 = zext i32 %v2 to i64
+  ret i64 %v3
+}
+
+attributes #0 = { noreturn }

Added: llvm/trunk/test/CodeGen/Hexagon/not-op.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/not-op.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/not-op.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/not-op.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,13 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: r{{[0-9]+}} = sub(#-1,r{{[0-9]+}})
+
+define i32 @f0(i32 %a0) #0 {
+b0:
+  %v0 = alloca i32, align 4
+  store i32 %a0, i32* %v0, align 4
+  %v1 = load i32, i32* %v0, align 4
+  %v2 = xor i32 %v1, -1
+  ret i32 %v2
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/ntstbit.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/ntstbit.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/ntstbit.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/ntstbit.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,32 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: !tstbit
+
+; Function Attrs: nounwind
+define i32 @f0(i32 %a0, i32 %a1, i32 %a2) #0 {
+b0:
+  %v0 = shl i32 1, %a2
+  %v1 = and i32 %v0, %a1
+  %v2 = icmp eq i32 %v1, 0
+  br i1 %v2, label %b2, label %b1
+
+b1:                                               ; preds = %b0
+  tail call void bitcast (void (...)* @f1 to void ()*)() #0
+  br label %b3
+
+b2:                                               ; preds = %b0
+  %v3 = tail call i32 bitcast (i32 (...)* @f2 to i32 ()*)() #0
+  br label %b3
+
+b3:                                               ; preds = %b2, %b1
+  %v4 = add nsw i32 %a1, 2
+  %v5 = tail call i32 bitcast (i32 (...)* @f3 to i32 (i32, i32)*)(i32 %a0, i32 %v4) #0
+  ret i32 0
+}
+
+declare void @f1(...)
+
+declare i32 @f2(...)
+
+declare i32 @f3(...)
+
+attributes #0 = { nounwind }
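
The branch condition is a single dynamically indexed bit, so the backend should materialize it as a (negated) tstbit predicate instead of an explicit shift, and, and compare against zero. A hedged C sketch with simplified callee signatures (the IR declares them as varargs):

    void f1(void);
    int  f2(void);
    int  f3(int, int);

    /* Branch on one bit of a1 selected by a2; expected to lower to a
       tstbit / !tstbit predicate. */
    int f0(int a0, int a1, int a2) {
      if (a1 & (1 << a2))
        f1();
      else
        f2();
      f3(a0, a1 + 2);
      return 0;
    }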

Added: llvm/trunk/test/CodeGen/Hexagon/nv_store_vec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/nv_store_vec.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/nv_store_vec.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/nv_store_vec.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,21 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+;
+; Check that we generate new value stores in V60.
+
+; CHECK: v{{[0-9]+}} = valign(v{{[0-9]+}},v{{[0-9]+}},r{{[0-9]+}})
+; CHECK: vmem(r{{[0-9]+}}+#{{[0-9]+}}) = v{{[0-9]+}}.new
+
+define void @f0(i16* nocapture readonly %a0, i32 %a1, i16* nocapture %a2) #0 {
+b0:
+  %v0 = bitcast i16* %a0 to <16 x i32>*
+  %v1 = bitcast i16* %a2 to <16 x i32>*
+  %v2 = load <16 x i32>, <16 x i32>* %v0, align 64
+  %v3 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v2, <16 x i32> undef, i32 %a1)
+  store <16 x i32> %v3, <16 x i32>* %v1, align 64
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32>, <16 x i32>, i32) #0
+
+attributes #0 = { nounwind readnone "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }

Added: llvm/trunk/test/CodeGen/Hexagon/opt-addr-mode-subreg-use.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/opt-addr-mode-subreg-use.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/opt-addr-mode-subreg-use.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/opt-addr-mode-subreg-use.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,196 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+target triple = "hexagon"
+
+%s.0 = type { i32*, i32*, i32* }
+%s.1 = type { i32*, i32*, i32* }
+%s.2 = type { i32*, i32**, i32**, i32**, i32***, i32* }
+%s.3 = type { i32*, i32*, i32* }
+%s.4 = type { i32*, i32*, i32* }
+%s.5 = type { i32*, i32*, i32 }
+
+; Function Attrs: nounwind optsize
+declare zeroext i1 @f0(i32*) #0 align 2
+
+; Function Attrs: nounwind optsize
+declare zeroext i1 @f1(i32*) #0 align 2
+
+; Function Attrs: optsize
+declare hidden void @f2(i32* noalias nocapture sret, i32) #1 align 2
+
+; Function Attrs: optsize
+declare hidden void @f3(i32* noalias nocapture sret, i32) #1 align 2
+
+; Function Attrs: optsize
+declare hidden void @f4(i32* noalias nocapture sret, i32) #1 align 2
+
+; Function Attrs: optsize
+declare hidden void @f5(i32* noalias nocapture sret, i32) #1 align 2
+
+; Function Attrs: optsize
+declare hidden void @f6(i32* noalias nocapture sret, i32) #1 align 2
+
+; Function Attrs: optsize
+declare hidden void @f7(i32* noalias nocapture sret, i32) #1 align 2
+
+; Function Attrs: optsize
+declare zeroext i1 @f8(i32*, i32*, i64) #1 align 2
+
+; Function Attrs: nounwind optsize
+declare i32* @f9(i32* nocapture readonly) #0 align 2
+
+; Function Attrs: optsize
+define void @f10(i32* %a0, i32* dereferenceable(64) %a1) #1 align 2 {
+b0:
+  %v0 = alloca %s.0, align 4
+  %v1 = alloca %s.1, align 4
+  %v2 = alloca %s.2, align 4
+  %v3 = alloca %s.3, align 4
+  %v4 = alloca %s.4, align 4
+  %v5 = alloca %s.5, align 8
+  br i1 undef, label %b34, label %b1
+
+b1:                                               ; preds = %b0
+  br i1 undef, label %b3, label %b2
+
+b2:                                               ; preds = %b1
+  %v6 = ptrtoint %s.0* %v0 to i32
+  %v7 = zext i32 %v6 to i64
+  %v8 = shl nuw i64 %v7, 32
+  %v9 = or i64 %v8, zext (i32 ptrtoint (void (i32*, i32)* @f2 to i32) to i64)
+  %v10 = ptrtoint %s.4* %v4 to i32
+  %v11 = zext i32 %v10 to i64
+  %v12 = shl nuw i64 %v11, 32
+  %v13 = or i64 %v12, zext (i32 ptrtoint (void (i32*, i32)* @f5 to i32) to i64)
+  %v14 = ptrtoint %s.5* %v5 to i32
+  %v15 = zext i32 %v14 to i64
+  %v16 = shl nuw i64 %v15, 32
+  %v17 = or i64 %v16, zext (i32 ptrtoint (void (i32*, i32)* @f6 to i32) to i64)
+  %v18 = ptrtoint %s.1* %v1 to i32
+  %v19 = zext i32 %v18 to i64
+  %v20 = shl nuw i64 %v19, 32
+  %v21 = or i64 %v20, zext (i32 ptrtoint (void (i32*, i32)* @f3 to i32) to i64)
+  %v22 = ptrtoint %s.2* %v2 to i32
+  %v23 = zext i32 %v22 to i64
+  %v24 = shl nuw i64 %v23, 32
+  %v25 = or i64 %v24, zext (i32 ptrtoint (void (i32*, i32)* @f4 to i32) to i64)
+  %v26 = ptrtoint %s.3* %v3 to i32
+  %v27 = zext i32 %v26 to i64
+  %v28 = shl nuw i64 %v27, 32
+  %v29 = or i64 %v28, zext (i32 ptrtoint (void (i32*, i32)* @f7 to i32) to i64)
+  %v30 = call i32* @f9(i32* nonnull null) #1
+  br i1 undef, label %b5, label %b4
+
+b3:                                               ; preds = %b1
+  unreachable
+
+b4:                                               ; preds = %b2
+  store i32* null, i32** null, align 4
+  %v31 = call zeroext i1 @f0(i32* null) #0
+  br i1 %v31, label %b6, label %b32
+
+b5:                                               ; preds = %b2
+  unreachable
+
+b6:                                               ; preds = %b4
+  br i1 undef, label %b7, label %b32
+
+b7:                                               ; preds = %b6
+  br i1 undef, label %b8, label %b32
+
+b8:                                               ; preds = %b7
+  br i1 undef, label %b9, label %b32
+
+b9:                                               ; preds = %b8
+  br i1 undef, label %b10, label %b32
+
+b10:                                              ; preds = %b9
+  %v32 = call zeroext i1 @f1(i32* null) #0
+  br i1 %v32, label %b11, label %b32
+
+b11:                                              ; preds = %b10
+  br i1 undef, label %b13, label %b12
+
+b12:                                              ; preds = %b11
+  unreachable
+
+b13:                                              ; preds = %b11
+  %v33 = call zeroext i1 @f0(i32* undef) #0
+  br i1 %v33, label %b14, label %b32
+
+b14:                                              ; preds = %b13
+  br i1 undef, label %b16, label %b15
+
+b15:                                              ; preds = %b14
+  unreachable
+
+b16:                                              ; preds = %b14
+  %v34 = call zeroext i1 @f1(i32* null) #0
+  br i1 %v34, label %b18, label %b17
+
+b17:                                              ; preds = %b16
+  unreachable
+
+b18:                                              ; preds = %b16
+  br i1 undef, label %b19, label %b32
+
+b19:                                              ; preds = %b18
+  br i1 undef, label %b26, label %b20
+
+b20:                                              ; preds = %b19
+  br i1 undef, label %b22, label %b21
+
+b21:                                              ; preds = %b20
+  br i1 undef, label %b23, label %b32
+
+b22:                                              ; preds = %b20
+  unreachable
+
+b23:                                              ; preds = %b21
+  br i1 undef, label %b24, label %b32
+
+b24:                                              ; preds = %b23
+  %v35 = call zeroext i1 @f8(i32* nonnull %a1, i32* undef, i64 undef) #1
+  br i1 %v35, label %b25, label %b32
+
+b25:                                              ; preds = %b24
+  %v36 = call zeroext i1 @f8(i32* nonnull %a1, i32* undef, i64 %v9) #1
+  unreachable
+
+b26:                                              ; preds = %b19
+  br i1 undef, label %b27, label %b32
+
+b27:                                              ; preds = %b26
+  br i1 undef, label %b28, label %b32
+
+b28:                                              ; preds = %b27
+  br i1 undef, label %b31, label %b29
+
+b29:                                              ; preds = %b28
+  %v37 = call zeroext i1 @f8(i32* nonnull %a1, i32* null, i64 %v21) #1
+  %v38 = call zeroext i1 @f8(i32* nonnull %a1, i32* undef, i64 %v25) #1
+  br i1 %v38, label %b30, label %b32
+
+b30:                                              ; preds = %b29
+  %v39 = call zeroext i1 @f8(i32* nonnull %a1, i32* undef, i64 %v29) #1
+  unreachable
+
+b31:                                              ; preds = %b28
+  %v40 = call zeroext i1 @f8(i32* nonnull %a1, i32* null, i64 %v13) #1
+  %v41 = call zeroext i1 @f8(i32* nonnull %a1, i32* undef, i64 %v17) #1
+  br i1 %v41, label %b33, label %b32
+
+b32:                                              ; preds = %b31, %b29, %b27, %b26, %b24, %b23, %b21, %b18, %b13, %b10, %b9, %b8, %b7, %b6, %b4
+  unreachable
+
+b33:                                              ; preds = %b31
+  store i32* %a0, i32** undef, align 4
+  unreachable
+
+b34:                                              ; preds = %b0
+  ret void
+}
+
+attributes #0 = { nounwind optsize }
+attributes #1 = { optsize }

Added: llvm/trunk/test/CodeGen/Hexagon/opt-glob-addrs-000.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/opt-glob-addrs-000.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/opt-glob-addrs-000.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/opt-glob-addrs-000.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,180 @@
+; RUN: llc -march=hexagon -O2 -disable-hexagon-misched < %s | FileCheck %s
+
+target triple = "hexagon-unknown--elf"
+
+; CHECK-LABEL: f1:
+; CHECK-DAG:      r16 = ##.Lg0+32767
+; CHECK-DAG:      r17 = ##g1+32767
+
+; CHECK-LABEL: LBB0_2:
+; CHECK:        {
+; CHECK-DAG:      call f0
+; CHECK-DAG:      r0 = add(r16,#-32767)
+; CHECK-DAG:      r1 = add(r17,#-32767)
+; CHECK:        }
+
+; CHECK-LABEL: LBB0_3:
+; CHECK:        {
+; CHECK-DAG:      call f0
+; CHECK-DAG:      r0 = add(r16,#-32767)
+; CHECK-DAG:      r1 = add(r17,#-32757)
+; CHECK:        }
+
+; CHECK-LABEL: LBB0_4:
+; CHECK:        {
+; CHECK-DAG:      call f0
+; CHECK-DAG:      r0 = add(r16,#-32767)
+; CHECK-DAG:      r1 = add(r17,#-32747)
+; CHECK:        }
+
+; CHECK-LABEL: LBB0_5:
+; CHECK:        {
+; CHECK-DAG:      call f0
+; CHECK-DAG:      r0 = add(r16,#-32767)
+; CHECK-DAG:      r1 = add(r17,#-32737)
+; CHECK:        }
+
+; CHECK-LABEL: LBB0_6:
+; CHECK:        {
+; CHECK-DAG:      call f0
+; CHECK-DAG:      r0 = add(r16,#-32767)
+; CHECK-DAG:      r1 = add(r17,#-32727)
+; CHECK:        }
+
+; CHECK-LABEL: LBB0_7:
+; CHECK:        {
+; CHECK-DAG:      call f0
+; CHECK-DAG:      r0 = add(r16,#-32767)
+; CHECK-DAG:      r1 = add(r17,#-32717)
+; CHECK:        }
+
+; CHECK-LABEL: LBB0_8:
+; CHECK:        {
+; CHECK-DAG:      call f0
+; CHECK-DAG:      r0 = add(r16,#-32767)
+; CHECK-DAG:      r1 = add(r17,#-32707)
+; CHECK:        }
+
+; CHECK-LABEL: LBB0_9:
+; CHECK:        {
+; CHECK-DAG:      call f0
+; CHECK-DAG:      r0 = add(r16,#-32767)
+; CHECK-DAG:      r1 = add(r17,#-32697)
+; CHECK:        }
+
+; CHECK-LABEL: LBB0_10:
+; CHECK:        {
+; CHECK-DAG:      call f0
+; CHECK-DAG:      r0 = add(r16,#-32767)
+; CHECK-DAG:      r1 = add(r17,#-32687)
+; CHECK:        }
+
+; CHECK-LABEL: LBB0_11:
+; CHECK:        {
+; CHECK-DAG:      call f0
+; CHECK-DAG:      r0 = add(r16,#-32767)
+; CHECK-DAG:      r1 = add(r17,#-32677)
+; CHECK:        }
+
+@g0 = private unnamed_addr constant [4 x i8] c"%s\0A\00", align 1
+@g1 = internal constant [10 x [10 x i8]] [[10 x i8] c"[0000]\00\00\00\00", [10 x i8] c"[0001]\00\00\00\00", [10 x i8] c"[0002]\00\00\00\00", [10 x i8] c"[0003]\00\00\00\00", [10 x i8] c"[0004]\00\00\00\00", [10 x i8] c"[0005]\00\00\00\00", [10 x i8] c"[0006]\00\00\00\00", [10 x i8] c"[0007]\00\00\00\00", [10 x i8] c"[0008]\00\00\00\00", [10 x i8] c"[0009]\00\00\00\00"], align 16
+
+declare i32 @f0(i8*, i8*)
+
+; Function Attrs: nounwind
+define i32 @f1(i32 %a0, i8** %a1) #0 {
+b0:
+  %v01 = alloca i32, align 4
+  %v12 = alloca i32, align 4
+  %v23 = alloca i8**, align 4
+  %v34 = alloca i32, align 4
+  store i32 0, i32* %v01
+  store i32 %a0, i32* %v12, align 4
+  store i8** %a1, i8*** %v23, align 4
+  %v45 = load i8**, i8*** %v23, align 4
+  %v56 = getelementptr inbounds i8*, i8** %v45, i32 1
+  %v67 = load i8*, i8** %v56, align 4
+  %v78 = call i32 @f2(i8* %v67)
+  store i32 %v78, i32* %v34, align 4
+  %v89 = load i32, i32* %v34, align 4
+  switch i32 %v89, label %b11 [
+    i32 0, label %b1
+    i32 1, label %b2
+    i32 2, label %b3
+    i32 3, label %b4
+    i32 4, label %b5
+    i32 5, label %b6
+    i32 6, label %b7
+    i32 7, label %b8
+    i32 8, label %b9
+    i32 9, label %b10
+  ]
+
+b1:                                               ; preds = %b0
+  %v910 = getelementptr inbounds [10 x [10 x i8]], [10 x [10 x i8]]* @g1, i32 0, i32 0
+  %v10 = getelementptr inbounds [10 x i8], [10 x i8]* %v910, i32 0, i32 0
+  %v11 = call i32 @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i8* %v10)
+  br label %b2
+
+b2:                                               ; preds = %b1, %b0
+  %v1211 = getelementptr inbounds [10 x [10 x i8]], [10 x [10 x i8]]* @g1, i32 0, i32 1
+  %v13 = getelementptr inbounds [10 x i8], [10 x i8]* %v1211, i32 0, i32 0
+  %v14 = call i32 @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i8* %v13)
+  br label %b3
+
+b3:                                               ; preds = %b2, %b0
+  %v15 = getelementptr inbounds [10 x [10 x i8]], [10 x [10 x i8]]* @g1, i32 0, i32 2
+  %v16 = getelementptr inbounds [10 x i8], [10 x i8]* %v15, i32 0, i32 0
+  %v17 = call i32 @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i8* %v16)
+  br label %b4
+
+b4:                                               ; preds = %b3, %b0
+  %v18 = getelementptr inbounds [10 x [10 x i8]], [10 x [10 x i8]]* @g1, i32 0, i32 3
+  %v19 = getelementptr inbounds [10 x i8], [10 x i8]* %v18, i32 0, i32 0
+  %v20 = call i32 @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i8* %v19)
+  br label %b5
+
+b5:                                               ; preds = %b4, %b0
+  %v21 = getelementptr inbounds [10 x [10 x i8]], [10 x [10 x i8]]* @g1, i32 0, i32 4
+  %v22 = getelementptr inbounds [10 x i8], [10 x i8]* %v21, i32 0, i32 0
+  %v2312 = call i32 @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i8* %v22)
+  br label %b6
+
+b6:                                               ; preds = %b5, %b0
+  %v24 = getelementptr inbounds [10 x [10 x i8]], [10 x [10 x i8]]* @g1, i32 0, i32 5
+  %v25 = getelementptr inbounds [10 x i8], [10 x i8]* %v24, i32 0, i32 0
+  %v26 = call i32 @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i8* %v25)
+  br label %b7
+
+b7:                                               ; preds = %b6, %b0
+  %v27 = getelementptr inbounds [10 x [10 x i8]], [10 x [10 x i8]]* @g1, i32 0, i32 6
+  %v28 = getelementptr inbounds [10 x i8], [10 x i8]* %v27, i32 0, i32 0
+  %v29 = call i32 @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i8* %v28)
+  br label %b8
+
+b8:                                               ; preds = %b7, %b0
+  %v30 = getelementptr inbounds [10 x [10 x i8]], [10 x [10 x i8]]* @g1, i32 0, i32 7
+  %v31 = getelementptr inbounds [10 x i8], [10 x i8]* %v30, i32 0, i32 0
+  %v32 = call i32 @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i8* %v31)
+  br label %b9
+
+b9:                                               ; preds = %b8, %b0
+  %v33 = getelementptr inbounds [10 x [10 x i8]], [10 x [10 x i8]]* @g1, i32 0, i32 8
+  %v3413 = getelementptr inbounds [10 x i8], [10 x i8]* %v33, i32 0, i32 0
+  %v35 = call i32 @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i8* %v3413)
+  br label %b10
+
+b10:                                              ; preds = %b9, %b0
+  %v36 = getelementptr inbounds [10 x [10 x i8]], [10 x [10 x i8]]* @g1, i32 0, i32 9
+  %v37 = getelementptr inbounds [10 x i8], [10 x i8]* %v36, i32 0, i32 0
+  %v38 = call i32 @f0(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @g0, i32 0, i32 0), i8* %v37)
+  br label %b11
+
+b11:                                              ; preds = %b10, %b0
+  ret i32 0
+}
+
+; Function Attrs: nounwind
+declare i32 @f2(i8*) #0
+
+attributes #0 = { nounwind }
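
The CHECK blocks above exercise the global-address/constant-extender optimization: each extended symbol is materialized once in a biased form (##.Lg0+32767 and ##g1+32767), and every later use is formed with a short add of a non-extended immediate (#-32767, #-32757, and so on, stepping by 10, the size of one [10 x i8] row), so the constant extender is paid once per symbol rather than once per call. A rough C shape of the source being compiled, given as an assumption with illustrative names:

    extern const char fmt[4];         /* stands in for the private @g0 ("%s\n") */
    extern const char rows[10][10];   /* stands in for @g1 */
    int f0(const char *, const char *);

    /* Each call passes the format string and one 10-byte row of the table;
       the backend keeps one biased base register per symbol and forms the
       row addresses with small adds. */
    void print_rows(void) {
      f0(fmt, rows[0]);
      f0(fmt, rows[1]);
      f0(fmt, rows[2]);
      /* ...and so on through rows[9], as in the switch above. */
    }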

Added: llvm/trunk/test/CodeGen/Hexagon/opt-glob-addrs-001.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/opt-glob-addrs-001.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/opt-glob-addrs-001.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/opt-glob-addrs-001.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,194 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+
+; Ensure that the second use of ##g4 does not get replaced with a register
+; holding an induction variable.
+
+; CHECK: r{{[0-9]+}} = ##g4
+; CHECK: r{{[0-9]+}} = {{.*}}##g4
+
+target triple = "hexagon-unknown--elf"
+
+@g0 = external global [450 x i32]
+@g1 = external global [842 x i32]
+@g2 = external global [750 x i32]
+@g3 = external global [750 x i32]
+@g4 = external global [750 x i32]
+@g5 = external global [750 x i32]
+@g6 = external global [750 x i32]
+@g7 = external global [750 x i32]
+@g8 = external global [750 x i32]
+@g9 = external global [750 x i32]
+@g10 = external global i32
+@g11 = external global [0 x i32]
+@g12 = external global [0 x i32]
+
+; Function Attrs: nounwind readonly
+define i32 @f0(i32 %a0) #0 {
+b0:
+  %v0 = load i32, i32* @g10, align 4, !tbaa !0
+  %v1 = icmp sgt i32 %v0, 0
+  br i1 %v1, label %b1, label %b21
+
+b1:                                               ; preds = %b0
+  %v2 = getelementptr inbounds [842 x i32], [842 x i32]* @g1, i32 0, i32 %a0
+  br label %b2
+
+b2:                                               ; preds = %b19, %b1
+  %v3 = phi i32 [ 0, %b1 ], [ %v79, %b19 ]
+  %v4 = phi i32 [ 32767, %b1 ], [ %v78, %b19 ]
+  %v5 = phi i32 [ 0, %b1 ], [ %v77, %b19 ]
+  %v6 = phi i32 [ 0, %b1 ], [ %v76, %b19 ]
+  %v7 = phi i32 [ 0, %b1 ], [ %v80, %b19 ]
+  %v8 = getelementptr inbounds [750 x i32], [750 x i32]* @g5, i32 0, i32 %v7
+  %v9 = load i32, i32* %v8, align 4, !tbaa !0
+  %v10 = icmp eq i32 %v9, 0
+  br i1 %v10, label %b19, label %b3
+
+b3:                                               ; preds = %b2
+  %v11 = getelementptr inbounds [750 x i32], [750 x i32]* @g4, i32 0, i32 %v7
+  %v12 = load i32, i32* %v11, align 4, !tbaa !0
+  %v13 = load i32, i32* %v2, align 4, !tbaa !0
+  %v14 = getelementptr inbounds [750 x i32], [750 x i32]* @g4, i32 0, i32 %v13
+  %v15 = load i32, i32* %v14, align 4, !tbaa !0
+  %v16 = icmp eq i32 %v12, %v15
+  br i1 %v16, label %b4, label %b8
+
+b4:                                               ; preds = %b3
+  %v17 = getelementptr inbounds [750 x i32], [750 x i32]* @g6, i32 0, i32 %v7
+  %v18 = load i32, i32* %v17, align 4, !tbaa !0
+  %v19 = icmp eq i32 %v18, 25
+  br i1 %v19, label %b5, label %b19
+
+b5:                                               ; preds = %b4
+  %v20 = getelementptr inbounds [750 x i32], [750 x i32]* @g2, i32 0, i32 %v7
+  %v21 = load i32, i32* %v20, align 4, !tbaa !0
+  %v22 = icmp slt i32 %v21, 19
+  br i1 %v22, label %b6, label %b19
+
+b6:                                               ; preds = %b5
+  %v23 = getelementptr inbounds [750 x i32], [750 x i32]* @g9, i32 0, i32 %v7
+  %v24 = load i32, i32* %v23, align 4, !tbaa !0
+  %v25 = icmp eq i32 %v24, 0
+  br i1 %v25, label %b19, label %b7
+
+b7:                                               ; preds = %b6
+  %v26 = getelementptr inbounds [750 x i32], [750 x i32]* @g8, i32 0, i32 %v7
+  %v27 = load i32, i32* %v26, align 4, !tbaa !0
+  %v28 = mul nsw i32 %v27, 50
+  %v29 = add nsw i32 %v28, %v3
+  br label %b19
+
+b8:                                               ; preds = %b3
+  %v30 = getelementptr inbounds [750 x i32], [750 x i32]* @g9, i32 0, i32 %v7
+  %v31 = load i32, i32* %v30, align 4, !tbaa !0
+  %v32 = icmp eq i32 %v31, 0
+  br i1 %v32, label %b13, label %b9
+
+b9:                                               ; preds = %b8
+  %v33 = getelementptr inbounds [750 x i32], [750 x i32]* @g7, i32 0, i32 %v7
+  %v34 = load i32, i32* %v33, align 4, !tbaa !0
+  %v35 = icmp eq i32 %v34, 0
+  br i1 %v35, label %b10, label %b13
+
+b10:                                              ; preds = %b9
+  %v36 = getelementptr inbounds [750 x i32], [750 x i32]* @g6, i32 0, i32 %v7
+  %v37 = load i32, i32* %v36, align 4, !tbaa !0
+  %v38 = icmp slt i32 %v37, 18
+  br i1 %v38, label %b11, label %b13
+
+b11:                                              ; preds = %b10
+  %v39 = getelementptr inbounds [0 x i32], [0 x i32]* @g11, i32 0, i32 %v37
+  %v40 = load i32, i32* %v39, align 4, !tbaa !0
+  %v41 = add nsw i32 %v40, 50
+  %v42 = getelementptr inbounds [750 x i32], [750 x i32]* @g8, i32 0, i32 %v7
+  %v43 = load i32, i32* %v42, align 4, !tbaa !0
+  %v44 = mul nsw i32 %v41, %v43
+  %v45 = icmp slt i32 %v44, %v4
+  br i1 %v45, label %b12, label %b19
+
+b12:                                              ; preds = %b11
+  br label %b19
+
+b13:                                              ; preds = %b10, %b9, %b8
+  %v46 = getelementptr inbounds [750 x i32], [750 x i32]* @g2, i32 0, i32 %v7
+  %v47 = load i32, i32* %v46, align 4, !tbaa !0
+  %v48 = and i32 %v47, 31
+  %v49 = getelementptr inbounds [0 x i32], [0 x i32]* @g12, i32 0, i32 %v48
+  %v50 = load i32, i32* %v49, align 4, !tbaa !0
+  %v51 = icmp eq i32 %v50, 0
+  br i1 %v51, label %b19, label %b14
+
+b14:                                              ; preds = %b13
+  %v52 = getelementptr inbounds [750 x i32], [750 x i32]* @g2, i32 0, i32 %v13
+  %v53 = load i32, i32* %v52, align 4, !tbaa !0
+  %v54 = icmp slt i32 %v53, 11
+  br i1 %v54, label %b15, label %b19
+
+b15:                                              ; preds = %b14
+  %v55 = getelementptr inbounds [750 x i32], [750 x i32]* @g6, i32 0, i32 %v7
+  %v56 = load i32, i32* %v55, align 4, !tbaa !0
+  %v57 = icmp slt i32 %v56, 11
+  br i1 %v57, label %b16, label %b19
+
+b16:                                              ; preds = %b15
+  %v58 = getelementptr inbounds [0 x i32], [0 x i32]* @g11, i32 0, i32 %v56
+  %v59 = load i32, i32* %v58, align 4, !tbaa !0
+  %v60 = add nsw i32 %v59, 50
+  %v61 = getelementptr inbounds [750 x i32], [750 x i32]* @g3, i32 0, i32 %v7
+  %v62 = load i32, i32* %v61, align 4, !tbaa !0
+  %v63 = getelementptr inbounds [450 x i32], [450 x i32]* @g0, i32 0, i32 %v62
+  %v64 = load i32, i32* %v63, align 4, !tbaa !0
+  %v65 = mul nsw i32 %v64, %v60
+  %v66 = sdiv i32 %v65, 2
+  %v67 = add nsw i32 %v66, %v6
+  %v68 = getelementptr inbounds [750 x i32], [750 x i32]* @g8, i32 0, i32 %v7
+  %v69 = load i32, i32* %v68, align 4, !tbaa !0
+  %v70 = icmp sgt i32 %v69, 1
+  br i1 %v70, label %b17, label %b18
+
+b17:                                              ; preds = %b16
+  %v71 = mul nsw i32 %v69, 25
+  %v72 = add nsw i32 %v71, %v67
+  br label %b18
+
+b18:                                              ; preds = %b17, %b16
+  %v73 = phi i32 [ %v72, %b17 ], [ %v67, %b16 ]
+  %v74 = tail call i32 @f1(i32 %v7, i32 %a0)
+  %v75 = add nsw i32 %v74, %v5
+  br label %b19
+
+b19:                                              ; preds = %b18, %b15, %b14, %b13, %b12, %b11, %b7, %b6, %b5, %b4, %b2
+  %v76 = phi i32 [ %v6, %b7 ], [ %v6, %b6 ], [ %v6, %b5 ], [ %v6, %b4 ], [ %v73, %b18 ], [ %v6, %b15 ], [ %v6, %b14 ], [ %v6, %b13 ], [ %v6, %b12 ], [ %v6, %b11 ], [ %v6, %b2 ]
+  %v77 = phi i32 [ %v5, %b7 ], [ %v5, %b6 ], [ %v5, %b5 ], [ %v5, %b4 ], [ %v75, %b18 ], [ %v5, %b15 ], [ %v5, %b14 ], [ %v5, %b13 ], [ %v5, %b12 ], [ %v5, %b11 ], [ %v5, %b2 ]
+  %v78 = phi i32 [ %v4, %b7 ], [ %v4, %b6 ], [ %v4, %b5 ], [ %v4, %b4 ], [ %v4, %b18 ], [ %v4, %b15 ], [ %v4, %b14 ], [ %v4, %b13 ], [ %v44, %b12 ], [ %v4, %b11 ], [ %v4, %b2 ]
+  %v79 = phi i32 [ %v29, %b7 ], [ %v3, %b6 ], [ %v3, %b5 ], [ %v3, %b4 ], [ %v3, %b18 ], [ %v3, %b15 ], [ %v3, %b14 ], [ %v3, %b13 ], [ %v3, %b12 ], [ %v3, %b11 ], [ %v3, %b2 ]
+  %v80 = add nsw i32 %v7, 1
+  %v81 = icmp slt i32 %v80, %v0
+  br i1 %v81, label %b2, label %b20
+
+b20:                                              ; preds = %b19
+  br label %b21
+
+b21:                                              ; preds = %b20, %b0
+  %v82 = phi i32 [ 0, %b0 ], [ %v79, %b20 ]
+  %v83 = phi i32 [ 32767, %b0 ], [ %v78, %b20 ]
+  %v84 = phi i32 [ 0, %b0 ], [ %v77, %b20 ]
+  %v85 = phi i32 [ 0, %b0 ], [ %v76, %b20 ]
+  %v86 = icmp eq i32 %v83, 32767
+  %v87 = sdiv i32 %v83, 2
+  %v88 = select i1 %v86, i32 0, i32 %v87
+  %v89 = add i32 %v84, %v85
+  %v90 = add i32 %v89, %v82
+  %v91 = add i32 %v90, %v88
+  ret i32 %v91
+}
+
+; Function Attrs: nounwind readonly
+declare i32 @f1(i32, i32) #0
+
+attributes #0 = { nounwind readonly }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/opt-glob-addrs-003.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/opt-glob-addrs-003.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/opt-glob-addrs-003.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/opt-glob-addrs-003.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,496 @@
+; RUN: llc -march=hexagon -O3 -verify-machineinstrs < %s
+; REQUIRES: asserts
+; Expect clean compilation.
+
+target triple = "hexagon"
+
+%s.0 = type { i16, i16, [4 x i16], i16, i16, [3 x i16], [3 x [4 x i16]], [3 x i16], [2 x [2 x i16]], i16, i16, i16, i16, [2 x i16], i16, i16, [3 x i16], [17 x i16] }
+
+@g0 = external global i16
+@g1 = external global [2 x i16]
+@g2 = external global [10 x i16]
+@g3 = external global %s.0
+@g4 = external global [160 x i16]
+@g5 = external global i16
+@g6 = external global i16
+@g7 = external global i16
+@g8 = external global i16
+@g9 = external global i16
+@g10 = external global i16
+@g11 = external global i16
+@g12 = external global [192 x i16]
+@g13 = external global [10 x i32]
+@g14 = external global i16
+
+; Function Attrs: nounwind
+define signext i16 @f0(i16 signext %a0, i16* nocapture readonly %a1) #0 {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = alloca i32, align 4
+  store i32 327685, i32* %v0, align 4
+  store i32 1048592, i32* %v1, align 4
+  %v2 = sext i16 %a0 to i32
+  switch i32 %v2, label %b35 [
+    i32 0, label %b1
+    i32 1, label %b9
+    i32 2, label %b11
+    i32 3, label %b15
+    i32 4, label %b20
+    i32 5, label %b30
+  ]
+
+b1:                                               ; preds = %b0
+  %v3 = load i16, i16* %a1, align 2, !tbaa !0
+  %v4 = icmp eq i16 %v3, -1
+  br i1 %v4, label %b2, label %b4
+
+b2:                                               ; preds = %b1
+  %v5 = load i16, i16* @g0, align 2, !tbaa !0
+  %v6 = add i16 %v5, 1
+  store i16 %v6, i16* @g0, align 2, !tbaa !0
+  %v7 = icmp sgt i16 %v6, 2
+  br i1 %v7, label %b3, label %b5
+
+b3:                                               ; preds = %b2
+  store i16 3, i16* @g0, align 2, !tbaa !0
+  br label %b35
+
+b4:                                               ; preds = %b1
+  store i16 0, i16* @g0, align 2, !tbaa !0
+  br label %b5
+
+b5:                                               ; preds = %b4, %b2
+  %v8 = load i16, i16* %a1, align 2, !tbaa !0
+  %v9 = icmp ne i16 %v8, 0
+  %v10 = load i16, i16* getelementptr inbounds ([2 x i16], [2 x i16]* @g1, i32 0, i32 0), align 2
+  %v11 = icmp eq i16 %v10, 0
+  %v12 = and i1 %v9, %v11
+  br i1 %v12, label %b6, label %b35
+
+b6:                                               ; preds = %b5
+  %v13 = bitcast i32* %v0 to i16*
+  %v14 = bitcast i32* %v1 to i16*
+  call void @f1(i16* %v13, i16* %v14, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 0), i16* getelementptr inbounds (%s.0, %s.0* @g3, i32 0, i32 2, i32 0), i16* getelementptr inbounds ([160 x i16], [160 x i16]* @g4, i32 0, i32 0))
+  %v15 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 0), align 2, !tbaa !0
+  %v16 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 1), align 2, !tbaa !0
+  %v17 = icmp sgt i16 %v15, %v16
+  %v18 = select i1 %v17, i16 %v15, i16 %v16
+  %v19 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 2), align 2, !tbaa !0
+  %v20 = icmp sgt i16 %v18, %v19
+  %v21 = select i1 %v20, i16 %v18, i16 %v19
+  %v22 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 3), align 2, !tbaa !0
+  %v23 = icmp sgt i16 %v21, %v22
+  %v24 = select i1 %v23, i16 %v21, i16 %v22
+  %v25 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 4), align 2, !tbaa !0
+  %v26 = icmp sle i16 %v24, %v25
+  %v27 = xor i1 %v23, true
+  %v28 = or i1 %v26, %v27
+  %v29 = select i1 %v26, i16 %v25, i16 %v22
+  %v30 = select i1 %v28, i16 %v29, i16 %v21
+  %v31 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 5), align 2, !tbaa !0
+  %v32 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 6), align 2, !tbaa !0
+  %v33 = icmp slt i16 %v31, %v32
+  %v34 = select i1 %v33, i16 %v31, i16 %v32
+  %v35 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 7), align 2, !tbaa !0
+  %v36 = icmp slt i16 %v34, %v35
+  %v37 = select i1 %v36, i16 %v34, i16 %v35
+  %v38 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 8), align 2, !tbaa !0
+  %v39 = icmp slt i16 %v37, %v38
+  %v40 = select i1 %v39, i16 %v37, i16 %v38
+  %v41 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 9), align 2, !tbaa !0
+  %v42 = icmp sge i16 %v40, %v41
+  %v43 = xor i1 %v39, true
+  %v44 = or i1 %v42, %v43
+  %v45 = select i1 %v42, i16 %v41, i16 %v38
+  %v46 = select i1 %v44, i16 %v45, i16 %v37
+  %v47 = icmp slt i16 %v30, %v46
+  br i1 %v47, label %b7, label %b35
+
+b7:                                               ; preds = %b6
+  %v48 = load i16, i16* @g5, align 2, !tbaa !0
+  %v49 = icmp eq i16 %v48, 4
+  %v50 = load i16, i16* @g6, align 2
+  %v51 = icmp eq i16 %v50, 0
+  %v52 = and i1 %v49, %v51
+  br i1 %v52, label %b35, label %b8
+
+b8:                                               ; preds = %b7
+  br label %b35
+
+b9:                                               ; preds = %b0
+  store i16 0, i16* @g0, align 2, !tbaa !0
+  %v53 = load i16, i16* %a1, align 2, !tbaa !0
+  %v54 = icmp eq i16 %v53, 0
+  %v55 = zext i1 %v54 to i16
+  %v56 = getelementptr i16, i16* %a1, i32 1
+  %v57 = load i16, i16* %v56, align 2, !tbaa !0
+  %v58 = icmp eq i16 %v57, 0
+  %v59 = zext i1 %v58 to i16
+  %v60 = add nuw nsw i16 %v59, %v55
+  %v61 = getelementptr inbounds i16, i16* %a1, i32 2
+  %v62 = load i16, i16* %v61, align 2, !tbaa !0
+  %v63 = icmp ult i16 %v62, 256
+  %v64 = zext i1 %v63 to i16
+  %v65 = add nuw nsw i16 %v64, %v60
+  %v66 = load i16, i16* getelementptr inbounds ([2 x i16], [2 x i16]* @g1, i32 0, i32 0), align 2
+  %v67 = icmp eq i16 %v65, 3
+  %v68 = icmp ne i16 %v66, 0
+  %v69 = or i1 %v68, %v67
+  %v70 = load i16, i16* getelementptr inbounds (%s.0, %s.0* @g3, i32 0, i32 9), align 2
+  %v71 = icmp eq i16 %v70, 3
+  %v72 = or i1 %v71, %v69
+  br i1 %v72, label %b35, label %b10
+
+b10:                                              ; preds = %b9
+  br label %b35
+
+b11:                                              ; preds = %b0
+  store i16 0, i16* @g0, align 2, !tbaa !0
+  %v73 = load i16, i16* %a1, align 2, !tbaa !0
+  %v74 = icmp eq i16 %v73, 0
+  %v75 = zext i1 %v74 to i16
+  %v76 = getelementptr i16, i16* %a1, i32 1
+  %v77 = load i16, i16* %v76, align 2, !tbaa !0
+  %v78 = icmp eq i16 %v77, 0
+  %v79 = zext i1 %v78 to i16
+  %v80 = add nuw nsw i16 %v79, %v75
+  %v81 = getelementptr inbounds i16, i16* %a1, i32 2
+  %v82 = load i16, i16* %v81, align 2, !tbaa !0
+  %v83 = icmp ult i16 %v82, 256
+  %v84 = zext i1 %v83 to i16
+  %v85 = add nuw nsw i16 %v84, %v80
+  %v86 = icmp ne i16 %v85, 3
+  %v87 = load i16, i16* getelementptr inbounds ([2 x i16], [2 x i16]* @g1, i32 0, i32 0), align 2
+  %v88 = icmp eq i16 %v87, 0
+  %v89 = and i1 %v88, %v86
+  br i1 %v89, label %b12, label %b35
+
+b12:                                              ; preds = %b11
+  %v90 = load i16, i16* @g5, align 2, !tbaa !0
+  switch i16 %v90, label %b14 [
+    i16 1, label %b35
+    i16 2, label %b13
+  ]
+
+b13:                                              ; preds = %b12
+  %v91 = load i16, i16* @g7, align 2, !tbaa !0
+  %v92 = load i16, i16* @g6, align 2
+  %v93 = or i16 %v92, %v91
+  %v94 = icmp eq i16 %v93, 0
+  br i1 %v94, label %b35, label %b14
+
+b14:                                              ; preds = %b13, %b12
+  br label %b35
+
+b15:                                              ; preds = %b0
+  store i16 0, i16* @g0, align 2, !tbaa !0
+  %v95 = load i16, i16* %a1, align 2, !tbaa !0
+  %v96 = icmp eq i16 %v95, 0
+  %v97 = zext i1 %v96 to i16
+  %v98 = getelementptr i16, i16* %a1, i32 1
+  %v99 = load i16, i16* %v98, align 2, !tbaa !0
+  %v100 = icmp eq i16 %v99, 0
+  %v101 = zext i1 %v100 to i16
+  %v102 = add nuw nsw i16 %v101, %v97
+  %v103 = getelementptr i16, i16* %a1, i32 2
+  %v104 = load i16, i16* %v103, align 2, !tbaa !0
+  %v105 = icmp eq i16 %v104, 0
+  %v106 = zext i1 %v105 to i16
+  %v107 = add nuw nsw i16 %v106, %v102
+  %v108 = getelementptr i16, i16* %a1, i32 3
+  %v109 = load i16, i16* %v108, align 2, !tbaa !0
+  %v110 = icmp eq i16 %v109, 0
+  %v111 = zext i1 %v110 to i16
+  %v112 = add nuw nsw i16 %v111, %v107
+  %v113 = getelementptr i16, i16* %a1, i32 4
+  %v114 = load i16, i16* %v113, align 2, !tbaa !0
+  %v115 = icmp eq i16 %v114, 0
+  %v116 = zext i1 %v115 to i16
+  %v117 = add nuw nsw i16 %v116, %v112
+  %v118 = icmp eq i16 %v117, 5
+  br i1 %v118, label %b35, label %b16
+
+b16:                                              ; preds = %b15
+  %v119 = load i16, i16* getelementptr inbounds (%s.0, %s.0* @g3, i32 0, i32 3), align 2, !tbaa !4
+  switch i16 %v119, label %b17 [
+    i16 120, label %b19
+    i16 115, label %b19
+  ]
+
+b17:                                              ; preds = %b16
+  %v120 = icmp sgt i16 %v119, 100
+  br i1 %v120, label %b35, label %b18
+
+b18:                                              ; preds = %b17
+  tail call void @f2(i16* getelementptr inbounds (%s.0, %s.0* @g3, i32 0, i32 2, i32 0), i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 0))
+  %v121 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 0), align 2, !tbaa !0
+  %v122 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 1), align 2, !tbaa !0
+  %v123 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 2), align 2, !tbaa !0
+  %v124 = icmp sgt i16 %v122, %v123
+  %v125 = select i1 %v124, i16 %v122, i16 %v123
+  %v126 = icmp sgt i16 %v121, %v125
+  %v127 = select i1 %v126, i16 %v121, i16 %v125
+  %v128 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 6), align 2, !tbaa !0
+  %v129 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 7), align 2, !tbaa !0
+  %v130 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 8), align 2, !tbaa !0
+  %v131 = icmp slt i16 %v129, %v130
+  %v132 = select i1 %v131, i16 %v129, i16 %v130
+  %v133 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 9), align 2, !tbaa !0
+  %v134 = icmp slt i16 %v132, %v133
+  %v135 = select i1 %v134, i16 %v132, i16 %v133
+  %v136 = icmp slt i16 %v128, %v135
+  %v137 = select i1 %v136, i16 %v128, i16 %v135
+  %v138 = icmp slt i16 %v127, %v137
+  br i1 %v138, label %b19, label %b35
+
+b19:                                              ; preds = %b18, %b16, %b16
+  br label %b35
+
+b20:                                              ; preds = %b0
+  store i16 0, i16* @g0, align 2, !tbaa !0
+  %v139 = load i16, i16* %a1, align 2, !tbaa !0
+  %v140 = icmp eq i16 %v139, 0
+  %v141 = zext i1 %v140 to i16
+  %v142 = getelementptr i16, i16* %a1, i32 1
+  %v143 = load i16, i16* %v142, align 2, !tbaa !0
+  %v144 = icmp eq i16 %v143, 0
+  %v145 = zext i1 %v144 to i16
+  %v146 = add nuw nsw i16 %v145, %v141
+  %v147 = getelementptr i16, i16* %a1, i32 2
+  %v148 = load i16, i16* %v147, align 2, !tbaa !0
+  %v149 = icmp eq i16 %v148, 0
+  %v150 = zext i1 %v149 to i16
+  %v151 = add nuw nsw i16 %v150, %v146
+  %v152 = getelementptr i16, i16* %a1, i32 3
+  %v153 = load i16, i16* %v152, align 2, !tbaa !0
+  %v154 = icmp eq i16 %v153, 0
+  %v155 = zext i1 %v154 to i16
+  %v156 = add nuw nsw i16 %v155, %v151
+  %v157 = getelementptr i16, i16* %a1, i32 4
+  %v158 = load i16, i16* %v157, align 2, !tbaa !0
+  %v159 = icmp eq i16 %v158, 0
+  %v160 = zext i1 %v159 to i16
+  %v161 = add nuw nsw i16 %v160, %v156
+  %v162 = getelementptr i16, i16* %a1, i32 5
+  %v163 = load i16, i16* %v162, align 2, !tbaa !0
+  %v164 = icmp eq i16 %v163, 0
+  %v165 = zext i1 %v164 to i16
+  %v166 = add nuw nsw i16 %v165, %v161
+  %v167 = getelementptr i16, i16* %a1, i32 6
+  %v168 = load i16, i16* %v167, align 2, !tbaa !0
+  %v169 = icmp eq i16 %v168, 0
+  %v170 = zext i1 %v169 to i16
+  %v171 = add nuw nsw i16 %v170, %v166
+  %v172 = getelementptr i16, i16* %a1, i32 7
+  %v173 = load i16, i16* %v172, align 2, !tbaa !0
+  %v174 = icmp eq i16 %v173, 0
+  %v175 = zext i1 %v174 to i16
+  %v176 = add i16 %v175, %v171
+  %v177 = getelementptr i16, i16* %a1, i32 8
+  %v178 = load i16, i16* %v177, align 2, !tbaa !0
+  %v179 = icmp eq i16 %v178, 0
+  %v180 = zext i1 %v179 to i16
+  %v181 = add i16 %v180, %v176
+  %v182 = getelementptr i16, i16* %a1, i32 9
+  %v183 = load i16, i16* %v182, align 2, !tbaa !0
+  %v184 = icmp eq i16 %v183, 0
+  %v185 = zext i1 %v184 to i16
+  %v186 = add i16 %v185, %v181
+  %v187 = getelementptr inbounds i16, i16* %a1, i32 10
+  %v188 = load i16, i16* %v187, align 2, !tbaa !0
+  %v189 = icmp ult i16 %v188, 32
+  %v190 = zext i1 %v189 to i16
+  %v191 = add i16 %v190, %v186
+  %v192 = icmp eq i16 %v191, 11
+  br i1 %v192, label %b35, label %b21
+
+b21:                                              ; preds = %b20
+  tail call void @f3(i16* getelementptr inbounds (%s.0, %s.0* @g3, i32 0, i32 2, i32 0), i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 0))
+  %v193 = load i16, i16* @g8, align 2, !tbaa !0
+  %v194 = icmp eq i16 %v193, 0
+  br i1 %v194, label %b22, label %b35
+
+b22:                                              ; preds = %b21
+  %v195 = load i16, i16* getelementptr inbounds (%s.0, %s.0* @g3, i32 0, i32 3), align 2, !tbaa !4
+  %v196 = icmp sgt i16 %v195, 100
+  br i1 %v196, label %b35, label %b23
+
+b23:                                              ; preds = %b22
+  %v197 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 0), align 2, !tbaa !0
+  %v198 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 1), align 2, !tbaa !0
+  %v199 = icmp sgt i16 %v197, %v198
+  %v200 = select i1 %v199, i16 %v197, i16 %v198
+  %v201 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 4), align 2, !tbaa !0
+  %v202 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 5), align 2, !tbaa !0
+  %v203 = icmp slt i16 %v201, %v202
+  %v204 = select i1 %v203, i16 %v201, i16 %v202
+  %v205 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 6), align 2, !tbaa !0
+  %v206 = icmp slt i16 %v204, %v205
+  %v207 = select i1 %v206, i16 %v204, i16 %v205
+  %v208 = icmp slt i16 %v200, %v207
+  br i1 %v208, label %b24, label %b35
+
+b24:                                              ; preds = %b23
+  %v209 = load i16, i16* @g5, align 2, !tbaa !0
+  switch i16 %v209, label %b26 [
+    i16 1, label %b35
+    i16 2, label %b25
+  ]
+
+b25:                                              ; preds = %b24
+  %v210 = load i16, i16* @g7, align 2, !tbaa !0
+  %v211 = load i16, i16* @g6, align 2
+  %v212 = or i16 %v211, %v210
+  %v213 = icmp eq i16 %v212, 0
+  br i1 %v213, label %b35, label %b27
+
+b26:                                              ; preds = %b24
+  %v214 = load i16, i16* @g6, align 2
+  %v215 = icmp eq i16 %v214, 0
+  br i1 %v215, label %b28, label %b35
+
+b27:                                              ; preds = %b25
+  %v216 = load i16, i16* @g9, align 2
+  %v217 = icmp eq i16 %v216, 0
+  br i1 %v217, label %b28, label %b35
+
+b28:                                              ; preds = %b27, %b26
+  %v218 = tail call signext i16 @f4(i16 signext %v195, i16 signext 20)
+  store i16 %v218, i16* @g10, align 2, !tbaa !0
+  %v219 = load i16, i16* @g11, align 2, !tbaa !0
+  %v220 = tail call signext i16 @f6(i16 signext %v218, i16 signext %v219)
+  %v221 = tail call signext i16 @f5(i16 signext %v220)
+  %v222 = icmp sgt i16 %v221, 15
+  br i1 %v222, label %b29, label %b35
+
+b29:                                              ; preds = %b28
+  call void @llvm.memset.p0i8.i32(i8* align 2 bitcast ([192 x i16]* @g12 to i8*), i8 0, i32 256, i1 false)
+  call void @llvm.memset.p0i8.i32(i8* align 4 bitcast ([10 x i32]* @g13 to i8*), i8 0, i32 40, i1 false)
+  tail call void @f7()
+  br label %b35
+
+b30:                                              ; preds = %b0
+  store i16 0, i16* @g0, align 2, !tbaa !0
+  %v223 = load i16, i16* %a1, align 2, !tbaa !0
+  %v224 = icmp eq i16 %v223, 0
+  %v225 = zext i1 %v224 to i16
+  %v226 = getelementptr i16, i16* %a1, i32 1
+  %v227 = load i16, i16* %v226, align 2, !tbaa !0
+  %v228 = icmp eq i16 %v227, 0
+  %v229 = zext i1 %v228 to i16
+  %v230 = add nuw nsw i16 %v229, %v225
+  %v231 = getelementptr i16, i16* %a1, i32 2
+  %v232 = load i16, i16* %v231, align 2, !tbaa !0
+  %v233 = icmp eq i16 %v232, 0
+  %v234 = zext i1 %v233 to i16
+  %v235 = add nuw nsw i16 %v234, %v230
+  %v236 = getelementptr i16, i16* %a1, i32 3
+  %v237 = load i16, i16* %v236, align 2, !tbaa !0
+  %v238 = icmp eq i16 %v237, 0
+  %v239 = zext i1 %v238 to i16
+  %v240 = add nuw nsw i16 %v239, %v235
+  %v241 = getelementptr i16, i16* %a1, i32 4
+  %v242 = load i16, i16* %v241, align 2, !tbaa !0
+  %v243 = icmp eq i16 %v242, 0
+  %v244 = zext i1 %v243 to i16
+  %v245 = add nuw nsw i16 %v244, %v240
+  %v246 = getelementptr i16, i16* %a1, i32 5
+  %v247 = load i16, i16* %v246, align 2, !tbaa !0
+  %v248 = icmp eq i16 %v247, 0
+  %v249 = zext i1 %v248 to i16
+  %v250 = add nuw nsw i16 %v249, %v245
+  %v251 = getelementptr i16, i16* %a1, i32 6
+  %v252 = load i16, i16* %v251, align 2, !tbaa !0
+  %v253 = icmp eq i16 %v252, 0
+  %v254 = zext i1 %v253 to i16
+  %v255 = add nuw nsw i16 %v254, %v250
+  %v256 = getelementptr i16, i16* %a1, i32 7
+  %v257 = load i16, i16* %v256, align 2, !tbaa !0
+  %v258 = icmp eq i16 %v257, 0
+  %v259 = zext i1 %v258 to i16
+  %v260 = add i16 %v259, %v255
+  %v261 = getelementptr i16, i16* %a1, i32 8
+  %v262 = load i16, i16* %v261, align 2, !tbaa !0
+  %v263 = icmp eq i16 %v262, 0
+  %v264 = zext i1 %v263 to i16
+  %v265 = add i16 %v264, %v260
+  %v266 = getelementptr i16, i16* %a1, i32 9
+  %v267 = load i16, i16* %v266, align 2, !tbaa !0
+  %v268 = icmp eq i16 %v267, 0
+  %v269 = zext i1 %v268 to i16
+  %v270 = add i16 %v269, %v265
+  %v271 = getelementptr inbounds i16, i16* %a1, i32 10
+  %v272 = load i16, i16* %v271, align 2, !tbaa !0
+  %v273 = icmp ult i16 %v272, 32
+  %v274 = zext i1 %v273 to i16
+  %v275 = add i16 %v274, %v270
+  %v276 = icmp eq i16 %v275, 11
+  br i1 %v276, label %b35, label %b31
+
+b31:                                              ; preds = %b30
+  tail call void @f3(i16* getelementptr inbounds (%s.0, %s.0* @g3, i32 0, i32 2, i32 0), i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 0))
+  %v277 = load i16, i16* @g14, align 2, !tbaa !0
+  %v278 = icmp eq i16 %v277, 0
+  br i1 %v278, label %b32, label %b34
+
+b32:                                              ; preds = %b31
+  %v279 = load i16, i16* getelementptr inbounds (%s.0, %s.0* @g3, i32 0, i32 3), align 2, !tbaa !4
+  %v280 = icmp sgt i16 %v279, 100
+  br i1 %v280, label %b35, label %b33
+
+b33:                                              ; preds = %b32
+  %v281 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 0), align 2, !tbaa !0
+  %v282 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 1), align 2, !tbaa !0
+  %v283 = icmp sgt i16 %v281, %v282
+  %v284 = select i1 %v283, i16 %v281, i16 %v282
+  %v285 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 4), align 2, !tbaa !0
+  %v286 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 5), align 2, !tbaa !0
+  %v287 = icmp slt i16 %v285, %v286
+  %v288 = select i1 %v287, i16 %v285, i16 %v286
+  %v289 = load i16, i16* getelementptr inbounds ([10 x i16], [10 x i16]* @g2, i32 0, i32 6), align 2, !tbaa !0
+  %v290 = icmp slt i16 %v288, %v289
+  %v291 = select i1 %v290, i16 %v288, i16 %v289
+  %v292 = icmp slt i16 %v284, %v291
+  br i1 %v292, label %b34, label %b35
+
+b34:                                              ; preds = %b33, %b31
+  br label %b35
+
+b35:                                              ; preds = %b34, %b33, %b32, %b30, %b29, %b28, %b27, %b26, %b25, %b24, %b23, %b22, %b21, %b20, %b19, %b18, %b17, %b15, %b14, %b13, %b12, %b11, %b10, %b9, %b8, %b7, %b6, %b5, %b3, %b0
+  %v293 = phi i16 [ 0, %b34 ], [ 1, %b29 ], [ 0, %b19 ], [ 0, %b14 ], [ 0, %b10 ], [ 0, %b3 ], [ 0, %b8 ], [ 1, %b5 ], [ 1, %b6 ], [ 1, %b9 ], [ 1, %b11 ], [ 1, %b12 ], [ 1, %b15 ], [ 1, %b17 ], [ 1, %b18 ], [ 1, %b20 ], [ 1, %b22 ], [ 1, %b23 ], [ 1, %b24 ], [ 0, %b27 ], [ 0, %b28 ], [ 0, %b21 ], [ 1, %b30 ], [ 1, %b32 ], [ 1, %b33 ], [ 0, %b0 ], [ 1, %b7 ], [ 1, %b13 ], [ 1, %b25 ], [ 0, %b26 ]
+  ret i16 %v293
+}
+
+; Function Attrs: nounwind
+declare void @f1(i16*, i16*, i16*, i16*, i16*) #0
+
+; Function Attrs: nounwind
+declare void @f2(i16*, i16*) #0
+
+; Function Attrs: nounwind
+declare void @f3(i16*, i16*) #0
+
+; Function Attrs: nounwind
+declare signext i16 @f4(i16 signext, i16 signext) #0
+
+; Function Attrs: nounwind
+declare signext i16 @f5(i16 signext) #0
+
+; Function Attrs: nounwind
+declare signext i16 @f6(i16 signext, i16 signext) #0
+
+; Function Attrs: nounwind
+declare void @f7() #0
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memset.p0i8.i32(i8* nocapture writeonly, i8, i32, i1) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { argmemonly nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"short", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!5, !1, i64 12}
+!5 = !{!"_ZTS6PACKET", !1, i64 0, !1, i64 2, !2, i64 4, !1, i64 12, !1, i64 14, !2, i64 16, !2, i64 22, !2, i64 46, !2, i64 52, !1, i64 60, !1, i64 62, !1, i64 64, !1, i64 66, !2, i64 68, !1, i64 72, !1, i64 74, !2, i64 76, !2, i64 82}

Added: llvm/trunk/test/CodeGen/Hexagon/opt-sext-intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/opt-sext-intrinsics.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/opt-sext-intrinsics.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/opt-sext-intrinsics.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,28 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+; CHECK-NOT: sxth
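+; The shl-by-16 / ashr-by-16 pairs below are sign-extend-in-register
+; idioms. The expectation is (presumably) that the backend knows the
+; result of A2.addh.l16.sat.ll is already sign-extended from 16 bits,
+; so no explicit sxth needs to be emitted for them.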
+
+target triple = "hexagon"
+
+@g0 = common global i32 0, align 4
+
+define i32 @f0(i32 %a0, i32 %a1) {
+b0:
+  %v0 = tail call i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32 %a0, i32 %a1)
+  %v1 = tail call i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32 %a1, i32 %a0)
+  %v2 = shl i32 %v0, 16
+  %v3 = ashr exact i32 %v2, 16
+  %v4 = shl i32 %v1, 16
+  %v5 = ashr exact i32 %v4, 16
+  %v6 = tail call i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32 %v0, i32 %v1)
+  %v7 = shl i32 %v6, 16
+  %v8 = ashr exact i32 %v7, 16
+  %v9 = load i32, i32* @g0, align 4
+  %v10 = icmp ne i32 %v9, %v6
+  %v11 = zext i1 %v10 to i32
+  ret i32 %v11
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.A2.addh.l16.sat.ll(i32, i32) #0
+
+attributes #0 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/packed-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/packed-store.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/packed-store.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/packed-store.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,44 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; The halfword field here is only byte-aligned; honor that by not
+; accessing it with memh.
+; CHECK-NOT: memh
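+;
+; A rough C equivalent (assumed, not taken from the original source):
+;   #pragma pack(1)
+;   struct s { short a; char b; short c; } g0;  // field c at byte offset 3
+;   g0.b = 1; g0.c += 1;
+;   g0.c |= a0 ? 0x1800 : 0x800;                // 6144 / 2048 in the IR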
+
+target triple = "hexagon-unknown-linux-gnu"
+
+%s.0 = type <{ i16, i8, i16 }>
+
+@g0 = common global %s.0 zeroinitializer, align 1
+
+; Function Attrs: nounwind
+define i32 @f0(i32 %a0) #0 {
+b0:
+  %v0 = alloca i32, align 4
+  store i32 %a0, i32* %v0, align 4
+  store i8 1, i8* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 1), align 1
+  %v1 = load i16, i16* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 2), align 1
+  %v2 = add i16 %v1, 1
+  store i16 %v2, i16* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 2), align 1
+  %v3 = load i32, i32* %v0, align 4
+  %v4 = icmp ne i32 %v3, 0
+  br i1 %v4, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  %v5 = load i16, i16* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 2), align 1
+  %v6 = zext i16 %v5 to i32
+  %v7 = or i32 %v6, 6144
+  %v8 = trunc i32 %v7 to i16
+  store i16 %v8, i16* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 2), align 1
+  br label %b3
+
+b2:                                               ; preds = %b0
+  %v9 = load i16, i16* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 2), align 1
+  %v10 = zext i16 %v9 to i32
+  %v11 = or i32 %v10, 2048
+  %v12 = trunc i32 %v11 to i16
+  store i16 %v12, i16* getelementptr inbounds (%s.0, %s.0* @g0, i32 0, i32 2), align 1
+  br label %b3
+
+b3:                                               ; preds = %b2, %b1
+  ret i32 0
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/packetize-allocframe.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/packetize-allocframe.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/packetize-allocframe.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/packetize-allocframe.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,72 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+
+; The purpose of this test is to make sure that the packetizer ignores
+; CFI instructions while forming the packet for allocframe. Refer to
+; 7d7d99622, which replaced PROLOG_LABEL with CFI_INSTRUCTION.
+
+@g0 = external constant i8*
+
+; We used to emit:
+;      {
+;        allocframe(#0)
+;      }
+;      {
+;         r0 = #4
+; But we can put more instructions in the first packet.
+
+; CHECK:      {
+; CHECK-NEXT:   call f1
+; CHECK-NEXT:   r0 = #4
+; CHECK-NEXT:   allocframe(#0)
+; CHECK-NEXT: }
+
+define i32 @f0() personality i8* bitcast (i32 (...)* @f3 to i8*) {
+b0:
+  %v0 = tail call i8* @f1(i32 4) #1
+  %v1 = bitcast i8* %v0 to i32*
+  store i32 20, i32* %v1, align 4, !tbaa !0
+  invoke void @f2(i8* %v0, i8* bitcast (i8** @g0 to i8*), i8* null) #2
+          to label %b4 unwind label %b1
+
+b1:                                               ; preds = %b0
+  %v2 = landingpad { i8*, i32 }
+          catch i8* bitcast (i8** @g0 to i8*)
+  %v3 = extractvalue { i8*, i32 } %v2, 1
+  %v4 = tail call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @g0 to i8*)) #1
+  %v5 = icmp eq i32 %v3, %v4
+  br i1 %v5, label %b2, label %b3
+
+b2:                                               ; preds = %b1
+  %v6 = extractvalue { i8*, i32 } %v2, 0
+  %v7 = tail call i8* @f4(i8* %v6) #1
+  tail call void @f5() #1
+  ret i32 1
+
+b3:                                               ; preds = %b1
+  resume { i8*, i32 } %v2
+
+b4:                                               ; preds = %b0
+  unreachable
+}
+
+declare i8* @f1(i32)
+
+declare void @f2(i8*, i8*, i8*)
+
+declare i32 @f3(...)
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.eh.typeid.for(i8*) #0
+
+declare i8* @f4(i8*)
+
+declare void @f5()
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }
+attributes #2 = { noreturn }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/packetize-call-r29.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/packetize-call-r29.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/packetize-call-r29.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/packetize-call-r29.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,25 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Test that the assignment to r29 does not occur in the same packet as the call.
+
+; CHECK: call
+; CHECK: }
+; CHECK: r29 = #0
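+;
+; r29 is the Hexagon stack pointer; the "r29 = #0" is the lowering of
+; llvm.stackrestore(i8* null). Keeping that assignment in a later packet
+; preserves the ordering between the call and the stack-pointer update.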
+
+define protected void @f0(i8* %a0, i8* %a1, ...) local_unnamed_addr {
+b0:
+  call void @llvm.va_start(i8* nonnull undef)
+  call void @f1()
+  call void @llvm.stackrestore(i8* null)
+  ret void
+}
+
+; Function Attrs: nounwind
+declare void @llvm.va_start(i8*) #0
+
+declare protected void @f1() local_unnamed_addr
+
+; Function Attrs: nounwind
+declare void @llvm.stackrestore(i8*) #0
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/packetize-impdef-1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/packetize-impdef-1.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/packetize-impdef-1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/packetize-impdef-1.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,158 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; REQUIRES: asserts
+
+; Test that the compiler doesn't assert when IMPLICIT_DEF instructions
+; are added to the same packet as a use. This test case asserts if the
+; IMPLICIT_DEFs are not handled properly.
+;
+; r0 = IMPLICIT_DEF
+; r1 = IMPLICIT_DEF
+; S2_storerd_io r29, 0, d0
+
+; CHECK: memd(r29+#0) = r{{[0-9]+}}:{{[0-9]+}}
+; CHECK: memd(r29+#0) = r{{[0-9]+}}:{{[0-9]+}}
+
+define i8** @f0(i8* %a0) local_unnamed_addr {
+b0:
+  %v0 = tail call i8* @f1(i32 0)
+  %v1 = tail call i8* @f1(i32 8)
+  %v2 = bitcast i8* %v1 to i8**
+  %v3 = load i32, i32* undef, align 4
+  %v4 = tail call i8* @f4(i8* %a0, i32 0, i32 %v3)
+  %v5 = sub nsw i32 %v3, 0
+  br label %b1
+
+b1:                                               ; preds = %b0
+  switch i8 undef, label %b3 [
+    i8 0, label %b4
+    i8 92, label %b2
+    i8 44, label %b4
+  ]
+
+b2:                                               ; preds = %b1
+  unreachable
+
+b3:                                               ; preds = %b1
+  unreachable
+
+b4:                                               ; preds = %b1, %b1
+  br label %b5
+
+b5:                                               ; preds = %b4
+  br i1 undef, label %b27, label %b6
+
+b6:                                               ; preds = %b5
+  %v6 = ptrtoint i8* %v4 to i32
+  %v7 = sub i32 0, %v6
+  %v8 = call i8* @f4(i8* nonnull %v4, i32 0, i32 %v7)
+  %v9 = call i8* @f4(i8* nonnull %v4, i32 undef, i32 %v5)
+  br label %b7
+
+b7:                                               ; preds = %b6
+  br i1 undef, label %b8, label %b9
+
+b8:                                               ; preds = %b7
+  br label %b9
+
+b9:                                               ; preds = %b8, %b7
+  %v10 = phi i32 [ 2, %b8 ], [ 0, %b7 ]
+  %v11 = load i8, i8* %v9, align 1
+  switch i8 %v11, label %b12 [
+    i8 43, label %b10
+    i8 45, label %b10
+  ]
+
+b10:                                              ; preds = %b9, %b9
+  br i1 undef, label %b11, label %b12
+
+b11:                                              ; preds = %b10
+  %v12 = call i64 @f6(i8* nonnull %v9, i8** nonnull undef, i32 10)
+  %v13 = load i8*, i8** undef, align 4
+  %v14 = ptrtoint i8* %v13 to i32
+  br label %b15
+
+b12:                                              ; preds = %b10, %b9
+  switch i8 undef, label %b14 [
+    i8 0, label %b13
+    i8 46, label %b13
+  ]
+
+b13:                                              ; preds = %b12, %b12
+  br label %b15
+
+b14:                                              ; preds = %b12
+  unreachable
+
+b15:                                              ; preds = %b13, %b11
+  %v15 = phi i32 [ undef, %b13 ], [ %v14, %b11 ]
+  %v16 = phi i32 [ 2, %b13 ], [ 1, %b11 ]
+  %v17 = phi i64 [ undef, %b13 ], [ %v12, %b11 ]
+  %v18 = call i32* @f5()
+  br label %b16
+
+b16:                                              ; preds = %b15
+  %v19 = icmp ne i32 %v10, %v16
+  %v20 = or i1 undef, %v19
+  br i1 %v20, label %b17, label %b18
+
+b17:                                              ; preds = %b16
+  call void @f2(i8* %v8)
+  br label %b27
+
+b18:                                              ; preds = %b16
+  br i1 undef, label %b19, label %b20
+
+b19:                                              ; preds = %b18
+  br label %b24
+
+b20:                                              ; preds = %b18
+  %v21 = add i32 %v5, -2
+  %v22 = sub i32 %v21, %v7
+  %v23 = add i32 %v22, %v15
+  %v24 = sub i32 %v23, 0
+  br label %b21
+
+b21:                                              ; preds = %b20
+  %v25 = icmp ne i32 %v24, 2
+  %v26 = and i1 %v25, undef
+  br i1 %v26, label %b22, label %b23
+
+b22:                                              ; preds = %b21
+  unreachable
+
+b23:                                              ; preds = %b21
+  br label %b24
+
+b24:                                              ; preds = %b23, %b19
+  %v27 = phi i64 [ 0, %b19 ], [ %v17, %b23 ]
+  br label %b25
+
+b25:                                              ; preds = %b24
+  %v28 = icmp sgt i64 undef, %v27
+  br i1 %v28, label %b28, label %b26
+
+b26:                                              ; preds = %b25
+  unreachable
+
+b27:                                              ; preds = %b17, %b5
+  call void @f2(i8* %v4)
+  call void @f2(i8* %v0)
+  %v29 = call i8* @f3(i8* undef, i8* nonnull %a0)
+  ret i8** %v2
+
+b28:                                              ; preds = %b25
+  call void @f2(i8* %v9)
+  unreachable
+}
+
+declare i8* @f1(i32) local_unnamed_addr
+
+declare void @f2(i8* nocapture) local_unnamed_addr
+
+declare i8* @f3(i8*, i8* nocapture readonly) local_unnamed_addr
+
+declare i8* @f4(i8*, i32, i32) local_unnamed_addr
+
+declare i32* @f5() local_unnamed_addr
+
+declare i64 @f6(i8*, i8**, i32) local_unnamed_addr

Added: llvm/trunk/test/CodeGen/Hexagon/packetize-impdef.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/packetize-impdef.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/packetize-impdef.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/packetize-impdef.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,245 @@
+; RUN: llc -O3 -march=hexagon < %s | FileCheck %s
+; REQUIRES: asserts
+;
+; Check that IMPLICIT_DEFs are packetized correctly
+; (previously caused an assert).
+;
+; CHECK: f1:
+
+%0 = type { i8 (i8)*, i8 (i8, %1*)*, i8 (i8)* }
+%1 = type { [16384 x i16], [8192 x i16], [8192 x i16], [8192 x i32], i32, i32, i32, %2, %2, i32, i32, i32, i32 }
+%2 = type { i32, i32, i32 }
+%3 = type { %4 }
+%4 = type { i32, i8* }
+%5 = type { i8, i32, i32, i32, i16, i16, i16, i16, i8, i16, %6, %6, i32, i16, i16, i16, i16, i8 }
+%6 = type { i32, i32, i32, i32, i32, i32, i32, i8, i8 }
+%7 = type { i8, i8, i8, i8, i32, i32, i32, i32, i32, i32, %2, %2, %8, i8 }
+%8 = type { %2, %2 }
+
+@g0 = external hidden unnamed_addr constant [7 x %0], align 8
+@g1 = external hidden global %1, align 4
+@g2 = external hidden constant %3, align 4
+@g3 = external hidden constant %3, align 4
+
+declare void @f0(%3*, i32, i32)
+
+define hidden fastcc i32 @f1(%5* %a0, %7* %a1, %2* %a2) {
+b0:
+  br i1 undef, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  unreachable
+
+b2:                                               ; preds = %b0
+  br i1 undef, label %b3, label %b4
+
+b3:                                               ; preds = %b2
+  br label %b55
+
+b4:                                               ; preds = %b2
+  br i1 undef, label %b6, label %b5
+
+b5:                                               ; preds = %b4
+  %v0 = getelementptr inbounds %5, %5* %a0, i32 0, i32 1
+  br label %b7
+
+b6:                                               ; preds = %b4
+  br label %b55
+
+b7:                                               ; preds = %b52, %b5
+  %v1 = phi i32 [ undef, %b5 ], [ %v43, %b52 ]
+  %v2 = phi i32 [ 5, %b5 ], [ %v45, %b52 ]
+  %v3 = load i32, i32* undef, align 4
+  %v4 = load i32, i32* %v0, align 4
+  %v5 = sext i32 %v4 to i64
+  %v6 = sdiv i64 0, %v5
+  %v7 = trunc i64 %v6 to i32
+  %v8 = icmp slt i32 %v7, 204800
+  br i1 %v8, label %b8, label %b9
+
+b8:                                               ; preds = %b7
+  call void @f0(%3* @g2, i32 %v3, i32 %v4)
+  br label %b54
+
+b9:                                               ; preds = %b7
+  %v9 = load i8, i8* undef, align 1
+  %v10 = zext i8 %v9 to i32
+  br i1 undef, label %b10, label %b11
+
+b10:                                              ; preds = %b9
+  br label %b47
+
+b11:                                              ; preds = %b9
+  br i1 undef, label %b12, label %b47
+
+b12:                                              ; preds = %b11
+  br i1 undef, label %b13, label %b47
+
+b13:                                              ; preds = %b12
+  %v11 = getelementptr inbounds [7 x %0], [7 x %0]* @g0, i32 0, i32 %v10, i32 2
+  %v12 = load i8 (i8)*, i8 (i8)** %v11, align 4
+  %v13 = call zeroext i8 %v12(i8 zeroext %v9)
+  br i1 undef, label %b14, label %b47
+
+b14:                                              ; preds = %b13
+  br i1 undef, label %b15, label %b16
+
+b15:                                              ; preds = %b14
+  br label %b46
+
+b16:                                              ; preds = %b14
+  br i1 false, label %b17, label %b22
+
+b17:                                              ; preds = %b16
+  br i1 undef, label %b18, label %b19
+
+b18:                                              ; preds = %b17
+  unreachable
+
+b19:                                              ; preds = %b17
+  br label %b20
+
+b20:                                              ; preds = %b20, %b19
+  br i1 undef, label %b20, label %b21
+
+b21:                                              ; preds = %b20
+  unreachable
+
+b22:                                              ; preds = %b16
+  br i1 false, label %b23, label %b24
+
+b23:                                              ; preds = %b22
+  br label %b47
+
+b24:                                              ; preds = %b22
+  br i1 false, label %b25, label %b26
+
+b25:                                              ; preds = %b24
+  unreachable
+
+b26:                                              ; preds = %b24
+  br label %b27
+
+b27:                                              ; preds = %b36, %b26
+  %v14 = phi i32 [ 16, %b26 ], [ %v30, %b36 ]
+  %v15 = getelementptr inbounds %1, %1* @g1, i32 0, i32 2, i32 %v14
+  %v16 = load i16, i16* %v15, align 2
+  %v17 = sext i16 %v16 to i32
+  %v18 = select i1 undef, i32 undef, i32 %v17
+  %v19 = sext i32 %v18 to i64
+  %v20 = or i32 %v18, undef
+  br i1 false, label %b28, label %b29
+
+b28:                                              ; preds = %b27
+  unreachable
+
+b29:                                              ; preds = %b27
+  br i1 false, label %b30, label %b31
+
+b30:                                              ; preds = %b29
+  unreachable
+
+b31:                                              ; preds = %b29
+  %v21 = mul nsw i64 undef, %v19
+  %v22 = sdiv i64 0, %v19
+  %v23 = add nsw i64 %v22, 0
+  %v24 = lshr i64 %v23, 5
+  %v25 = trunc i64 %v24 to i32
+  %v26 = sub nsw i32 1608, %v25
+  %v27 = icmp sgt i16 %v16, -1
+  %v28 = and i1 undef, %v27
+  br i1 %v28, label %b32, label %b33
+
+b32:                                              ; preds = %b31
+  store i32 %v26, i32* undef, align 4
+  br label %b36
+
+b33:                                              ; preds = %b31
+  br i1 undef, label %b34, label %b35
+
+b34:                                              ; preds = %b33
+  %v29 = getelementptr inbounds %1, %1* @g1, i32 0, i32 3, i32 %v14
+  store i32 undef, i32* %v29, align 4
+  br label %b36
+
+b35:                                              ; preds = %b33
+  br label %b36
+
+b36:                                              ; preds = %b35, %b34, %b32
+  %v30 = add nuw nsw i32 %v14, 1
+  %v31 = icmp ult i32 %v30, 8192
+  br i1 %v31, label %b27, label %b37
+
+b37:                                              ; preds = %b36
+  br label %b38
+
+b38:                                              ; preds = %b38, %b37
+  br i1 undef, label %b38, label %b39
+
+b39:                                              ; preds = %b38
+  br i1 false, label %b40, label %b41
+
+b40:                                              ; preds = %b39
+  unreachable
+
+b41:                                              ; preds = %b39
+  %v32 = icmp ult i8 %v9, 6
+  br i1 %v32, label %b43, label %b42
+
+b42:                                              ; preds = %b41
+  br label %b47
+
+b43:                                              ; preds = %b41
+  %v33 = load i64, i64* undef, align 8
+  br label %b44
+
+b44:                                              ; preds = %b44, %b43
+  br i1 undef, label %b45, label %b44
+
+b45:                                              ; preds = %b44
+  %v34 = sdiv i64 undef, %v33
+  %v35 = trunc i64 %v34 to i32
+  %v36 = add nsw i32 0, %v3
+  %v37 = sext i32 %v36 to i64
+  %v38 = mul nsw i64 %v37, 4096000
+  %v39 = sdiv i64 %v38, 0
+  %v40 = trunc i64 %v39 to i32
+  br label %b46
+
+b46:                                              ; preds = %b45, %b15
+  %v41 = phi i32 [ undef, %b15 ], [ %v40, %b45 ]
+  br label %b47
+
+b47:                                              ; preds = %b46, %b42, %b23, %b13, %b12, %b11, %b10
+  %v42 = phi i8 [ 1, %b10 ], [ 0, %b46 ], [ 3, %b23 ], [ 1, %b42 ], [ %v13, %b13 ], [ undef, %b12 ], [ undef, %b11 ]
+  %v43 = phi i32 [ %v1, %b10 ], [ %v41, %b46 ], [ %v1, %b23 ], [ %v1, %b42 ], [ %v1, %b13 ], [ %v1, %b12 ], [ %v1, %b11 ]
+  %v44 = icmp eq i8 %v42, 1
+  br i1 %v44, label %b48, label %b49
+
+b48:                                              ; preds = %b47
+  br label %b54
+
+b49:                                              ; preds = %b47
+  br i1 undef, label %b50, label %b52
+
+b50:                                              ; preds = %b49
+  br i1 undef, label %b51, label %b53
+
+b51:                                              ; preds = %b50
+  br label %b52
+
+b52:                                              ; preds = %b51, %b49
+  %v45 = add nsw i32 %v2, -1
+  %v46 = icmp eq i32 %v45, 0
+  br i1 %v46, label %b54, label %b7
+
+b53:                                              ; preds = %b50
+  call void @f0(%3* @g3, i32 %v43, i32 undef)
+  unreachable
+
+b54:                                              ; preds = %b52, %b48, %b8
+  unreachable
+
+b55:                                              ; preds = %b6, %b3
+  ret i32 0
+}

Added: llvm/trunk/test/CodeGen/Hexagon/packetize-l2fetch.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/packetize-l2fetch.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/packetize-l2fetch.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/packetize-l2fetch.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,34 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Check that this testcase compiles successfully.
+; Because l2fetch has mayLoad/mayStore flags on it, the packetizer
+; was tricked into thinking that it's a store. The v65-specific
+; code dealing with mem_shuff allowed it to be packetized together
+; with the load.
+
+; CHECK: l2fetch
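+;
+; Note that the pointer fed to the l2fetch is the value loaded from @g1
+; immediately before it, so bundling the two in one packet would use a
+; stale address register (presumably no .new form exists for that
+; operand).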
+
+target triple = "hexagon"
+
+@g0 = external global [32768 x i8], align 8
+@g1 = external local_unnamed_addr global [15 x i8*], align 8
+
+; Function Attrs: nounwind
+define void @f0() local_unnamed_addr #0 {
+b0:
+  store i8* inttoptr (i32 and (i32 sext (i8 ptrtoint (i8* getelementptr inbounds ([32768 x i8], [32768 x i8]* @g0, i32 0, i32 10000) to i8) to i32), i32 -65536) to i8*), i8** getelementptr inbounds ([15 x i8*], [15 x i8*]* @g1, i32 0, i32 1), align 4
+  store i8* inttoptr (i32 and (i32 sext (i8 ptrtoint (i8* getelementptr inbounds ([32768 x i8], [32768 x i8]* @g0, i32 0, i32 10000) to i8) to i32), i32 -65536) to i8*), i8** getelementptr inbounds ([15 x i8*], [15 x i8*]* @g1, i32 0, i32 6), align 8
+  tail call void @f1()
+  %v0 = load i8*, i8** getelementptr inbounds ([15 x i8*], [15 x i8*]* @g1, i32 0, i32 0), align 8
+  tail call void @llvm.hexagon.Y5.l2fetch(i8* %v0, i64 -9223372036854775808)
+  ret void
+}
+
+; Function Attrs: nounwind
+declare void @llvm.hexagon.Y5.l2fetch(i8*, i64) #1
+
+; Function Attrs: nounwind
+declare void @f1() #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv65" }
+attributes #1 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/packetize-volatiles.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/packetize-volatiles.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/packetize-volatiles.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/packetize-volatiles.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,29 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+; CHECK: mem{{.*}} = {{.*}}.new
+
+target triple = "hexagon-unknown-linux-gnu"
+
+; Function Attrs: nounwind
+define void @f0(i8* nocapture %a0, i8* nocapture %a1) #0 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v0 = phi i8* [ %a1, %b0 ], [ %v2, %b1 ]
+  %v1 = phi i8* [ %a0, %b0 ], [ %v4, %b1 ]
+  %v2 = getelementptr inbounds i8, i8* %v0, i32 1
+  %v3 = load volatile i8, i8* %v0, align 1, !tbaa !0
+  %v4 = getelementptr inbounds i8, i8* %v1, i32 1
+  store volatile i8 %v3, i8* %v1, align 1, !tbaa !0
+  %v5 = icmp eq i8 %v3, 0
+  br i1 %v5, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  ret void
+}
+
+attributes #0 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/peephole-move-phi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/peephole-move-phi.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/peephole-move-phi.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/peephole-move-phi.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,53 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+; Splitting the live ranges of vector predicate registers (in
+; hexagon-peephole) used to move a PHI instruction into the middle of
+; another basic block, causing a crash later on. Make sure this does not
+; happen and that the testcase compiles successfully.
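+; (PHI instructions are only valid at the start of a basic block, so a
+; PHI hoisted into the middle of one is malformed MIR.)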
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define void @f0() local_unnamed_addr #0 {
+b0:
+  %v0 = icmp eq i32 undef, 0
+  br i1 %v0, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  %v1 = tail call <1024 x i1> @llvm.hexagon.V6.pred.not.128B(<1024 x i1> undef) #2
+  br label %b2
+
+b2:                                               ; preds = %b1, %b0
+  %v2 = phi <1024 x i1> [ %v1, %b1 ], [ undef, %b0 ]
+  br label %b3
+
+b3:                                               ; preds = %b3, %b2
+  %v3 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1> %v2, <32 x i32> undef, <32 x i32> undef) #2
+  %v4 = tail call <32 x i32> @llvm.hexagon.V6.vor.128B(<32 x i32> undef, <32 x i32> %v3) #2
+  %v5 = tail call <32 x i32> @llvm.hexagon.V6.vor.128B(<32 x i32> %v4, <32 x i32> undef) #2
+  %v6 = tail call <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v5, <32 x i32> undef) #2
+  %v7 = tail call <1024 x i1> @llvm.hexagon.V6.pred.or.128B(<1024 x i1> %v6, <1024 x i1> undef) #2
+  %v8 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1> %v7, <32 x i32> undef, <32 x i32> undef) #2
+  tail call void asm sideeffect "if($0) vmem($1)=$2;", "q,r,v,~{memory}"(<32 x i32> undef, <32 x i32>* undef, <32 x i32> %v8) #2
+  br label %b3
+}
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1>, <32 x i32>, <32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32>, <32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <1024 x i1> @llvm.hexagon.V6.pred.or.128B(<1024 x i1>, <1024 x i1>) #1
+
+; Function Attrs: nounwind readnone
+declare <1024 x i1> @llvm.hexagon.V6.pred.not.128B(<1024 x i1>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vor.128B(<32 x i32>, <32 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length128b" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/phi-elim.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/phi-elim.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/phi-elim.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/phi-elim.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,25 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+; Check that the verifier doesn't fail due to incorrect
+; ordering of registers caused by PHI elimination.
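+;
+; The first and third PHIs below form a swap cycle: %v0 takes the
+; previous value of %v2 and %v2 takes the previous value of %v0, the
+; classic case where PHI elimination must insert its copies in a safe
+; order.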
+
+; Function Attrs: readnone
+define i32 @f0(i32 %a0, i32 %a1, i32 %a2) #0 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v0 = phi i32 [ %a1, %b0 ], [ %v2, %b1 ]
+  %v1 = phi i32 [ 0, %b0 ], [ %v4, %b1 ]
+  %v2 = phi i32 [ %a0, %b0 ], [ %v0, %b1 ]
+  %v3 = icmp slt i32 %v1, %a2
+  %v4 = add nsw i32 %v1, 1
+  br i1 %v3, label %b1, label %b2
+
+b2:                                               ; preds = %b1
+  %v5 = add nsw i32 %v2, %v0
+  ret i32 %v5
+}
+
+attributes #0 = { readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/pic-jt-big.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/pic-jt-big.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/pic-jt-big.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/pic-jt-big.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,78 @@
+; RUN: llc -march=hexagon -relocation-model=pic < %s | FileCheck %s
+
+; CHECK: r{{[0-9]+}} = add({{pc|PC}},##.LJTI{{[0-9_]+}}@PCREL)
+; CHECK: r{{[0-9]+}} = memw(r{{[0-9]}}+##g0@GOT
+; CHECK: r{{[0-9]+}} = add({{pc|PC}},##_GLOBAL_OFFSET_TABLE_@PCREL)
+; CHECK: r{{[0-9]+}} = memw(r{{[0-9]*}}+##g1@GOT)
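+;
+; In PIC mode the jump-table address is formed PC-relative (@PCREL) and
+; the globals are reached through their GOT entries (@GOT), with
+; _GLOBAL_OFFSET_TABLE_ as the base address.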
+
+@g0 = external global i32
+@g1 = external global i32
+
+; Function Attrs: nounwind
+define i32 @f0(i32 %a0) #0 {
+b0:
+  switch i32 %a0, label %b8 [
+    i32 2, label %b1
+    i32 3, label %b2
+    i32 4, label %b3
+    i32 5, label %b4
+    i32 6, label %b5
+    i32 7, label %b6
+    i32 8, label %b7
+  ]
+
+b1:                                               ; preds = %b0
+  tail call void bitcast (void (...)* @f1 to void ()*)() #0
+  br label %b8
+
+b2:                                               ; preds = %b0
+  %v0 = load i32, i32* @g0, align 4, !tbaa !0
+  %v1 = add nsw i32 %v0, 99
+  br label %b9
+
+b3:                                               ; preds = %b0
+  %v2 = load i32, i32* @g1, align 4, !tbaa !0
+  %v3 = load i32, i32* @g0, align 4, !tbaa !0
+  %v4 = add nsw i32 %v3, %v2
+  tail call void @f2(i32 %v4) #0
+  br label %b8
+
+b4:                                               ; preds = %b0
+  %v5 = load i32, i32* @g1, align 4, !tbaa !0
+  %v6 = load i32, i32* @g0, align 4, !tbaa !0
+  %v7 = mul nsw i32 %v6, 2
+  %v8 = add i32 %v5, 9
+  %v9 = add i32 %v8, %v7
+  tail call void @f2(i32 %v9) #0
+  br label %b8
+
+b5:                                               ; preds = %b0
+  br label %b8
+
+b6:                                               ; preds = %b0
+  br label %b7
+
+b7:                                               ; preds = %b6, %b0
+  %v10 = phi i32 [ 2, %b0 ], [ 4, %b6 ]
+  br label %b8
+
+b8:                                               ; preds = %b7, %b5, %b4, %b3, %b1, %b0
+  %v11 = phi i32 [ %a0, %b0 ], [ %v10, %b7 ], [ 7, %b5 ], [ 5, %b4 ], [ 4, %b3 ], [ 2, %b1 ]
+  %v12 = add nsw i32 %v11, 522
+  br label %b9
+
+b9:                                               ; preds = %b8, %b2
+  %v13 = phi i32 [ %v12, %b8 ], [ %v1, %b2 ]
+  ret i32 %v13
+}
+
+declare void @f1(...)
+
+declare void @f2(i32)
+
+attributes #0 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/pmpyw_acc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/pmpyw_acc.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/pmpyw_acc.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/pmpyw_acc.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,49 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: r{{[0-9]+}}:{{[0-9]+}} ^= pmpyw(r{{[0-9]+}},r{{[0-9]+}})
+
+; Function Attrs: nounwind
+define i32 @f0(i32 %a0, i32 %a1, i32 %a2, i32 %a3) #0 {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = alloca i32, align 4
+  %v2 = alloca i32, align 4
+  %v3 = alloca i32, align 4
+  %v4 = alloca i64, align 8
+  %v5 = alloca i64, align 8
+  store i32 %a0, i32* %v0, align 4
+  store i32 %a1, i32* %v1, align 4
+  store i32 %a2, i32* %v2, align 4
+  store i32 %a3, i32* %v3, align 4
+  %v6 = load i32, i32* %v0, align 4
+  %v7 = load i32, i32* %v1, align 4
+  %v8 = call i64 @llvm.hexagon.M4.pmpyw(i32 %v6, i32 %v7)
+  store i64 %v8, i64* %v5, align 8
+  %v9 = load i64, i64* %v5, align 8
+  store i64 %v9, i64* %v4, align 8
+  %v10 = load i64, i64* %v5, align 8
+  %v11 = load i32, i32* %v3, align 4
+  %v12 = load i64, i64* %v5, align 8
+  %v13 = lshr i64 %v12, 32
+  %v14 = trunc i64 %v13 to i32
+  %v15 = call i64 @llvm.hexagon.M4.pmpyw.acc(i64 %v10, i32 %v11, i32 %v14)
+  store i64 %v15, i64* %v5, align 8
+  %v16 = load i64, i64* %v4, align 8
+  %v17 = load i64, i64* %v5, align 8
+  %v18 = lshr i64 %v17, 32
+  %v19 = trunc i64 %v18 to i32
+  %v20 = load i32, i32* %v2, align 4
+  %v21 = call i64 @llvm.hexagon.M4.pmpyw.acc(i64 %v16, i32 %v19, i32 %v20)
+  store i64 %v21, i64* %v4, align 8
+  %v22 = load i64, i64* %v4, align 8
+  %v23 = trunc i64 %v22 to i32
+  ret i32 %v23
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.M4.pmpyw(i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.M4.pmpyw.acc(i64, i32, i32) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/postinc-aggr-dag-cycle.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/postinc-aggr-dag-cycle.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/postinc-aggr-dag-cycle.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/postinc-aggr-dag-cycle.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,104 @@
+; RUN: llc -march=hexagon -O2 < %s
+; REQUIRES: asserts
+; Check for successful compilation.
+
+target triple = "hexagon"
+
+%s.0 = type { [1 x i32] }
+%s.1 = type { %s.2, i8, %s.6 }
+%s.2 = type { %s.3 }
+%s.3 = type { %s.4 }
+%s.4 = type { %s.5 }
+%s.5 = type { i32 }
+%s.6 = type { %s.6*, %s.6* }
+
+ at g0 = external constant %s.0*
+ at g1 = external global i32
+ at g2 = internal global %s.1 zeroinitializer, section ".data..percpu", align 4
+ at g3 = external global [3 x i32]
+ at g4 = private unnamed_addr constant [29 x i8] c"BUG: failure at %s:%d/%s()!\0A\00", align 1
+ at g5 = private unnamed_addr constant [22 x i8] c"kernel/stop_machine.c\00", align 1
+ at g6 = private unnamed_addr constant [14 x i8] c"cpu_stop_init\00", align 1
+ at g7 = private unnamed_addr constant [5 x i8] c"BUG!\00", align 1
+
+; Function Attrs: nounwind
+define internal i32 @f0() #0 section ".init.text" {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = load %s.0*, %s.0** @g0, align 4, !tbaa !0
+  %v2 = getelementptr inbounds %s.0, %s.0* %v1, i32 0, i32 0, i32 0
+  %v3 = tail call i32 @f1(i32* %v2, i32 3, i32 0) #0
+  %v4 = load i32, i32* @g1, align 4, !tbaa !4
+  %v5 = icmp ult i32 %v3, %v4
+  br i1 %v5, label %b1, label %b4
+
+b1:                                               ; preds = %b0
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v6 = phi i32 [ %v18, %b2 ], [ %v3, %b1 ]
+  %v7 = tail call i32 asm "", "=r,0"(%s.1* @g2) #0, !srcloc !6
+  %v8 = getelementptr inbounds [3 x i32], [3 x i32]* @g3, i32 0, i32 %v6
+  %v9 = load i32, i32* %v8, align 4, !tbaa !7
+  %v10 = add i32 %v9, %v7
+  %v11 = inttoptr i32 %v10 to %s.1*
+  store volatile i32 0, i32* %v0, align 4
+  %v12 = getelementptr inbounds %s.1, %s.1* %v11, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0
+  %v13 = load volatile i32, i32* %v0, align 4
+  store volatile i32 %v13, i32* %v12, align 4
+  %v14 = getelementptr inbounds %s.1, %s.1* %v11, i32 0, i32 2
+  %v15 = getelementptr inbounds %s.6, %s.6* %v14, i32 0, i32 0
+  store %s.6* %v14, %s.6** %v15, align 4, !tbaa !9
+  %v16 = getelementptr inbounds %s.1, %s.1* %v11, i32 0, i32 2, i32 1
+  store %s.6* %v14, %s.6** %v16, align 4, !tbaa !11
+  %v17 = add i32 %v6, 1
+  %v18 = tail call i32 @f1(i32* %v2, i32 3, i32 %v17) #0
+  %v19 = load i32, i32* @g1, align 4, !tbaa !4
+  %v20 = icmp ult i32 %v18, %v19
+  br i1 %v20, label %b2, label %b3
+
+b3:                                               ; preds = %b2
+  br label %b4
+
+b4:                                               ; preds = %b3, %b0
+  %v21 = tail call i32 @f2() #0
+  %v22 = icmp eq i32 %v21, 0
+  br i1 %v22, label %b6, label %b5, !prof !12
+
+b5:                                               ; preds = %b4
+  %v23 = tail call i32 (i8*, ...) @f3(i8* getelementptr inbounds ([29 x i8], [29 x i8]* @g4, i32 0, i32 0), i8* getelementptr inbounds ([22 x i8], [22 x i8]* @g5, i32 0, i32 0), i32 354, i8* getelementptr inbounds ([14 x i8], [14 x i8]* @g6, i32 0, i32 0)) #0
+  tail call void (i8*, ...) @f4(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @g7, i32 0, i32 0)) #1
+  unreachable
+
+b6:                                               ; preds = %b4
+  ret i32 0
+}
+
+; Function Attrs: nounwind
+declare i32 @f1(i32*, i32, i32) #0
+
+; Function Attrs: nounwind
+declare i32 @f2() #0
+
+; Function Attrs: nounwind
+declare i32 @f3(i8*, ...) #0
+
+; Function Attrs: noreturn
+declare void @f4(i8*, ...) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { noreturn }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"any pointer", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"int", !2, i64 0}
+!6 = !{i32 521672}
+!7 = !{!8, !8, i64 0}
+!8 = !{!"long", !2, i64 0}
+!9 = !{!10, !1, i64 0}
+!10 = !{!"list_head", !1, i64 0, !1, i64 4}
+!11 = !{!10, !1, i64 4}
+!12 = !{!"branch_weights", i32 64, i32 4}

Added: llvm/trunk/test/CodeGen/Hexagon/pred-sched.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/pred-sched.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/pred-sched.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/pred-sched.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,41 @@
+; RUN: llc -O3 -march=hexagon < %s | FileCheck %s
+
+; We want to see a .new instruction in this sequence.
+; CHECK: p[[PRED:[0-3]]] = tstbit
+; CHECK: if (p[[PRED]].new)
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind readnone
+define zeroext i16 @f0(i8 zeroext %a0, i16 zeroext %a1) #0 {
+b0:
+  %v0 = zext i8 %a0 to i32
+  %v1 = zext i16 %a1 to i32
+  %v2 = xor i32 %v0, %v1
+  %v3 = and i32 %v2, 1
+  %v4 = lshr i8 %a0, 1
+  %v5 = icmp eq i32 %v3, 0
+  %v6 = lshr i16 %a1, 1
+  %v7 = xor i16 %v6, -24575
+  %v8 = select i1 %v5, i16 %v6, i16 %v7
+  %v9 = zext i8 %v4 to i32
+  %v10 = zext i16 %v8 to i32
+  %v11 = xor i32 %v9, %v10
+  %v12 = and i32 %v11, 1
+  %v13 = lshr i8 %a0, 2
+  %v14 = icmp eq i32 %v12, 0
+  %v15 = lshr i16 %v8, 1
+  %v16 = xor i16 %v15, -24575
+  %v17 = select i1 %v14, i16 %v15, i16 %v16
+  %v18 = zext i8 %v13 to i32
+  %v19 = zext i16 %v17 to i32
+  %v20 = xor i32 %v18, %v19
+  %v21 = and i32 %v20, 1
+  %v22 = icmp eq i32 %v21, 0
+  %v23 = lshr i16 %v17, 1
+  %v24 = xor i16 %v23, -24575
+  %v25 = select i1 %v22, i16 %v23, i16 %v24
+  ret i16 %v25
+}
+
+attributes #0 = { nounwind readnone }

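The body of pred-sched.ll is three unrolled iterations of a bit-serial update; the constant -24575 is 0xA001 when viewed as an i16, so each step looks like a reflected CRC-16-style step. That shape is why the test expects a tstbit whose predicate is consumed in the same packet via the p.new form. A rough C rendering of one step, for orientation only (the names and the CRC reading are my own inference, not taken from the test):

  /* One step of the unrolled sequence in f0: test the low bit of
     (data ^ crc), then either just shift, or shift and xor with 0xA001. */
  unsigned short step(unsigned char data, unsigned short crc) {
    unsigned short shifted = (unsigned short)(crc >> 1);
    if ((data ^ crc) & 1)                          /* the tstbit */
      return (unsigned short)(shifted ^ 0xA001u);  /* 0xA001 == -24575 as i16 */
    return shifted;
  }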
Added: llvm/trunk/test/CodeGen/Hexagon/pred-simp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/pred-simp.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/pred-simp.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/pred-simp.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,27 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+; CHECK-NOT: not(
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind readnone
+define i32 @f0(i32 %a0, i32 %a1) #0 {
+b0:
+  %v0 = icmp slt i32 %a0, %a1
+  %v1 = add nsw i32 %a1, %a0
+  %v2 = icmp sgt i32 %v1, 10
+  %v3 = icmp eq i1 %v0, false
+  %v4 = or i1 %v3, %v2
+  br i1 %v4, label %b2, label %b1
+
+b1:                                               ; preds = %b0
+  %v5 = mul nsw i32 %a0, 2
+  %v6 = icmp sgt i32 %v5, %a1
+  br label %b2
+
+b2:                                               ; preds = %b1, %b0
+  %v7 = phi i1 [ %v6, %b1 ], [ true, %b0 ]
+  %v8 = zext i1 %v7 to i32
+  ret i32 %v8
+}
+
+attributes #0 = { nounwind readnone }

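In C terms, pred-simp.ll is roughly the function below. The interesting part is %v3, a logical negation of the i1 compare; CHECK-NOT: not( asserts that the backend folds that negation away (for example by inverting the sense of the compare or of the predicated branch) instead of emitting a separate not instruction. Sketch for illustration only; how the folding happens is my paraphrase, not stated in the test.

  int f0(int a0, int a1) {
    /* %v4 = !(a0 < a1) || (a0 + a1 > 10) */
    if (!(a0 < a1) || (a0 + a1 > 10))
      return 1;              /* the 'true' incoming value of the phi %v7 */
    return (a0 * 2 > a1);    /* %v6, computed in %b1 */
  }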
Added: llvm/trunk/test/CodeGen/Hexagon/pred-taken-jump.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/pred-taken-jump.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/pred-taken-jump.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/pred-taken-jump.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,52 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Predicated (old) taken jumps weren't supported prior to V60. The purpose
+; of this test is to make sure that these instructions are not
+; generated for V55.
+
+; CHECK-NOT: if ({{!?}}p{{[0-3]}}) jump:t
+
+%s.0 = type { %s.0*, i8 }
+
+define i32 @f0(%s.0** nocapture %a0, i32 %a1) #0 {
+b0:
+  %v0 = and i32 %a1, 63
+  %v1 = icmp eq i32 %v0, %a1
+  br i1 %v1, label %b1, label %b7
+
+b1:                                               ; preds = %b0
+  %v2 = tail call i8* @f1()
+  br label %b2
+
+b2:                                               ; preds = %b4, %b1
+  %v3 = phi i32 [ %v9, %b4 ], [ 0, %b1 ]
+  %v4 = phi i32 [ %v5, %b4 ], [ 0, %b1 ]
+  %v5 = add i32 %v4, 1
+  %v6 = icmp ult i32 %v5, 7
+  br i1 %v6, label %b3, label %b5
+
+b3:                                               ; preds = %b2
+  %v7 = tail call %s.0* @f2(i8* undef, i8* %v2)
+  %v8 = icmp eq %s.0* %v7, null
+  br i1 %v8, label %b7, label %b4
+
+b4:                                               ; preds = %b3
+  %v9 = select i1 undef, i32 1, i32 %v3
+  br label %b2
+
+b5:                                               ; preds = %b2
+  br i1 undef, label %b7, label %b6
+
+b6:                                               ; preds = %b5
+  br label %b7
+
+b7:                                               ; preds = %b6, %b5, %b3, %b0
+  %v10 = phi i32 [ -1, %b0 ], [ 1, %b6 ], [ %v3, %b5 ], [ -1, %b3 ]
+  ret i32 %v10
+}
+
+declare i8* @f1()
+
+declare %s.0* @f2(i8*, i8*)
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/predtfrs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/predtfrs.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/predtfrs.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/predtfrs.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,62 @@
+; RUN: llc -march=hexagon -hexagon-expand-condsets=0 < %s | FileCheck %s
+
+; CHECK: cmp.gt
+; CHECK-NOT: r1 = p0
+; CHECK-NOT: p0 = r1
+; CHECK: mux
+
+%s.0 = type { i32 }
+%s.1 = type { i64 }
+
+ at g0 = common global i16 0, align 2
+
+; Function Attrs: nounwind
+define void @f0(%s.0* nocapture %a0, %s.1* nocapture %a1, %s.1* nocapture %a2) #0 {
+b0:
+  %v0 = load i16, i16* @g0, align 2, !tbaa !0
+  %v1 = icmp eq i16 %v0, 3
+  %v2 = select i1 %v1, i32 -1, i32 34
+  %v3 = getelementptr inbounds %s.0, %s.0* %a0, i32 0, i32 0
+  %v4 = load i32, i32* %v3, align 4
+  %v5 = zext i32 %v4 to i64
+  %v6 = getelementptr inbounds %s.0, %s.0* %a0, i32 1, i32 0
+  %v7 = load i32, i32* %v6, align 4
+  %v8 = zext i32 %v7 to i64
+  %v9 = shl nuw i64 %v8, 32
+  %v10 = or i64 %v9, %v5
+  %v11 = getelementptr inbounds %s.1, %s.1* %a1, i32 0, i32 0
+  %v12 = load i64, i64* %v11, align 8, !tbaa !4
+  %v13 = tail call i64 @llvm.hexagon.M2.vrcmpyr.s0(i64 %v10, i64 %v12)
+  %v14 = tail call i64 @llvm.hexagon.S2.asr.i.p(i64 %v13, i32 14)
+  %v15 = lshr i64 %v14, 32
+  %v16 = trunc i64 %v15 to i32
+  %v17 = tail call i32 @llvm.hexagon.C2.cmpgti(i32 %v16, i32 0)
+  %v18 = trunc i64 %v14 to i32
+  %v19 = tail call i32 @llvm.hexagon.C2.mux(i32 %v17, i32 %v2, i32 %v18)
+  %v20 = zext i32 %v19 to i64
+  %v21 = getelementptr inbounds %s.1, %s.1* %a2, i32 2, i32 0
+  store i64 %v20, i64* %v21, align 8
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.M2.vrcmpyr.s0(i64, i64) #1
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.S2.asr.i.p(i64, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.C2.cmpgti(i32, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.C2.mux(i32, i32, i32) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"short", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"long long", !2}

Added: llvm/trunk/test/CodeGen/Hexagon/prefetch-intr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/prefetch-intr.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/prefetch-intr.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/prefetch-intr.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,25 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+; CHECK: dcfetch(
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define i32 @f0() #0 {
+b0:
+  %v0 = alloca i32, align 4
+  store i32 0, i32* %v0, align 4, !tbaa !0
+  %v1 = bitcast i32* %v0 to i8*
+  call void @llvm.hexagon.prefetch(i8* %v1)
+  ret i32 0
+}
+
+; Function Attrs: nounwind
+declare void @llvm.hexagon.prefetch(i8*) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" }
+attributes #1 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/prefetch-shuffler-ice.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/prefetch-shuffler-ice.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/prefetch-shuffler-ice.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/prefetch-shuffler-ice.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,24 @@
+; RUN: llc -march=hexagon -O2 < %s
+; REQUIRES: asserts
+; Expect successful compilation.
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind optsize
+define void @f0(i32* nocapture %a0, i8* %a1) #0 {
+b0:
+  call void @llvm.hexagon.prefetch(i8* %a1)
+  store i32 0, i32* %a0, align 4, !tbaa !0
+  ret void
+}
+
+; Function Attrs: nounwind
+declare void @llvm.hexagon.prefetch(i8*) #1
+
+attributes #0 = { nounwind optsize "target-cpu"="hexagonv55" }
+attributes #1 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/prob-types.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/prob-types.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/prob-types.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/prob-types.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,106 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+; REQUIRES: asserts
+
+; This was aborting in Machine Loop Invariant Code Motion;
+; we want to see something generated in assembly.
+; CHECK: f0:
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vdmpybus.128B(<32 x i32>, i32) #0
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32>, <32 x i32>, i32) #0
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32>, <32 x i32>) #0
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32>, <32 x i32>, i32) #0
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vand.128B(<32 x i32>, <32 x i32>) #0
+
+; Function Attrs: nounwind readnone
+declare <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32>, <32 x i32>, i32) #0
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vrdelta.128B(<32 x i32>, <32 x i32>) #0
+
+; Function Attrs: nounwind readnone
+declare <64 x i32> @llvm.hexagon.V6.vunpackuh.128B(<32 x i32>) #0
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32>) #0
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32>) #0
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vaddw.128B(<32 x i32>, <32 x i32>) #0
+
+; Function Attrs: nounwind
+define hidden void @f0(<32 x i32>* %a0, <32 x i32>* %a1, i32 %a2, <32 x i32> %a3, <32 x i32> %a4, <32 x i32> %a5, i32 %a6, <32 x i32> %a7) #1 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  %v0 = phi <32 x i32>* [ %v38, %b1 ], [ %a0, %b0 ]
+  %v1 = phi <32 x i32>* [ %v4, %b1 ], [ %a1, %b0 ]
+  %v2 = phi i32 [ %v39, %b1 ], [ %a2, %b0 ]
+  %v3 = phi <32 x i32> [ %v34, %b1 ], [ %a3, %b0 ]
+  %v4 = getelementptr inbounds <32 x i32>, <32 x i32>* %v1, i32 1
+  %v5 = load <32 x i32>, <32 x i32>* %v1, align 128, !tbaa !0
+  %v6 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.128B(<32 x i32> %v5, i32 16843009) #2
+  %v7 = tail call <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32> %v6, <32 x i32> %a4, i32 2) #2
+  %v8 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v6, <32 x i32> %v7) #2
+  %v9 = tail call <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32> %v8, <32 x i32> %a4, i32 4) #2
+  %v10 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v8, <32 x i32> %v9) #2
+  %v11 = tail call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> %v10, <32 x i32> %a4, i32 8) #2
+  %v12 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v10, <32 x i32> %v11) #2
+  %v13 = tail call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> %v12, <32 x i32> %a4, i32 16) #2
+  %v14 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v12, <32 x i32> %v13) #2
+  %v15 = tail call <32 x i32> @llvm.hexagon.V6.vlalignb.128B(<32 x i32> %v14, <32 x i32> %a4, i32 32) #2
+  %v16 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v14, <32 x i32> %v15) #2
+  %v17 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v16, <32 x i32> %v15) #2
+  %v18 = tail call <32 x i32> @llvm.hexagon.V6.vand.128B(<32 x i32> %v5, <32 x i32> %a5) #2
+  %v19 = tail call <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32> %v17, <32 x i32> %a4, i32 2) #2
+  %v20 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v19, <32 x i32> %v18) #2
+  %v21 = tail call <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32> %v17, <32 x i32> %v20, i32 -2) #2
+  %v22 = tail call <32 x i32> @llvm.hexagon.V6.vrdelta.128B(<32 x i32> %v3, <32 x i32> %a7) #2
+  %v23 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v21) #2
+  %v24 = tail call <64 x i32> @llvm.hexagon.V6.vunpackuh.128B(<32 x i32> %v23) #2
+  %v25 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v21) #2
+  %v26 = tail call <64 x i32> @llvm.hexagon.V6.vunpackuh.128B(<32 x i32> %v25) #2
+  %v27 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v24) #2
+  %v28 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.128B(<32 x i32> %v22, <32 x i32> %v27) #2
+  %v29 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v24) #2
+  %v30 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.128B(<32 x i32> %v22, <32 x i32> %v29) #2
+  %v31 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v26) #2
+  %v32 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.128B(<32 x i32> %v22, <32 x i32> %v31) #2
+  %v33 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v26) #2
+  %v34 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.128B(<32 x i32> %v22, <32 x i32> %v33) #2
+  %v35 = getelementptr inbounds <32 x i32>, <32 x i32>* %v0, i32 1
+  store <32 x i32> %v28, <32 x i32>* %v0, align 128, !tbaa !0
+  %v36 = getelementptr inbounds <32 x i32>, <32 x i32>* %v0, i32 2
+  store <32 x i32> %v30, <32 x i32>* %v35, align 128, !tbaa !0
+  %v37 = getelementptr inbounds <32 x i32>, <32 x i32>* %v0, i32 3
+  store <32 x i32> %v32, <32 x i32>* %v36, align 128, !tbaa !0
+  %v38 = getelementptr inbounds <32 x i32>, <32 x i32>* %v0, i32 4
+  store <32 x i32> %v34, <32 x i32>* %v37, align 128, !tbaa !0
+  %v39 = add nsw i32 %v2, 128
+  %v40 = icmp slt i32 %v39, %a6
+  br i1 %v40, label %b1, label %b2
+
+b2:                                               ; preds = %b1
+  ret void
+}
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length128b" }
+attributes #2 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/ps_call_nr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/ps_call_nr.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/ps_call_nr.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/ps_call_nr.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,46 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; Test that the compiler does not generate an invalid packet with three
+; instructions that each require slot 2 or 3. The specification for
+; PS_call_nr was incorrect, which allowed that instruction to go in any slot.
+
+; CHECK: extractu
+; CHECK: extractu
+; CHECK: {
+; CHECK: call
+
+%s.0 = type <{ i8*, i8*, i16, i8, i8, i8 }>
+
+ at g0 = external constant %s.0, section ".rodata.trace", align 1
+
+define void @f0() local_unnamed_addr {
+b0:
+  %v0 = load i32, i32* undef, align 4
+  %v1 = trunc i32 %v0 to i2
+  switch i2 %v1, label %b4 [
+    i2 1, label %b1
+    i2 -1, label %b2
+    i2 -2, label %b2
+    i2 0, label %b3
+  ]
+
+b1:                                               ; preds = %b0
+  unreachable
+
+b2:                                               ; preds = %b0, %b0
+  %v2 = load i32, i32* undef, align 4
+  %v3 = lshr i32 %v2, 14
+  %v4 = and i32 %v3, 2047
+  %v5 = lshr i32 %v2, 3
+  %v6 = and i32 %v5, 2047
+  tail call void @f1(%s.0* nonnull @g0, i32 %v6, i32 %v4, i32 0, i32 0)
+  unreachable
+
+b3:                                               ; preds = %b0
+  ret void
+
+b4:                                               ; preds = %b0
+  unreachable
+}
+
+declare void @f1(%s.0*, i32, i32, i32, i32) local_unnamed_addr

Added: llvm/trunk/test/CodeGen/Hexagon/rdf-copy-undef.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/rdf-copy-undef.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/rdf-copy-undef.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/rdf-copy-undef.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,159 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+; After a copy R20 = R29, RDF copy propagation attempted to replace R20 with
+; R29. R29 did not have a reaching def at that point (which isn't unusual),
+; but copy propagation tried to link the new use of R29 to the presumed
+; reaching def (which was null), causing a crash.
+
+target triple = "hexagon"
+
+ at g0 = external unnamed_addr global i1, align 4
+
+; Function Attrs: nounwind
+declare i8* @llvm.stacksave() #0
+
+; Function Attrs: nounwind
+declare void @llvm.stackrestore(i8*) #0
+
+; Function Attrs: norecurse nounwind
+declare fastcc void @f0(i16 signext, i16 signext, i16 signext, i16* nocapture readonly, i16 signext, i16* nocapture) unnamed_addr #1
+
+; Function Attrs: norecurse nounwind
+declare fastcc signext i16 @f1(i16 signext, i16 signext) unnamed_addr #1
+
+; Function Attrs: norecurse nounwind
+define fastcc i32 @f2(i16* nocapture readonly %a0, i16 signext %a1, i16 signext %a2, i16* nocapture readonly %a3, i16 signext %a4, i16* nocapture readonly %a51, i16* nocapture %a6) unnamed_addr #1 {
+b0:
+  %v0 = tail call i8* @llvm.stacksave()
+  %v1 = tail call fastcc signext i16 @f1(i16 signext %a2, i16 signext %a1)
+  br i1 undef, label %b7, label %b1
+
+b1:                                               ; preds = %b0
+  br i1 undef, label %b3, label %b2
+
+b2:                                               ; preds = %b1
+  br i1 undef, label %b4, label %b8
+
+b3:                                               ; preds = %b1
+  br i1 undef, label %b5, label %b8
+
+b4:                                               ; preds = %b4, %b2
+  br i1 undef, label %b4, label %b6, !llvm.loop !2
+
+b5:                                               ; preds = %b5, %b3
+  %v2 = phi i16 [ %v3, %b5 ], [ 0, %b3 ]
+  %v3 = add i16 %v2, 1
+  %v4 = icmp sgt i32 0, -1073741825
+  br i1 %v4, label %b5, label %b6
+
+b6:                                               ; preds = %b5, %b4
+  %v5 = phi i16 [ %v3, %b5 ], [ undef, %b4 ]
+  br label %b7
+
+b7:                                               ; preds = %b6, %b0
+  %v6 = phi i16 [ %v5, %b6 ], [ 0, %b0 ]
+  br i1 undef, label %b9, label %b8
+
+b8:                                               ; preds = %b7, %b3, %b2
+  %v7 = or i32 0, undef
+  br label %b9
+
+b9:                                               ; preds = %b8, %b7
+  %v8 = phi i16 [ 0, %b8 ], [ %v6, %b7 ]
+  %v9 = phi i32 [ %v7, %b8 ], [ 0, %b7 ]
+  %v10 = load i16, i16* undef, align 2, !tbaa !4
+  %v11 = sext i16 %v10 to i32
+  %v12 = zext i16 %v10 to i32
+  br i1 undef, label %b10, label %b11
+
+b10:                                              ; preds = %b9
+  store i1 true, i1* @g0, align 4
+  br label %b11
+
+b11:                                              ; preds = %b10, %b9
+  %v13 = load i16, i16* undef, align 2, !tbaa !4
+  %v14 = sext i16 %v13 to i32
+  %v15 = shl nuw i32 %v12, 16
+  %v16 = and i32 %v9, 65535
+  %v17 = mul nsw i32 %v11, %v16
+  %v18 = sitofp i32 %v15 to double
+  %v19 = fsub double %v18, undef
+  %v20 = sub nsw i32 %v15, %v17
+  %v21 = fptosi double %v19 to i32
+  %v22 = select i1 undef, i32 %v21, i32 %v20
+  %v23 = mul nsw i32 %v14, %v16
+  %v24 = add nsw i32 %v23, %v22
+  %v25 = add nsw i32 %v24, 32768
+  %v26 = lshr i32 %v25, 16
+  %v27 = xor i1 undef, true
+  %v28 = and i1 %v27, undef
+  br i1 %v28, label %b12, label %b13
+
+b12:                                              ; preds = %b11
+  store i1 true, i1* @g0, align 4
+  br label %b13
+
+b13:                                              ; preds = %b12, %b11
+  br i1 undef, label %b14, label %b24
+
+b14:                                              ; preds = %b13
+  br label %b15
+
+b15:                                              ; preds = %b23, %b14
+  br i1 undef, label %b16, label %b17
+
+b16:                                              ; preds = %b15
+  br label %b19
+
+b17:                                              ; preds = %b15
+  %v29 = trunc i32 %v26 to i16
+  %v30 = icmp eq i16 %v29, -32768
+  %v31 = and i1 undef, %v30
+  br i1 %v31, label %b18, label %b19
+
+b18:                                              ; preds = %b17
+  store i1 true, i1* @g0, align 4
+  br label %b20
+
+b19:                                              ; preds = %b17, %b16
+  br label %b20
+
+b20:                                              ; preds = %b19, %b18
+  %v32 = phi i32 [ 2147483647, %b18 ], [ 0, %b19 ]
+  %v33 = icmp eq i16 %v8, 32767
+  br i1 %v33, label %b21, label %b22
+
+b21:                                              ; preds = %b20
+  store i1 true, i1* @g0, align 4
+  br label %b23
+
+b22:                                              ; preds = %b20
+  br label %b23
+
+b23:                                              ; preds = %b22, %b21
+  %v34 = add nsw i32 %v32, 32768
+  %v35 = lshr i32 %v34, 16
+  %v36 = trunc i32 %v35 to i16
+  store i16 %v36, i16* undef, align 2, !tbaa !4
+  br i1 undef, label %b24, label %b15
+
+b24:                                              ; preds = %b23, %b13
+  call fastcc void @f0(i16 signext undef, i16 signext %a1, i16 signext %a2, i16* %a3, i16 signext %a4, i16* %a6)
+  call void @llvm.stackrestore(i8* %v0)
+  ret i32 undef
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { norecurse nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length64b" }
+
+!llvm.module.flags = !{!0}
+
+!0 = !{i32 6, !"Target Features", !1}
+!1 = !{!"+hvx,+hvx-length64b"}
+!2 = distinct !{!2, !3}
+!3 = !{!"llvm.loop.threadify", i32 43789156}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"short", !6, i64 0}
+!6 = !{!"omnipotent char", !7, i64 0}
+!7 = !{!"Simple C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/rdf-kill-last-op.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/rdf-kill-last-op.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/rdf-kill-last-op.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/rdf-kill-last-op.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,213 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+target triple = "hexagon"
+
+%s.0 = type { %s.1 }
+%s.1 = type { i32, i8* }
+
+ at g0 = external unnamed_addr constant [6 x [2 x i32]], align 8
+ at g1 = external constant %s.0, align 4
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  br i1 undef, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  unreachable
+
+b2:                                               ; preds = %b0
+  br i1 undef, label %b3, label %b4
+
+b3:                                               ; preds = %b2
+  switch i32 undef, label %b4 [
+    i32 10, label %b5
+  ]
+
+b4:                                               ; preds = %b3, %b2
+  unreachable
+
+b5:                                               ; preds = %b3
+  br i1 undef, label %b7, label %b6
+
+b6:                                               ; preds = %b5
+  switch i32 undef, label %b40 [
+    i32 10, label %b38
+    i32 5, label %b8
+  ]
+
+b7:                                               ; preds = %b5
+  unreachable
+
+b8:                                               ; preds = %b6
+  br i1 undef, label %b9, label %b37
+
+b9:                                               ; preds = %b8
+  br i1 undef, label %b10, label %b37
+
+b10:                                              ; preds = %b9
+  br i1 undef, label %b12, label %b11
+
+b11:                                              ; preds = %b10
+  unreachable
+
+b12:                                              ; preds = %b10
+  br i1 undef, label %b13, label %b17
+
+b13:                                              ; preds = %b12
+  br i1 undef, label %b14, label %b15
+
+b14:                                              ; preds = %b13
+  unreachable
+
+b15:                                              ; preds = %b13
+  br i1 undef, label %b16, label %b18
+
+b16:                                              ; preds = %b15
+  unreachable
+
+b17:                                              ; preds = %b12
+  unreachable
+
+b18:                                              ; preds = %b15
+  br i1 undef, label %b19, label %b20
+
+b19:                                              ; preds = %b18
+  br label %b21
+
+b20:                                              ; preds = %b18
+  unreachable
+
+b21:                                              ; preds = %b35, %b19
+  %v0 = phi i32 [ 0, %b19 ], [ %v43, %b35 ]
+  %v1 = phi i32 [ 0, %b19 ], [ %v44, %b35 ]
+  %v2 = phi i16 [ undef, %b19 ], [ %v42, %b35 ]
+  %v3 = trunc i32 %v1 to i10
+  %v4 = lshr i10 366, %v3
+  %v5 = and i10 %v4, 1
+  %v6 = icmp eq i10 %v5, 0
+  br i1 %v6, label %b35, label %b22
+
+b22:                                              ; preds = %b21
+  %v7 = load i32, i32* undef, align 4, !tbaa !0
+  %v8 = load i32, i32* undef, align 4, !tbaa !4
+  %v9 = load i32, i32* undef, align 4, !tbaa !4
+  %v10 = icmp ne i32 %v8, 0
+  %v11 = and i1 %v10, undef
+  %v12 = and i1 undef, %v11
+  br i1 %v12, label %b23, label %b24
+
+b23:                                              ; preds = %b22
+  %v13 = mul nsw i32 %v9, %v9
+  %v14 = sdiv i32 %v13, undef
+  %v15 = trunc i32 %v14 to i16
+  br label %b24
+
+b24:                                              ; preds = %b23, %b22
+  %v16 = phi i16 [ %v15, %b23 ], [ 0, %b22 ]
+  %v17 = icmp ugt i16 %v16, undef
+  %v18 = zext i1 %v17 to i32
+  %v19 = add nsw i32 %v18, %v0
+  %v20 = load i8, i8* undef, align 4, !tbaa !6
+  %v21 = zext i8 %v20 to i32
+  %v22 = sub nsw i32 6, %v21
+  %v23 = add nsw i32 %v22, -1
+  br i1 false, label %b39, label %b25, !prof !19
+
+b25:                                              ; preds = %b24
+  %v24 = getelementptr inbounds [6 x [2 x i32]], [6 x [2 x i32]]* @g0, i32 0, i32 %v21, i32 0
+  %v25 = load i32, i32* %v24, align 8, !tbaa !0
+  %v26 = icmp eq i32 undef, %v25
+  br i1 %v26, label %b26, label %b27
+
+b26:                                              ; preds = %b25
+  br i1 undef, label %b32, label %b27
+
+b27:                                              ; preds = %b26, %b25
+  %v27 = getelementptr inbounds [6 x [2 x i32]], [6 x [2 x i32]]* @g0, i32 0, i32 %v23, i32 0
+  %v28 = load i32, i32* %v27, align 8, !tbaa !0
+  %v29 = icmp eq i32 undef, %v28
+  br i1 %v29, label %b28, label %b29
+
+b28:                                              ; preds = %b27
+  br i1 undef, label %b32, label %b29
+
+b29:                                              ; preds = %b28, %b27
+  %v30 = load i32, i32* undef, align 4, !tbaa !4
+  %v31 = load i32, i32* undef, align 4, !tbaa !4
+  %v32 = icmp ne i32 %v30, 0
+  %v33 = and i1 %v32, undef
+  %v34 = and i1 undef, %v33
+  br i1 %v34, label %b30, label %b31
+
+b30:                                              ; preds = %b29
+  %v35 = mul nsw i32 %v31, %v31
+  %v36 = sdiv i32 %v35, 0
+  %v37 = trunc i32 %v36 to i16
+  br label %b31
+
+b31:                                              ; preds = %b30, %b29
+  %v38 = phi i16 [ %v37, %b30 ], [ 0, %b29 ]
+  br label %b32
+
+b32:                                              ; preds = %b31, %b28, %b26
+  %v39 = phi i16 [ %v38, %b31 ], [ %v2, %b28 ], [ %v2, %b26 ]
+  br i1 undef, label %b33, label %b34
+
+b33:                                              ; preds = %b32
+  call void (%s.0*, i32, ...) @f1(%s.0* nonnull @g1, i32 6, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 %v7) #0
+  br label %b34
+
+b34:                                              ; preds = %b33, %b32
+  %v40 = icmp slt i32 %v19, 0
+  %v41 = and i1 %v40, undef
+  br i1 %v41, label %b35, label %b36
+
+b35:                                              ; preds = %b34, %b21
+  %v42 = phi i16 [ %v2, %b21 ], [ %v39, %b34 ]
+  %v43 = phi i32 [ %v0, %b21 ], [ %v19, %b34 ]
+  %v44 = add nuw nsw i32 %v1, 1
+  br label %b21
+
+b36:                                              ; preds = %b34
+  unreachable
+
+b37:                                              ; preds = %b9, %b8
+  unreachable
+
+b38:                                              ; preds = %b6
+  unreachable
+
+b39:                                              ; preds = %b24
+  unreachable
+
+b40:                                              ; preds = %b6
+  ret void
+}
+
+; Function Attrs: nounwind
+declare void @f1(%s.0*, i32, ...) #0
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"long", !2, i64 0}
+!6 = !{!7, !2, i64 136}
+!7 = !{!"x", !8, i64 0, !9, i64 8, !11, i64 52, !14, i64 88, !2, i64 116, !2, i64 117, !18, i64 118, !15, i64 128, !15, i64 132, !2, i64 136, !2, i64 140, !2, i64 180, !12, i64 220}
+!8 = !{!"", !2, i64 0, !2, i64 1, !2, i64 2, !2, i64 3, !2, i64 4, !2, i64 5}
+!9 = !{!"", !2, i64 0, !5, i64 4, !5, i64 8, !5, i64 12, !5, i64 16, !5, i64 20, !5, i64 24, !10, i64 28, !2, i64 32, !2, i64 33, !10, i64 36, !5, i64 40}
+!10 = !{!"any pointer", !2, i64 0}
+!11 = !{!"", !5, i64 0, !2, i64 4, !12, i64 20, !2, i64 32}
+!12 = !{!"", !13, i64 0, !13, i64 2, !13, i64 4, !13, i64 6, !13, i64 8, !13, i64 10}
+!13 = !{!"short", !2, i64 0}
+!14 = !{!"", !15, i64 0, !13, i64 2, !13, i64 4, !16, i64 8}
+!15 = !{!"", !2, i64 0}
+!16 = !{!"", !1, i64 0, !2, i64 4, !2, i64 5, !17, i64 8}
+!17 = !{!"", !2, i64 0, !5, i64 4, !5, i64 8}
+!18 = !{!"", !2, i64 0, !2, i64 1, !2, i64 2, !2, i64 3, !8, i64 4}
+!19 = !{!"branch_weights", i32 4, i32 64}

Added: llvm/trunk/test/CodeGen/Hexagon/redundant-branching2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/redundant-branching2.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/redundant-branching2.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/redundant-branching2.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,74 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+; This test checks if redundant conditional branches are removed.
+
+; CHECK: memub
+; CHECK: memub
+; CHECK: memub
+; CHECK-NOT: if{{.*}}jump .LBB
+; CHECK: cmp.eq
+
+target triple = "hexagon-unknown--elf"
+
+; Function Attrs: nounwind
+declare void @f0() #0
+
+; Function Attrs: nounwind
+define void @f1(i8* nocapture readonly %a0, i32 %a1) #0 {
+b0:
+  br i1 undef, label %b8, label %b1
+
+b1:                                               ; preds = %b0
+  tail call void @f0() #0
+  br i1 false, label %b8, label %b2
+
+b2:                                               ; preds = %b1
+  %v0 = getelementptr inbounds i8, i8* %a0, i32 undef
+  %v1 = sub i32 0, %a1
+  %v2 = icmp eq i32 undef, undef
+  br label %b3
+
+b3:                                               ; preds = %b6, %b2
+  %v3 = phi i8* [ undef, %b2 ], [ %v17, %b6 ]
+  %v4 = phi i8* [ %v0, %b2 ], [ null, %b6 ]
+  %v5 = phi i32 [ 1, %b2 ], [ 0, %b6 ]
+  br i1 %v2, label %b4, label %b5
+
+b4:                                               ; preds = %b3
+  %v6 = load i8, i8* %v3, align 1
+  br label %b6
+
+b5:                                               ; preds = %b3
+  %v7 = load i8, i8* %v4, align 1
+  %v8 = zext i8 %v7 to i32
+  %v9 = getelementptr inbounds i8, i8* %v4, i32 %v1
+  %v10 = load i8, i8* %v9, align 1
+  %v11 = zext i8 %v10 to i32
+  %v12 = sub nsw i32 %v8, %v11
+  br label %b6
+
+b6:                                               ; preds = %b5, %b4
+  %v13 = phi i8 [ 0, %b5 ], [ %v6, %b4 ]
+  %v14 = phi i32 [ %v12, %b5 ], [ 0, %b4 ]
+  %v15 = zext i8 %v13 to i32
+  %v16 = mul nsw i32 %v14, %v14
+  %v17 = getelementptr inbounds i8, i8* %v3, i32 1
+  %v18 = sub nsw i32 0, %v15
+  %v19 = mul nsw i32 %v18, %v18
+  %v20 = add nuw i32 %v16, 0
+  %v21 = add i32 %v20, 0
+  %v22 = add i32 %v21, 0
+  %v23 = lshr i32 %v22, 1
+  %v24 = add nuw nsw i32 %v23, %v19
+  %v25 = add nsw i32 %v24, 0
+  store i32 %v25, i32* null, align 4
+  %v26 = icmp eq i32 %v5, undef
+  br i1 %v26, label %b7, label %b3
+
+b7:                                               ; preds = %b6
+  unreachable
+
+b8:                                               ; preds = %b1, %b0
+  ret void
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/reg-eq-cmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/reg-eq-cmp.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/reg-eq-cmp.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/reg-eq-cmp.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,44 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; Generate reg = cmp.
+
+ at g0 = common global i8 0, align 1
+ at g1 = common global i32 0, align 4
+ at g2 = common global i8 0, align 1
+ at g3 = global i8 65, align 1
+
+; CHECK-LABEL: f0:
+; CHECK: r{{[0-9]+}} = cmp.eq(r{{[0-9]+}},#65)
+define i32 @f0() #0 {
+b0:
+  %v0 = load i8, i8* @g0, align 1, !tbaa !0
+  %v1 = icmp eq i8 %v0, 65
+  %v2 = zext i1 %v1 to i32
+  %v3 = load i32, i32* @g1, align 4, !tbaa !3
+  %v4 = or i32 %v2, %v3
+  store i32 %v4, i32* @g1, align 4, !tbaa !3
+  store i8 66, i8* @g2, align 1, !tbaa !0
+  ret i32 undef
+}
+
+; CHECK-LABEL: f1:
+; CHECK: r{{[0-9]+}} = cmp.eq(r{{[0-9]+}},r{{[0-9]+}})
+define i32 @f1() #0 {
+b0:
+  %v0 = load i8, i8* @g0, align 1, !tbaa !0
+  %v1 = load i8, i8* @g3, align 1, !tbaa !0
+  %v2 = icmp eq i8 %v0, %v1
+  %v3 = zext i1 %v2 to i32
+  %v4 = load i32, i32* @g1, align 4, !tbaa !3
+  %v5 = or i32 %v3, %v4
+  store i32 %v5, i32* @g1, align 4, !tbaa !3
+  store i8 66, i8* @g2, align 1, !tbaa !0
+  ret i32 undef
+}
+
+attributes #0 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2}
+!2 = !{!"Simple C/C++ TBAA"}
+!3 = !{!4, !4, i64 0}
+!4 = !{!"int", !1}

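For reference, f0 in reg-eq-cmp.ll corresponds roughly to the C below (f1 is the same with a loaded value in place of the constant 65). The i1 result of the compare is zero-extended and OR-ed into memory, which is why the test expects it to be materialized straight into a general register with r = cmp.eq rather than routed through a predicate register. Illustrative sketch only; the extern declarations are assumed.

  extern char g0, g2;    /* @g0, @g2 */
  extern int  g1;        /* @g1 */

  int f0(void) {
    g1 |= (g0 == 65);    /* icmp eq i8, 65; zext; or; store */
    g2 = 66;
    return 0;            /* the IR returns undef */
  }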
Added: llvm/trunk/test/CodeGen/Hexagon/reg-scav-imp-use-dbl-vec.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/reg-scav-imp-use-dbl-vec.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/reg-scav-imp-use-dbl-vec.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/reg-scav-imp-use-dbl-vec.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,286 @@
+; RUN: llc -march=hexagon -O3 < %s | FileCheck %s
+; REQUIRES: asserts
+
+; Check that the code compiles successfully.
+; CHECK: call f1
+
+target triple = "hexagon-unknown--elf"
+
+%s.0 = type { i64, i8*, [4 x i32], [4 x i32], [4 x i32], i32, i8, i8, [6 x i8] }
+
+; Function Attrs: nounwind
+declare noalias i8* @f0() local_unnamed_addr #0
+
+; Function Attrs: nounwind
+declare void @f1() local_unnamed_addr #0
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32) #1
+
+; Function Attrs: nounwind readnone
+declare <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32>, <32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vlsrw.128B(<32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vshufeh.128B(<32 x i32>, <32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32>, <64 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vasrh.128B(<32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <64 x i32> @llvm.hexagon.V6.vzh.128B(<32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32>, <32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <64 x i32> @llvm.hexagon.V6.vmpyuh.128B(<32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vaslw.acc.128B(<32 x i32>, <32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32>, <32 x i32>, i32) #1
+
+; Function Attrs: noreturn nounwind
+define void @f2(%s.0* noalias nocapture readonly %a01, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i32 %a5, i32 %a6) local_unnamed_addr #2 {
+b0:
+  %v0 = getelementptr inbounds %s.0, %s.0* %a01, i32 0, i32 1
+  %v1 = bitcast i8** %v0 to i16**
+  %v2 = load i16*, i16** %v1, align 4
+  %v3 = tail call i8* @f0()
+  %v4 = icmp sgt i32 %a1, 0
+  %v5 = select i1 %v4, i32 0, i32 %a1
+  %v6 = or i32 %v5, 1
+  %v7 = icmp sgt i32 %v6, 0
+  br i1 %v7, label %b1, label %b2, !prof !1
+
+b1:                                               ; preds = %b0
+  br label %b4
+
+b2:                                               ; preds = %b0
+  %v8 = ashr i32 %a6, 6
+  %v9 = mul i32 %v8, 64
+  %v10 = add nsw i32 %v9, 255
+  %v11 = icmp sgt i32 %a6, -193
+  %v12 = ashr i32 %a5, 6
+  %v13 = ashr i32 %a4, 6
+  %v14 = ashr i32 %a2, 6
+  %v15 = icmp ult i32 %v10, 128
+  %v16 = tail call i8* @f0()
+  %v17 = icmp eq i8* %v16, null
+  br i1 %v17, label %b6, label %b3, !prof !2
+
+b3:                                               ; preds = %b2
+  %v18 = mul nsw i32 %v13, 16
+  %v19 = mul nsw i32 %v13, 19
+  %v20 = mul nsw i32 %v13, 17
+  %v21 = mul nsw i32 %v13, 18
+  br label %b7
+
+b4:                                               ; preds = %b4, %b1
+  br label %b4
+
+b5:                                               ; preds = %b8
+  br label %b6
+
+b6:                                               ; preds = %b5, %b2
+  tail call void @f1() #3
+  unreachable
+
+b7:                                               ; preds = %b8, %b3
+  %v22 = phi i8* [ %v16, %b3 ], [ %v28, %b8 ]
+  %v23 = phi i32 [ 1, %b3 ], [ %v27, %b8 ]
+  %v24 = sub i32 %v23, %a3
+  %v25 = mul i32 %v24, %v12
+  %v26 = sub i32 %v25, %v14
+  br i1 %v11, label %b9, label %b8
+
+b8:                                               ; preds = %b13, %b7
+  %v27 = add nuw nsw i32 %v23, 1
+  %v28 = tail call i8* @f0()
+  %v29 = icmp eq i8* %v28, null
+  br i1 %v29, label %b5, label %b7, !prof !2
+
+b9:                                               ; preds = %b7
+  %v30 = add i32 %v26, %v18
+  %v31 = add i32 %v26, %v19
+  %v32 = add i32 %v26, %v20
+  %v33 = add i32 %v26, %v21
+  %v34 = tail call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 undef) #3
+  %v35 = tail call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 8) #3
+  %v36 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> %v35, <32 x i32> %v35)
+  %v37 = bitcast i8* %v22 to i16*
+  br i1 %v15, label %b13, label %b10
+
+b10:                                              ; preds = %b9
+  %v38 = tail call <64 x i32> @llvm.hexagon.V6.vzh.128B(<32 x i32> undef) #3
+  %v39 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> undef, <64 x i32> %v38) #3
+  %v40 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> %v39, <64 x i32> %v36) #3
+  %v41 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v40)
+  %v42 = tail call <32 x i32> @llvm.hexagon.V6.vlsrw.128B(<32 x i32> %v41, i32 4) #3
+  %v43 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> undef, <32 x i32> %v42)
+  %v44 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v43) #3
+  %v45 = tail call <32 x i32> @llvm.hexagon.V6.vshufeh.128B(<32 x i32> undef, <32 x i32> %v44) #3
+  br label %b11
+
+b11:                                              ; preds = %b11, %b10
+  %v46 = phi <32 x i32> [ %v120, %b11 ], [ undef, %b10 ]
+  %v47 = phi <32 x i32> [ %v115, %b11 ], [ undef, %b10 ]
+  %v48 = phi <32 x i32> [ %v110, %b11 ], [ undef, %b10 ]
+  %v49 = phi i32 [ %v124, %b11 ], [ 0, %b10 ]
+  %v50 = phi i32 [ %v125, %b11 ], [ undef, %b10 ]
+  %v51 = add i32 %v49, %v33
+  %v52 = shl nsw i32 %v51, 6
+  %v53 = getelementptr inbounds i16, i16* %v2, i32 %v52
+  %v54 = bitcast i16* %v53 to <32 x i32>*
+  %v55 = load <32 x i32>, <32 x i32>* %v54, align 128, !tbaa !3
+  %v56 = add i32 %v49, %v32
+  %v57 = shl nsw i32 %v56, 6
+  %v58 = getelementptr inbounds i16, i16* %v2, i32 %v57
+  %v59 = bitcast i16* %v58 to <32 x i32>*
+  %v60 = load <32 x i32>, <32 x i32>* %v59, align 128, !tbaa !3
+  %v61 = add i32 %v31, %v49
+  %v62 = shl nsw i32 %v61, 6
+  %v63 = getelementptr inbounds i16, i16* %v2, i32 %v62
+  %v64 = bitcast i16* %v63 to <32 x i32>*
+  %v65 = load <32 x i32>, <32 x i32>* %v64, align 128, !tbaa !3
+  %v66 = add i32 %v49, %v30
+  %v67 = shl nsw i32 %v66, 6
+  %v68 = getelementptr inbounds i16, i16* %v2, i32 %v67
+  %v69 = bitcast i16* %v68 to <32 x i32>*
+  %v70 = load <32 x i32>, <32 x i32>* %v69, align 128, !tbaa !3
+  %v71 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v55, <32 x i32> undef, i32 92)
+  %v72 = tail call <32 x i32> @llvm.hexagon.V6.vasrh.128B(<32 x i32> %v71, i32 1) #3
+  %v73 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v72, <32 x i32> %v34) #3
+  %v74 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuh.128B(<32 x i32> %v73, i32 393222) #3
+  %v75 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v60, <32 x i32> %v48, i32 92)
+  %v76 = tail call <32 x i32> @llvm.hexagon.V6.vasrh.128B(<32 x i32> %v75, i32 1) #3
+  %v77 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v76, <32 x i32> %v34) #3
+  %v78 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v65, <32 x i32> undef, i32 92)
+  %v79 = tail call <32 x i32> @llvm.hexagon.V6.vasrh.128B(<32 x i32> %v78, i32 1) #3
+  %v80 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v79, <32 x i32> %v34) #3
+  %v81 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v77, <32 x i32> %v80) #3
+  %v82 = tail call <64 x i32> @llvm.hexagon.V6.vzh.128B(<32 x i32> %v81) #3
+  %v83 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v74)
+  %v84 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v82)
+  %v85 = tail call <32 x i32> @llvm.hexagon.V6.vaslw.acc.128B(<32 x i32> %v83, <32 x i32> %v84, i32 2) #3
+  %v86 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> %v85, <32 x i32> undef)
+  %v87 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v70, <32 x i32> %v47, i32 92)
+  %v88 = tail call <32 x i32> @llvm.hexagon.V6.vasrh.128B(<32 x i32> %v87, i32 1) #3
+  %v89 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v88, <32 x i32> %v34) #3
+  %v90 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> undef, <32 x i32> %v46, i32 92)
+  %v91 = tail call <32 x i32> @llvm.hexagon.V6.vasrh.128B(<32 x i32> %v90, i32 1) #3
+  %v92 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v91, <32 x i32> %v34) #3
+  %v93 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v89, <32 x i32> %v92) #3
+  %v94 = tail call <64 x i32> @llvm.hexagon.V6.vzh.128B(<32 x i32> %v93) #3
+  %v95 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> %v86, <64 x i32> %v94) #3
+  %v96 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> %v95, <64 x i32> %v36) #3
+  %v97 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v96)
+  %v98 = tail call <32 x i32> @llvm.hexagon.V6.vlsrw.128B(<32 x i32> %v97, i32 4) #3
+  %v99 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> %v98, <32 x i32> undef)
+  %v100 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v99) #3
+  %v101 = tail call <32 x i32> @llvm.hexagon.V6.vshufeh.128B(<32 x i32> undef, <32 x i32> %v100) #3
+  %v102 = shl nsw i32 %v49, 6
+  %v103 = getelementptr inbounds i16, i16* %v37, i32 %v102
+  %v104 = bitcast i16* %v103 to <32 x i32>*
+  store <32 x i32> %v101, <32 x i32>* %v104, align 128, !tbaa !6
+  %v105 = or i32 %v49, 1
+  %v106 = add i32 %v105, %v32
+  %v107 = shl nsw i32 %v106, 6
+  %v108 = getelementptr inbounds i16, i16* %v2, i32 %v107
+  %v109 = bitcast i16* %v108 to <32 x i32>*
+  %v110 = load <32 x i32>, <32 x i32>* %v109, align 128, !tbaa !3
+  %v111 = add i32 %v105, %v30
+  %v112 = shl nsw i32 %v111, 6
+  %v113 = getelementptr inbounds i16, i16* %v2, i32 %v112
+  %v114 = bitcast i16* %v113 to <32 x i32>*
+  %v115 = load <32 x i32>, <32 x i32>* %v114, align 128, !tbaa !3
+  %v116 = add i32 %v105, %v26
+  %v117 = shl nsw i32 %v116, 6
+  %v118 = getelementptr inbounds i16, i16* %v2, i32 %v117
+  %v119 = bitcast i16* %v118 to <32 x i32>*
+  %v120 = load <32 x i32>, <32 x i32>* %v119, align 128, !tbaa !3
+  %v121 = shl nsw i32 %v105, 6
+  %v122 = getelementptr inbounds i16, i16* %v37, i32 %v121
+  %v123 = bitcast i16* %v122 to <32 x i32>*
+  store <32 x i32> %v45, <32 x i32>* %v123, align 128, !tbaa !6
+  %v124 = add nuw nsw i32 %v49, 2
+  %v125 = add i32 %v50, -2
+  %v126 = icmp eq i32 %v125, 0
+  br i1 %v126, label %b12, label %b11
+
+b12:                                              ; preds = %b11
+  br label %b13
+
+b13:                                              ; preds = %b12, %b9
+  %v127 = phi i32 [ 0, %b9 ], [ %v124, %b12 ]
+  %v128 = add i32 %v127, %v33
+  %v129 = shl nsw i32 %v128, 6
+  %v130 = getelementptr inbounds i16, i16* %v2, i32 %v129
+  %v131 = bitcast i16* %v130 to <32 x i32>*
+  %v132 = load <32 x i32>, <32 x i32>* %v131, align 128, !tbaa !3
+  %v133 = add i32 %v127, %v30
+  %v134 = shl nsw i32 %v133, 6
+  %v135 = getelementptr inbounds i16, i16* %v2, i32 %v134
+  %v136 = bitcast i16* %v135 to <32 x i32>*
+  %v137 = load <32 x i32>, <32 x i32>* %v136, align 128, !tbaa !3
+  %v138 = add i32 %v127, %v26
+  %v139 = shl nsw i32 %v138, 6
+  %v140 = getelementptr inbounds i16, i16* %v2, i32 %v139
+  %v141 = bitcast i16* %v140 to <32 x i32>*
+  %v142 = load <32 x i32>, <32 x i32>* %v141, align 128, !tbaa !3
+  %v143 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v132, <32 x i32> undef, i32 92)
+  %v144 = tail call <32 x i32> @llvm.hexagon.V6.vasrh.128B(<32 x i32> %v143, i32 1) #3
+  %v145 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v144, <32 x i32> %v34) #3
+  %v146 = tail call <64 x i32> @llvm.hexagon.V6.vmpyuh.128B(<32 x i32> %v145, i32 393222) #3
+  %v147 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v146)
+  %v148 = tail call <32 x i32> @llvm.hexagon.V6.vaslw.acc.128B(<32 x i32> %v147, <32 x i32> undef, i32 2) #3
+  %v149 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> %v148, <32 x i32> undef)
+  %v150 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v137, <32 x i32> undef, i32 92)
+  %v151 = tail call <32 x i32> @llvm.hexagon.V6.vasrh.128B(<32 x i32> %v150, i32 1) #3
+  %v152 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v151, <32 x i32> %v34) #3
+  %v153 = tail call <32 x i32> @llvm.hexagon.V6.valignb.128B(<32 x i32> %v142, <32 x i32> undef, i32 92)
+  %v154 = tail call <32 x i32> @llvm.hexagon.V6.vasrh.128B(<32 x i32> %v153, i32 1) #3
+  %v155 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v154, <32 x i32> %v34) #3
+  %v156 = tail call <32 x i32> @llvm.hexagon.V6.vaddh.128B(<32 x i32> %v152, <32 x i32> %v155) #3
+  %v157 = tail call <64 x i32> @llvm.hexagon.V6.vzh.128B(<32 x i32> %v156) #3
+  %v158 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> %v149, <64 x i32> %v157) #3
+  %v159 = tail call <64 x i32> @llvm.hexagon.V6.vaddw.dv.128B(<64 x i32> %v158, <64 x i32> %v36) #3
+  %v160 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v159)
+  %v161 = tail call <32 x i32> @llvm.hexagon.V6.vlsrw.128B(<32 x i32> %v160, i32 4) #3
+  %v162 = tail call <64 x i32> @llvm.hexagon.V6.vcombine.128B(<32 x i32> %v161, <32 x i32> undef)
+  %v163 = tail call <32 x i32> @llvm.hexagon.V6.hi.128B(<64 x i32> %v162) #3
+  %v164 = tail call <32 x i32> @llvm.hexagon.V6.vshufeh.128B(<32 x i32> %v163, <32 x i32> undef) #3
+  %v165 = getelementptr inbounds i16, i16* %v37, i32 undef
+  %v166 = bitcast i16* %v165 to <32 x i32>*
+  store <32 x i32> %v164, <32 x i32>* %v166, align 128, !tbaa !6
+  br label %b8
+}
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length128b" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { noreturn nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length128b" }
+attributes #3 = { nounwind }
+
+!llvm.module.flags = !{!0}
+
+!0 = !{i32 2, !"halide_mattrs", !"+hvxv60,+hvx-length128b"}
+!1 = !{!"branch_weights", i32 1073741824, i32 0}
+!2 = !{!"branch_weights", i32 0, i32 1073741824}
+!3 = !{!4, !4, i64 0}
+!4 = !{!"input_yuv", !5}
+!5 = !{!"Halide buffer"}
+!6 = !{!7, !7, i64 0}
+!7 = !{!"blurred_ds_y", !5}

Added: llvm/trunk/test/CodeGen/Hexagon/reg-scavengebug-2.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/reg-scavengebug-2.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/reg-scavengebug-2.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/reg-scavengebug-2.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,122 @@
+; RUN: llc -O3 -march=hexagon < %s | FileCheck %s
+; CHECK: v{{[0-9]+}} = vmem(r{{[0-9]+}}+#0)
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define void @f0(i16* nocapture %a0) #0 {
+b0:
+  br i1 undef, label %b1, label %b5
+
+b1:                                               ; preds = %b0
+  %v0 = bitcast i16* %a0 to <16 x i32>*
+  br label %b2
+
+b2:                                               ; preds = %b4, %b1
+  %v1 = phi i32 [ 0, %b1 ], [ %v50, %b4 ]
+  %v2 = phi <16 x i32>* [ %v0, %b1 ], [ undef, %b4 ]
+  br label %b3
+
+b3:                                               ; preds = %b3, %b2
+  %v3 = phi i32 [ -4, %b2 ], [ %v40, %b3 ]
+  %v4 = add i32 0, -64
+  %v5 = getelementptr inbounds i8, i8* null, i32 %v4
+  %v6 = bitcast i8* %v5 to <16 x i32>*
+  %v7 = load <16 x i32>, <16 x i32>* %v6, align 64, !tbaa !0
+  %v8 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> undef, <16 x i32> %v7, i32 4)
+  %v9 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v8, <16 x i32> zeroinitializer)
+  %v10 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v9, <16 x i32> undef)
+  %v11 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v10, <16 x i32> undef, <16 x i32> undef)
+  %v12 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> zeroinitializer, <16 x i32> %v11, <16 x i32> undef)
+  %v13 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> undef, <16 x i32> %v12, <16 x i32> undef)
+  %v14 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> undef, <16 x i32> undef, i32 1)
+  %v15 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v14, <16 x i32> zeroinitializer)
+  %v16 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> zeroinitializer, <16 x i32> zeroinitializer)
+  %v17 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> zeroinitializer, <16 x i32> undef)
+  %v18 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v15, <16 x i32> undef)
+  %v19 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> zeroinitializer, <16 x i32> undef)
+  %v20 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v16, <16 x i32> undef)
+  %v21 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v19, <16 x i32> undef, <16 x i32> zeroinitializer)
+  %v22 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v20, <16 x i32> undef, <16 x i32> zeroinitializer)
+  %v23 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v22, <16 x i32> %v21)
+  %v24 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> zeroinitializer, <32 x i32> %v23, i32 16843009)
+  %v25 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v17, <16 x i32> %v13, <16 x i32> undef)
+  %v26 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v18, <16 x i32> %v25, <16 x i32> undef)
+  %v27 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v19, <16 x i32> %v26, <16 x i32> undef)
+  %v28 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v20, <16 x i32> %v27, <16 x i32> undef)
+  %v29 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> undef, <16 x i32> zeroinitializer)
+  %v30 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> zeroinitializer, <16 x i32> zeroinitializer)
+  %v31 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> undef, <16 x i32> undef)
+  %v32 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v29, <16 x i32> undef)
+  %v33 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v30, <16 x i32> undef)
+  %v34 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v24, <32 x i32> zeroinitializer, i32 16843009)
+  %v35 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v34, <32 x i32> undef, i32 16843009)
+  %v36 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> zeroinitializer, <16 x i32> %v28, <16 x i32> undef)
+  %v37 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v31, <16 x i32> %v36, <16 x i32> undef)
+  %v38 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v32, <16 x i32> %v37, <16 x i32> undef)
+  %v39 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v33, <16 x i32> %v38, <16 x i32> undef)
+  %v40 = add nsw i32 %v3, 3
+  %v41 = icmp eq i32 %v40, 5
+  br i1 %v41, label %b4, label %b3
+
+b4:                                               ; preds = %b3
+  %v42 = phi <16 x i32> [ %v39, %b3 ]
+  %v43 = phi <32 x i32> [ %v35, %b3 ]
+  %v44 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v43)
+  %v45 = tail call <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32> zeroinitializer, <16 x i32> %v44, i32 -2)
+  %v46 = tail call <32 x i32> @llvm.hexagon.V6.vunpackub(<16 x i32> %v42)
+  %v47 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v45)
+  store <16 x i32> %v47, <16 x i32>* %v2, align 64, !tbaa !0
+  %v48 = getelementptr inbounds <16 x i32>, <16 x i32>* null, i32 1
+  %v49 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v46)
+  store <16 x i32> %v49, <16 x i32>* %v48, align 64, !tbaa !0
+  %v50 = add nsw i32 %v1, 1
+  %v51 = icmp slt i32 %v50, 0
+  br i1 %v51, label %b2, label %b5
+
+b5:                                               ; preds = %b4, %b0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1>, <16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1>, <16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32>, <32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vshuffvdd(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vunpackub(<16 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/reg-scavengebug-4.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/reg-scavengebug-4.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/reg-scavengebug-4.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/reg-scavengebug-4.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,204 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+; Test that the register scavenger does not fail because it can't find
+; a spill slot. This occurs when the offset for a spilled object is too large
+; and another register is required to compute its location on the stack.
+
+; Function Attrs: nounwind
+define void @f0(i8* nocapture readonly %a0, i32 %a1, i32 %a2, i8* nocapture readonly %a3, i8* nocapture readonly %a4, i8* nocapture %a5) #0 {
+b0:
+  %v0 = tail call <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32> zeroinitializer)
+  br i1 undef, label %b1, label %b5
+
+b1:                                               ; preds = %b0
+  %v1 = getelementptr inbounds i8, i8* %a3, i32 31
+  br label %b2
+
+b2:                                               ; preds = %b4, %b1
+  %v2 = phi <16 x i32>* [ undef, %b1 ], [ %v102, %b4 ]
+  %v3 = phi i32 [ %a2, %b1 ], [ undef, %b4 ]
+  %v4 = tail call <32 x i32> @llvm.hexagon.V6.vmpyh(<16 x i32> undef, i32 undef)
+  br label %b3
+
+b3:                                               ; preds = %b3, %b2
+  %v5 = phi <32 x i32> [ %v4, %b2 ], [ %v72, %b3 ]
+  %v6 = phi <32 x i32> [ zeroinitializer, %b2 ], [ %v71, %b3 ]
+  %v7 = phi i32 [ -4, %b2 ], [ %v73, %b3 ]
+  %v8 = load <16 x i32>, <16 x i32>* undef, align 64
+  %v9 = mul nsw i32 %v7, 9
+  %v10 = tail call <16 x i32> @llvm.hexagon.V6.vlalignb(<16 x i32> %v8, <16 x i32> undef, i32 4)
+  %v11 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> undef, <16 x i32> %v8, i32 4)
+  %v12 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v10, <16 x i32> undef)
+  %v13 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v11, <16 x i32> undef)
+  %v14 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb(<16 x i32> %v12, <16 x i32> zeroinitializer, i32 0)
+  %v15 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %v14, <16 x i32> %v12, <16 x i32> zeroinitializer, i32 1)
+  %v16 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %v15, <16 x i32> %v12, <16 x i32> undef, i32 2)
+  %v17 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %v16, <16 x i32> %v12, <16 x i32> undef, i32 3)
+  %v18 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %v17, <16 x i32> %v12, <16 x i32> %v0, i32 4)
+  %v19 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %v18, <16 x i32> %v12, <16 x i32> %v0, i32 5)
+  %v20 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %v19, <16 x i32> %v12, <16 x i32> undef, i32 6)
+  %v21 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %v20, <16 x i32> %v12, <16 x i32> undef, i32 7)
+  %v22 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> undef, <16 x i32> %v13, <16 x i32> %v0, i32 4)
+  %v23 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %v22, <16 x i32> %v13, <16 x i32> %v0, i32 5)
+  %v24 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %v23, <16 x i32> %v13, <16 x i32> undef, i32 6)
+  %v25 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %v24, <16 x i32> %v13, <16 x i32> undef, i32 7)
+  %v26 = add nsw i32 %v9, 36
+  %v27 = getelementptr inbounds i8, i8* %a3, i32 %v26
+  %v28 = load i8, i8* %v27, align 1
+  %v29 = zext i8 %v28 to i32
+  %v30 = tail call i32 @llvm.hexagon.S2.vsplatrb(i32 %v29)
+  %v31 = tail call <32 x i32> @llvm.hexagon.V6.vmpyub(<16 x i32> %v21, i32 %v30)
+  %v32 = tail call <32 x i32> @llvm.hexagon.V6.vmpyub(<16 x i32> %v25, i32 %v30)
+  %v33 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v31)
+  %v34 = tail call <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32> undef, <16 x i32> %v33)
+  %v35 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v32)
+  %v36 = tail call <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32> %v35, <16 x i32> undef)
+  %v37 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v36, <16 x i32> %v34)
+  %v38 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v6, <32 x i32> %v37, i32 16843009)
+  %v39 = tail call <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32> %v10, <16 x i32> %v34)
+  %v40 = tail call <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32> %v11, <16 x i32> %v36)
+  %v41 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v39)
+  %v42 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v40)
+  %v43 = tail call <32 x i32> @llvm.hexagon.V6.vadduhw(<16 x i32> %v41, <16 x i32> %v42)
+  %v44 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32> %v5, <32 x i32> %v43)
+  %v45 = add nsw i32 %v9, 37
+  %v46 = getelementptr inbounds i8, i8* %a3, i32 %v45
+  %v47 = load i8, i8* %v46, align 1
+  %v48 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v38, <32 x i32> undef, i32 16843009)
+  %v49 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32> %v44, <32 x i32> undef)
+  %v50 = tail call <16 x i32> @llvm.hexagon.V6.vlalignb(<16 x i32> %v8, <16 x i32> undef, i32 2)
+  %v51 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v48, <32 x i32> undef, i32 16843009)
+  %v52 = tail call <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32> %v50, <16 x i32> undef)
+  %v53 = tail call <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32> undef, <16 x i32> zeroinitializer)
+  %v54 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v52)
+  %v55 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v53)
+  %v56 = tail call <32 x i32> @llvm.hexagon.V6.vadduhw(<16 x i32> %v54, <16 x i32> %v55)
+  %v57 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32> %v49, <32 x i32> %v56)
+  %v58 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> undef, <16 x i32> %v8, i32 1)
+  %v59 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v58, <16 x i32> undef)
+  %v60 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> undef, <16 x i32> %v59, <16 x i32> undef, i32 7)
+  %v61 = load i8, i8* undef, align 1
+  %v62 = zext i8 %v61 to i32
+  %v63 = tail call i32 @llvm.hexagon.S2.vsplatrb(i32 %v62)
+  %v64 = tail call <32 x i32> @llvm.hexagon.V6.vmpyub(<16 x i32> undef, i32 %v63)
+  %v65 = tail call <32 x i32> @llvm.hexagon.V6.vmpyub(<16 x i32> %v60, i32 %v63)
+  %v66 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v64)
+  %v67 = tail call <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32> undef, <16 x i32> %v66)
+  %v68 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v65)
+  %v69 = tail call <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32> undef, <16 x i32> %v68)
+  %v70 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v69, <16 x i32> %v67)
+  %v71 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v51, <32 x i32> %v70, i32 16843009)
+  %v72 = tail call <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32> %v57, <32 x i32> undef)
+  %v73 = add nsw i32 %v7, 1
+  %v74 = icmp eq i32 %v73, 5
+  br i1 %v74, label %b4, label %b3
+
+b4:                                               ; preds = %b3
+  %v75 = phi <32 x i32> [ %v72, %b3 ]
+  %v76 = phi <32 x i32> [ %v71, %b3 ]
+  %v77 = load i8, i8* %v1, align 1
+  %v78 = zext i8 %v77 to i32
+  %v79 = tail call i32 @llvm.hexagon.S2.vsplatrb(i32 %v78)
+  %v80 = tail call <32 x i32> @llvm.hexagon.V6.vmpyub(<16 x i32> undef, i32 %v79)
+  %v81 = tail call <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32> undef, <16 x i32> undef)
+  %v82 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v80)
+  %v83 = tail call <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32> undef, <16 x i32> %v82)
+  %v84 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v83, <16 x i32> %v81)
+  %v85 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> undef, <32 x i32> %v84, i32 16843009)
+  %v86 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v85)
+  %v87 = tail call <32 x i32> @llvm.hexagon.V6.vmpyh(<16 x i32> %v86, i32 8388736)
+  %v88 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v87)
+  %v89 = tail call <16 x i32> @llvm.hexagon.V6.vasrw(<16 x i32> %v88, i32 1)
+  %v90 = tail call <16 x i32> @llvm.hexagon.V6.vasrw(<16 x i32> %v89, i32 1)
+  %v91 = tail call <16 x i32> @llvm.hexagon.V6.vasrw(<16 x i32> %v90, i32 1)
+  %v92 = tail call <16 x i32> @llvm.hexagon.V6.vasrw(<16 x i32> %v91, i32 1)
+  %v93 = tail call <16 x i32> @llvm.hexagon.V6.vasrw(<16 x i32> %v92, i32 1)
+  %v94 = tail call <16 x i32> @llvm.hexagon.V6.vsubwnq(<512 x i1> undef, <16 x i32> undef, <16 x i32> %v93)
+  %v95 = tail call <16 x i32> @llvm.hexagon.V6.vsubwnq(<512 x i1> undef, <16 x i32> %v94, <16 x i32> undef)
+  %v96 = tail call <16 x i32> @llvm.hexagon.V6.vasrw(<16 x i32> undef, i32 1)
+  %v97 = tail call <512 x i1> @llvm.hexagon.V6.vgtw(<16 x i32> %v96, <16 x i32> %v95)
+  %v98 = tail call <16 x i32> @llvm.hexagon.V6.vaddwnq(<512 x i1> %v97, <16 x i32> undef, <16 x i32> undef)
+  %v99 = tail call <16 x i32> @llvm.hexagon.V6.vaddwnq(<512 x i1> undef, <16 x i32> undef, <16 x i32> undef)
+  %v100 = tail call <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32> %v99, <16 x i32> %v98)
+  %v101 = tail call <16 x i32> @llvm.hexagon.V6.vshuffeb(<16 x i32> %v100, <16 x i32> undef)
+  %v102 = getelementptr inbounds <16 x i32>, <16 x i32>* %v2, i32 1
+  store <16 x i32> %v101, <16 x i32>* %v2, align 64
+  %v103 = icmp sgt i32 %v3, 64
+  br i1 %v103, label %b2, label %b5
+
+b5:                                               ; preds = %b4, %b0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vmpyh(<16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vlalignb(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vlutvvb(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32>, <16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S2.vsplatrb(i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vmpyub(<16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vshuffob(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32>, <32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vaddw.dv(<32 x i32>, <32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vadduhw(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <512 x i1> @llvm.hexagon.V6.vgtw(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vaddwnq(<512 x i1>, <16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vsubwnq(<512 x i1>, <16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vasrw(<16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vshuffeb(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vshufeh(<16 x i32>, <16 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv62" "target-features"="+hvxv62,+hvx-length64b" }
+attributes #1 = { nounwind readnone }
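
For context, here is a rough sketch (not part of the commit) of the code shape the
reg-scavengebug-4.ll test above exercises: when an HVX vector spill lands at a frame
offset that does not fit in the vmem immediate, the offset must first be materialized
in a scratch register, and the scavenger needs an emergency spill slot to free one up.
The register numbers and the offset below are purely illustrative:

    r28 = add(r29,#4224)       // r29 is the stack pointer; offset too large for vmem
    vmem(r28+#0) = v0          // spill of an HVX vector register
    ...
    v0 = vmem(r28+#0)          // reload through the same scratch register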

Added: llvm/trunk/test/CodeGen/Hexagon/reg-scavengebug-5.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/reg-scavengebug-5.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/reg-scavengebug-5.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/reg-scavengebug-5.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,263 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+; Test that the register scavenger does not assert because a spill slot
+; was not found. The bug was that the Hexagon spill code did not allocate
+; the spill slot: the function that should return true when a spill is
+; inserted (to indicate that the code changed) did not always return true.
+
+; Function Attrs: nounwind
+define void @f0(i8* noalias nocapture readonly %a0, i32 %a1, i32 %a2, i32 %a3, i8* noalias nocapture %a4, i32 %a5) #0 {
+b0:
+  %v0 = sub i32 0, %a1
+  %v1 = getelementptr inbounds i8, i8* %a0, i32 %v0
+  %v2 = getelementptr inbounds i8, i8* %a0, i32 %a1
+  %v3 = mul nsw i32 %a1, 2
+  %v4 = getelementptr inbounds i8, i8* %a0, i32 %v3
+  %v5 = bitcast i8* %a4 to <16 x i32>*
+  %v6 = getelementptr inbounds i8, i8* %a4, i32 %a5
+  %v7 = bitcast i8* %v6 to <16 x i32>*
+  %v8 = tail call <16 x i32> @llvm.hexagon.V6.vd0()
+  %v9 = load <16 x i32>, <16 x i32>* undef, align 64
+  %v10 = or i64 undef, 0
+  %v11 = trunc i64 %v10 to i32
+  %v12 = load i8, i8* undef, align 1
+  %v13 = zext i8 %v12 to i64
+  %v14 = shl nuw nsw i64 %v13, 8
+  %v15 = or i64 0, %v14
+  %v16 = trunc i64 %v15 to i32
+  %v17 = load i8, i8* undef, align 1
+  %v18 = zext i8 %v17 to i64
+  %v19 = or i64 0, %v18
+  %v20 = or i64 %v19, 0
+  %v21 = or i64 %v20, 0
+  %v22 = trunc i64 %v21 to i32
+  %v23 = load i8, i8* undef, align 1
+  %v24 = zext i8 %v23 to i64
+  %v25 = shl nuw nsw i64 %v24, 8
+  %v26 = or i64 undef, %v25
+  %v27 = trunc i64 %v26 to i32
+  %v28 = icmp sgt i32 %a2, 64
+  br i1 %v28, label %b1, label %b6
+
+b1:                                               ; preds = %b0
+  %v29 = getelementptr inbounds i8, i8* %v4, i32 64
+  %v30 = bitcast i8* %v29 to <16 x i32>*
+  %v31 = getelementptr inbounds i8, i8* %v2, i32 64
+  %v32 = bitcast i8* %v31 to <16 x i32>*
+  %v33 = getelementptr inbounds i8, i8* %a0, i32 64
+  %v34 = bitcast i8* %v33 to <16 x i32>*
+  %v35 = getelementptr inbounds i8, i8* %v1, i32 64
+  %v36 = bitcast i8* %v35 to <16 x i32>*
+  %v37 = add i32 0, 64
+  %v38 = getelementptr i8, i8* %a4, i32 %v37
+  %v39 = add i32 %a2, -65
+  %v40 = lshr i32 %v39, 6
+  %v41 = add nuw nsw i32 %v40, 1
+  %v42 = and i32 %v41, 3
+  %v43 = icmp eq i32 %v42, 0
+  br i1 undef, label %b2, label %b4
+
+b2:                                               ; preds = %b2, %b1
+  %v44 = phi i32 [ %v144, %b2 ], [ %a2, %b1 ]
+  %v45 = phi <16 x i32> [ %v101, %b2 ], [ %v8, %b1 ]
+  %v46 = phi <16 x i32> [ %v113, %b2 ], [ undef, %b1 ]
+  %v47 = phi <16 x i32> [ %v102, %b2 ], [ %v8, %b1 ]
+  %v48 = phi <16 x i32> [ %v118, %b2 ], [ undef, %b1 ]
+  %v49 = phi <16 x i32>* [ %v112, %b2 ], [ %v36, %b1 ]
+  %v50 = phi <16 x i32>* [ %v114, %b2 ], [ %v34, %b1 ]
+  %v51 = phi <16 x i32>* [ %v116, %b2 ], [ %v32, %b1 ]
+  %v52 = phi <16 x i32>* [ undef, %b2 ], [ %v30, %b1 ]
+  %v53 = phi <16 x i32>* [ %v139, %b2 ], [ %v5, %b1 ]
+  %v54 = phi <16 x i32>* [ %v143, %b2 ], [ %v7, %b1 ]
+  %v55 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v46, <16 x i32> %v45, i32 1)
+  %v56 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> undef, <16 x i32> %v47, i32 1)
+  %v57 = getelementptr inbounds <16 x i32>, <16 x i32>* %v49, i32 1
+  %v58 = load <16 x i32>, <16 x i32>* %v49, align 64
+  %v59 = getelementptr inbounds <16 x i32>, <16 x i32>* %v50, i32 1
+  %v60 = load <16 x i32>, <16 x i32>* %v50, align 64
+  %v61 = load <16 x i32>, <16 x i32>* %v51, align 64
+  %v62 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v58, <16 x i32> %v46, i32 1)
+  %v63 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v60, <16 x i32> undef, i32 1)
+  %v64 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v61, <16 x i32> undef, i32 1)
+  %v65 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> undef, <16 x i32> %v48, i32 1)
+  %v66 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v62, <16 x i32> %v55)
+  %v67 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v63, <16 x i32> %v56)
+  %v68 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv(<32 x i32> %v66, i32 %v11)
+  %v69 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v64, <16 x i32> undef)
+  %v70 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v68, <32 x i32> %v67, i32 %v16)
+  %v71 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v62, <16 x i32> %v63)
+  %v72 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v70, <32 x i32> %v71, i32 %v22)
+  %v73 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v72, <32 x i32> %v69, i32 0)
+  %v74 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v73, <16 x i32> %v64, i32 %v27)
+  %v75 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> zeroinitializer, <16 x i32> %v65, i32 %v27)
+  %v76 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v74)
+  %v77 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %v76, <16 x i32> undef, i32 %a3)
+  %v78 = getelementptr inbounds <16 x i32>, <16 x i32>* %v53, i32 1
+  store <16 x i32> %v77, <16 x i32>* %v53, align 64
+  %v79 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v75)
+  %v80 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %v79, <16 x i32> undef, i32 %a3)
+  %v81 = getelementptr inbounds <16 x i32>, <16 x i32>* %v54, i32 1
+  store <16 x i32> %v80, <16 x i32>* %v54, align 64
+  %v82 = getelementptr inbounds <16 x i32>, <16 x i32>* %v49, i32 2
+  %v83 = load <16 x i32>, <16 x i32>* %v57, align 64
+  %v84 = getelementptr inbounds <16 x i32>, <16 x i32>* %v50, i32 2
+  %v85 = load <16 x i32>, <16 x i32>* %v59, align 64
+  %v86 = load <16 x i32>, <16 x i32>* undef, align 64
+  %v87 = load <16 x i32>, <16 x i32>* null, align 64
+  %v88 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v83, <16 x i32> %v58, i32 1)
+  %v89 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v85, <16 x i32> %v60, i32 1)
+  %v90 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v86, <16 x i32> %v61, i32 1)
+  %v91 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v90, <16 x i32> undef)
+  %v92 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> undef, <32 x i32> undef, i32 %v16)
+  %v93 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v88, <16 x i32> %v89)
+  %v94 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v92, <32 x i32> %v93, i32 %v22)
+  %v95 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v94, <32 x i32> %v91, i32 0)
+  %v96 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v95, <16 x i32> %v90, i32 %v27)
+  %v97 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v96)
+  %v98 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %v97, <16 x i32> undef, i32 %a3)
+  store <16 x i32> %v98, <16 x i32>* %v78, align 64
+  %v99 = getelementptr inbounds <16 x i32>, <16 x i32>* %v54, i32 2
+  store <16 x i32> undef, <16 x i32>* %v81, align 64
+  %v100 = getelementptr inbounds <16 x i32>, <16 x i32>* %v49, i32 3
+  %v101 = load <16 x i32>, <16 x i32>* %v82, align 64
+  %v102 = load <16 x i32>, <16 x i32>* %v84, align 64
+  %v103 = getelementptr inbounds <16 x i32>, <16 x i32>* %v51, i32 3
+  %v104 = load <16 x i32>, <16 x i32>* null, align 64
+  %v105 = getelementptr inbounds <16 x i32>, <16 x i32>* %v52, i32 3
+  %v106 = load <16 x i32>, <16 x i32>* undef, align 64
+  %v107 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> undef, <16 x i32> undef, i32 %a3)
+  store <16 x i32> %v107, <16 x i32>* undef, align 64
+  %v108 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> undef, <16 x i32> undef, i32 %a3)
+  %v109 = getelementptr inbounds <16 x i32>, <16 x i32>* %v54, i32 3
+  store <16 x i32> %v108, <16 x i32>* %v99, align 64
+  %v110 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v104, <16 x i32> %v86, i32 1)
+  %v111 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> %v106, <16 x i32> %v87, i32 1)
+  %v112 = getelementptr inbounds <16 x i32>, <16 x i32>* %v49, i32 4
+  %v113 = load <16 x i32>, <16 x i32>* %v100, align 64
+  %v114 = getelementptr inbounds <16 x i32>, <16 x i32>* %v50, i32 4
+  %v115 = load <16 x i32>, <16 x i32>* undef, align 64
+  %v116 = getelementptr inbounds <16 x i32>, <16 x i32>* %v51, i32 4
+  %v117 = load <16 x i32>, <16 x i32>* %v103, align 64
+  %v118 = load <16 x i32>, <16 x i32>* %v105, align 64
+  %v119 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v113, <16 x i32> %v101, i32 1)
+  %v120 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v115, <16 x i32> %v102, i32 1)
+  %v121 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v117, <16 x i32> %v104, i32 1)
+  %v122 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v118, <16 x i32> %v106, i32 1)
+  %v123 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v119, <16 x i32> undef)
+  %v124 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v120, <16 x i32> undef)
+  %v125 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv(<32 x i32> %v123, i32 %v11)
+  %v126 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv(<32 x i32> %v124, i32 %v11)
+  %v127 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v121, <16 x i32> %v110)
+  %v128 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v125, <32 x i32> %v124, i32 %v16)
+  %v129 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v126, <32 x i32> %v127, i32 %v16)
+  %v130 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v128, <32 x i32> undef, i32 %v22)
+  %v131 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> %v129, <32 x i32> undef, i32 %v22)
+  %v132 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v122, <16 x i32> %v111)
+  %v133 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v130, <32 x i32> %v127, i32 0)
+  %v134 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v131, <32 x i32> %v132, i32 0)
+  %v135 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v133, <16 x i32> %v121, i32 %v27)
+  %v136 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v134, <16 x i32> %v122, i32 %v27)
+  %v137 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v135)
+  %v138 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %v137, <16 x i32> undef, i32 %a3)
+  %v139 = getelementptr inbounds <16 x i32>, <16 x i32>* %v53, i32 4
+  store <16 x i32> %v138, <16 x i32>* undef, align 64
+  %v140 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v136)
+  %v141 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v136)
+  %v142 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %v140, <16 x i32> %v141, i32 %a3)
+  %v143 = getelementptr inbounds <16 x i32>, <16 x i32>* %v54, i32 4
+  store <16 x i32> %v142, <16 x i32>* %v109, align 64
+  %v144 = add nsw i32 %v44, -256
+  %v145 = icmp sgt i32 %v144, 256
+  br i1 %v145, label %b2, label %b3
+
+b3:                                               ; preds = %b2
+  %v146 = phi <16 x i32>* [ %v116, %b2 ]
+  %v147 = phi <16 x i32>* [ %v114, %b2 ]
+  %v148 = phi <16 x i32>* [ %v112, %b2 ]
+  br i1 %v43, label %b5, label %b4
+
+b4:                                               ; preds = %b3, %b1
+  %v149 = phi <16 x i32> [ %v9, %b1 ], [ undef, %b3 ]
+  %v150 = phi <16 x i32>* [ %v36, %b1 ], [ %v148, %b3 ]
+  %v151 = phi <16 x i32>* [ %v34, %b1 ], [ %v147, %b3 ]
+  %v152 = phi <16 x i32>* [ %v32, %b1 ], [ %v146, %b3 ]
+  %v153 = phi <16 x i32>* [ %v5, %b1 ], [ undef, %b3 ]
+  %v154 = load <16 x i32>, <16 x i32>* %v150, align 64
+  %v155 = load <16 x i32>, <16 x i32>* %v151, align 64
+  %v156 = load <16 x i32>, <16 x i32>* %v152, align 64
+  %v157 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v154, <16 x i32> undef, i32 1)
+  %v158 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v155, <16 x i32> undef, i32 1)
+  %v159 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %v156, <16 x i32> %v149, i32 1)
+  %v160 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v157, <16 x i32> %v158)
+  %v161 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> undef, <32 x i32> %v160, i32 %v22)
+  %v162 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v161, <32 x i32> undef, i32 0)
+  %v163 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v162, <16 x i32> %v159, i32 %v27)
+  %v164 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v163)
+  %v165 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> %v164, <16 x i32> undef, i32 %a3)
+  store <16 x i32> %v165, <16 x i32>* %v153, align 64
+  unreachable
+
+b5:                                               ; preds = %b3
+  %v166 = bitcast i8* %v38 to <16 x i32>*
+  br label %b6
+
+b6:                                               ; preds = %b5, %b0
+  %v167 = phi <16 x i32> [ %v8, %b0 ], [ undef, %b5 ]
+  %v168 = phi <16 x i32>* [ %v5, %b0 ], [ %v166, %b5 ]
+  %v169 = phi <16 x i32>* [ %v7, %b0 ], [ undef, %b5 ]
+  %v170 = tail call <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32> undef, <16 x i32> %v167, i32 1)
+  %v171 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> undef, <16 x i32> undef, i32 1)
+  %v172 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> undef, <16 x i32> %v170)
+  %v173 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> undef, <16 x i32> %v171)
+  %v174 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v171, <16 x i32> undef)
+  %v175 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> undef, <32 x i32> %v173, i32 %v22)
+  %v176 = tail call <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32> undef, <32 x i32> %v174, i32 %v22)
+  %v177 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v175, <32 x i32> %v172, i32 0)
+  %v178 = tail call <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32> %v176, <32 x i32> undef, i32 0)
+  %v179 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v177, <16 x i32> undef, i32 %v27)
+  %v180 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v178, <16 x i32> undef, i32 %v27)
+  %v181 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v179)
+  %v182 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> undef, <16 x i32> %v181, i32 %a3)
+  store <16 x i32> %v182, <16 x i32>* %v168, align 64
+  %v183 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %v180)
+  %v184 = tail call <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32> undef, <16 x i32> %v183, i32 %a3)
+  store <16 x i32> %v184, <16 x i32>* %v169, align 64
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vd0() #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vlalignbi(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vdmpybus.dv(<32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vdmpybus.dv.acc(<32 x i32>, <32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vmpabus.acc(<32 x i32>, <32 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vasrhubsat(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lo(<32 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv62" "target-features"="+hvx,+hvx-length64b" }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/reg-scavengebug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/reg-scavengebug.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/reg-scavengebug.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/reg-scavengebug.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,190 @@
+; RUN: llc -O3 -march=hexagon < %s | FileCheck %s
+; CHECK: v{{[0-9]+}}.w = vadd
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #0
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32>) #0
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32>, <16 x i32>) #0
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32>, <16 x i32>, i32) #0
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32>, <16 x i32>) #0
+
+; Function Attrs: nounwind
+define void @f0(i16* noalias nocapture %a0, i32* noalias nocapture readonly %a1, i32 %a2, i8* noalias nocapture readonly %a3) #1 {
+b0:
+  %v0 = add nsw i32 %a2, 63
+  %v1 = ashr i32 %v0, 6
+  %v2 = bitcast i16* %a0 to <16 x i32>*
+  %v3 = bitcast i8* %a3 to <16 x i32>*
+  %v4 = getelementptr inbounds i32, i32* %a1, i32 32
+  %v5 = bitcast i32* %v4 to <16 x i32>*
+  %v6 = load <16 x i32>, <16 x i32>* %v5, align 64, !tbaa !0
+  %v7 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 32768)
+  %v8 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 2147450879)
+  %v9 = icmp sgt i32 %v1, 0
+  br i1 %v9, label %b1, label %b4
+
+b1:                                               ; preds = %b0
+  %v10 = bitcast i32* %a1 to <16 x i32>*
+  %v11 = load <16 x i32>, <16 x i32>* %v10, align 64, !tbaa !0
+  %v12 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v6, <16 x i32> %v11, i32 2)
+  %v13 = getelementptr inbounds i32, i32* %a1, i32 48
+  %v14 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v12, <16 x i32> undef)
+  %v15 = bitcast i32* %v13 to <16 x i32>*
+  br i1 undef, label %b2, label %b3
+
+b2:                                               ; preds = %b1
+  %v16 = getelementptr inbounds <16 x i32>, <16 x i32>* %v15, i32 1
+  %v17 = load <16 x i32>, <16 x i32>* %v16, align 64, !tbaa !0
+  %v18 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v17, <16 x i32> %v6, i32 4)
+  %v19 = load <16 x i32>, <16 x i32>* %v15, align 64, !tbaa !0
+  %v20 = getelementptr inbounds <16 x i32>, <16 x i32>* %v15, i32 2
+  %v21 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v18, <16 x i32> %v19)
+  %v22 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v21, <16 x i32> %v14, i32 4)
+  %v23 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v21, <16 x i32> %v14, i32 8)
+  %v24 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v21, <16 x i32> %v14, i32 12)
+  %v25 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v14, <16 x i32> %v22)
+  %v26 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v25, <16 x i32> %v23)
+  %v27 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v26, <16 x i32> %v24)
+  %v28 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v19, <16 x i32> undef, i32 16)
+  %v29 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v27, <16 x i32> %v11)
+  %v30 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v27, <16 x i32> %v28)
+  %v31 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %v7, <16 x i32> %v29, i32 53019433)
+  %v32 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %v7, <16 x i32> %v30, i32 53019433)
+  %v33 = load <16 x i32>, <16 x i32>* %v3, align 64, !tbaa !0
+  %v34 = tail call <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32> %v33)
+  %v35 = tail call <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32> %v34, <16 x i32> %v34)
+  %v36 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> %v32, <16 x i32> %v31)
+  store <16 x i32> %v36, <16 x i32>* %v2, align 64, !tbaa !0
+  %v37 = getelementptr inbounds <16 x i32>, <16 x i32>* %v15, i32 3
+  %v38 = load <16 x i32>, <16 x i32>* %v37, align 64, !tbaa !0
+  %v39 = load <16 x i32>, <16 x i32>* %v20, align 64, !tbaa !0
+  %v40 = getelementptr inbounds <16 x i32>, <16 x i32>* %v15, i32 4
+  %v41 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> undef, <16 x i32> %v39)
+  %v42 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v41, <16 x i32> %v21, i32 4)
+  %v43 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v41, <16 x i32> %v21, i32 8)
+  %v44 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v41, <16 x i32> %v21, i32 12)
+  %v45 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v21, <16 x i32> %v42)
+  %v46 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v45, <16 x i32> %v43)
+  %v47 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v46, <16 x i32> %v44)
+  %v48 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v47, <16 x i32> %v6)
+  %v49 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v47, <16 x i32> undef)
+  %v50 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %v7, <16 x i32> %v48, i32 53019433)
+  %v51 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %v7, <16 x i32> %v49, i32 53019433)
+  %v52 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> %v51, <16 x i32> %v50)
+  %v53 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v52, <16 x i32> undef, i32 56)
+  %v54 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v35)
+  %v55 = tail call <16 x i32> @llvm.hexagon.V6.vsubuhsat(<16 x i32> %v53, <16 x i32> %v54)
+  %v56 = tail call <16 x i32> @llvm.hexagon.V6.vminuh(<16 x i32> %v55, <16 x i32> %v8)
+  %v57 = getelementptr inbounds <16 x i32>, <16 x i32>* %v2, i32 undef
+  store <16 x i32> %v56, <16 x i32>* %v57, align 64, !tbaa !0
+  %v58 = getelementptr <16 x i32>, <16 x i32>* %v2, i32 2
+  %v59 = getelementptr inbounds <16 x i32>, <16 x i32>* %v15, i32 5
+  %v60 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> zeroinitializer, <16 x i32> %v38, i32 4)
+  %v61 = load <16 x i32>, <16 x i32>* %v40, align 64, !tbaa !0
+  %v62 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v60, <16 x i32> %v61)
+  %v63 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v62, <16 x i32> %v41, i32 4)
+  %v64 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v62, <16 x i32> %v41, i32 8)
+  %v65 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v62, <16 x i32> %v41, i32 12)
+  %v66 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v41, <16 x i32> %v63)
+  %v67 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v66, <16 x i32> %v64)
+  %v68 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v67, <16 x i32> %v65)
+  %v69 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v61, <16 x i32> %v39, i32 16)
+  %v70 = getelementptr inbounds <16 x i32>, <16 x i32>* %v15, i32 1
+  %v71 = load <16 x i32>, <16 x i32>* %v70, align 64, !tbaa !0
+  %v72 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v68, <16 x i32> %v71)
+  %v73 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v68, <16 x i32> %v69)
+  %v74 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %v7, <16 x i32> %v72, i32 53019433)
+  %v75 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %v7, <16 x i32> %v73, i32 53019433)
+  %v76 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> %v75, <16 x i32> %v74)
+  store <16 x i32> %v76, <16 x i32>* %v58, align 64, !tbaa !0
+  %v77 = getelementptr inbounds <16 x i32>, <16 x i32>* %v15, i32 7
+  %v78 = load <16 x i32>, <16 x i32>* %v77, align 64, !tbaa !0
+  %v79 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> undef, <16 x i32> undef)
+  %v80 = getelementptr <16 x i32>, <16 x i32>* %v2, i32 4
+  %v81 = getelementptr inbounds <16 x i32>, <16 x i32>* %v15, i32 9
+  %v82 = load <16 x i32>, <16 x i32>* %v81, align 64, !tbaa !0
+  %v83 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v82, <16 x i32> %v78, i32 4)
+  %v84 = getelementptr inbounds <16 x i32>, <16 x i32>* %v15, i32 10
+  %v85 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v83, <16 x i32> undef)
+  %v86 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v85, <16 x i32> %v79, i32 4)
+  %v87 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v85, <16 x i32> %v79, i32 8)
+  %v88 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v85, <16 x i32> %v79, i32 12)
+  %v89 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v79, <16 x i32> %v86)
+  %v90 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v89, <16 x i32> %v87)
+  %v91 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v90, <16 x i32> %v88)
+  %v92 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> undef, <16 x i32> undef, i32 16)
+  %v93 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v91, <16 x i32> zeroinitializer)
+  %v94 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v91, <16 x i32> %v92)
+  %v95 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %v7, <16 x i32> %v93, i32 53019433)
+  %v96 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %v7, <16 x i32> %v94, i32 53019433)
+  %v97 = tail call <32 x i32> @llvm.hexagon.V6.vmpyubv(<16 x i32> undef, <16 x i32> undef)
+  %v98 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> %v96, <16 x i32> %v95)
+  store <16 x i32> %v98, <16 x i32>* %v80, align 64, !tbaa !0
+  %v99 = getelementptr inbounds <16 x i32>, <16 x i32>* %v15, i32 11
+  %v100 = load <16 x i32>, <16 x i32>* %v99, align 64, !tbaa !0
+  %v101 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v100, <16 x i32> %v82, i32 4)
+  %v102 = load <16 x i32>, <16 x i32>* %v84, align 64, !tbaa !0
+  %v103 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v101, <16 x i32> %v102)
+  %v104 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v103, <16 x i32> %v85, i32 4)
+  %v105 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v103, <16 x i32> %v85, i32 8)
+  %v106 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v85, <16 x i32> %v104)
+  %v107 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v106, <16 x i32> %v105)
+  %v108 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v107, <16 x i32> undef)
+  %v109 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v102, <16 x i32> undef, i32 16)
+  %v110 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v108, <16 x i32> %v78)
+  %v111 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v108, <16 x i32> %v109)
+  %v112 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %v7, <16 x i32> %v110, i32 53019433)
+  %v113 = tail call <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32> %v7, <16 x i32> %v111, i32 53019433)
+  %v114 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> %v113, <16 x i32> %v112)
+  %v115 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v114, <16 x i32> undef, i32 56)
+  %v116 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v97)
+  %v117 = tail call <16 x i32> @llvm.hexagon.V6.vsubuhsat(<16 x i32> %v115, <16 x i32> %v116)
+  %v118 = tail call <16 x i32> @llvm.hexagon.V6.vminuh(<16 x i32> %v117, <16 x i32> %v8)
+  %v119 = getelementptr inbounds <16 x i32>, <16 x i32>* %v2, i32 undef
+  store <16 x i32> %v118, <16 x i32>* %v119, align 64, !tbaa !0
+  %v120 = getelementptr <16 x i32>, <16 x i32>* %v2, i32 6
+  %v121 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> undef, <16 x i32> undef)
+  store <16 x i32> %v121, <16 x i32>* %v120, align 64, !tbaa !0
+  unreachable
+
+b3:                                               ; preds = %b1
+  unreachable
+
+b4:                                               ; preds = %b0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32) #0
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32>, <16 x i32>) #0
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vmpyiwh.acc(<16 x i32>, <16 x i32>, i32) #0
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32>, <16 x i32>) #0
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vsubuhsat(<16 x i32>, <16 x i32>) #0
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vminuh(<16 x i32>, <16 x i32>) #0
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/reg_seq.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/reg_seq.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/reg_seq.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/reg_seq.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,109 @@
+; RUN: llc -march=hexagon -O2 < %s | FileCheck %s
+
+; Generate REG_SEQUENCE instead of combine
+; CHECK-NOT: combine(#0
+
+; Function Attrs: nounwind
+define void @f0(i16* nocapture readonly %a0, i16* nocapture readonly %a1, i16* nocapture %a2, i16* nocapture readonly %a3, i32 %a4) #0 {
+b0:
+  %v0 = lshr i32 %a4, 1
+  %v1 = icmp eq i32 %v0, 0
+  br i1 %v1, label %b3, label %b1
+
+b1:                                               ; preds = %b0
+  %v2 = bitcast i16* %a2 to i64*
+  %v3 = bitcast i16* %a1 to i64*
+  %v4 = bitcast i16* %a0 to i64*
+  br label %b2
+
+b2:                                               ; preds = %b2, %b1
+  %v5 = phi i32 [ 0, %b1 ], [ %v71, %b2 ]
+  %v6 = phi i64* [ %v4, %b1 ], [ %v9, %b2 ]
+  %v7 = phi i64* [ %v3, %b1 ], [ %v11, %b2 ]
+  %v8 = phi i64* [ %v2, %b1 ], [ %v70, %b2 ]
+  %v9 = getelementptr inbounds i64, i64* %v6, i32 1
+  %v10 = load i64, i64* %v6, align 8, !tbaa !0
+  %v11 = getelementptr inbounds i64, i64* %v7, i32 1
+  %v12 = load i64, i64* %v7, align 8, !tbaa !0
+  %v13 = trunc i64 %v10 to i32
+  %v14 = lshr i64 %v10, 32
+  %v15 = tail call i64 @llvm.hexagon.S2.vzxthw(i32 %v13)
+  %v16 = trunc i64 %v12 to i32
+  %v17 = lshr i64 %v12, 32
+  %v18 = tail call i64 @llvm.hexagon.S2.vzxthw(i32 %v16)
+  %v19 = trunc i64 %v15 to i32
+  %v20 = lshr i64 %v15, 32
+  %v21 = getelementptr inbounds i16, i16* %a3, i32 %v19
+  %v22 = load i16, i16* %v21, align 2, !tbaa !3
+  %v23 = trunc i64 %v20 to i32
+  %v24 = getelementptr inbounds i16, i16* %a3, i32 %v23
+  %v25 = load i16, i16* %v24, align 2, !tbaa !3
+  %v26 = trunc i64 %v18 to i32
+  %v27 = lshr i64 %v18, 32
+  %v28 = getelementptr inbounds i16, i16* %a3, i32 %v26
+  %v29 = load i16, i16* %v28, align 2, !tbaa !3
+  %v30 = trunc i64 %v27 to i32
+  %v31 = getelementptr inbounds i16, i16* %a3, i32 %v30
+  %v32 = load i16, i16* %v31, align 2, !tbaa !3
+  %v33 = zext i16 %v32 to i64
+  %v34 = shl nuw nsw i64 %v33, 32
+  %v35 = zext i16 %v29 to i64
+  %v36 = or i64 %v35, %v34
+  %v37 = zext i16 %v25 to i64
+  %v38 = shl nuw nsw i64 %v37, 32
+  %v39 = zext i16 %v22 to i64
+  %v40 = or i64 %v39, %v38
+  %v41 = tail call i64 @llvm.hexagon.S2.vtrunewh(i64 %v36, i64 %v40)
+  %v42 = getelementptr inbounds i64, i64* %v8, i32 1
+  store i64 %v41, i64* %v8, align 8, !tbaa !0
+  %v43 = trunc i64 %v14 to i32
+  %v44 = tail call i64 @llvm.hexagon.S2.vzxthw(i32 %v43)
+  %v45 = trunc i64 %v17 to i32
+  %v46 = tail call i64 @llvm.hexagon.S2.vzxthw(i32 %v45)
+  %v47 = trunc i64 %v44 to i32
+  %v48 = lshr i64 %v44, 32
+  %v49 = getelementptr inbounds i16, i16* %a3, i32 %v47
+  %v50 = load i16, i16* %v49, align 2, !tbaa !3
+  %v51 = trunc i64 %v48 to i32
+  %v52 = getelementptr inbounds i16, i16* %a3, i32 %v51
+  %v53 = load i16, i16* %v52, align 2, !tbaa !3
+  %v54 = trunc i64 %v46 to i32
+  %v55 = lshr i64 %v46, 32
+  %v56 = getelementptr inbounds i16, i16* %a3, i32 %v54
+  %v57 = load i16, i16* %v56, align 2, !tbaa !3
+  %v58 = trunc i64 %v55 to i32
+  %v59 = getelementptr inbounds i16, i16* %a3, i32 %v58
+  %v60 = load i16, i16* %v59, align 2, !tbaa !3
+  %v61 = zext i16 %v60 to i64
+  %v62 = shl nuw nsw i64 %v61, 32
+  %v63 = zext i16 %v57 to i64
+  %v64 = or i64 %v63, %v62
+  %v65 = zext i16 %v53 to i64
+  %v66 = shl nuw nsw i64 %v65, 32
+  %v67 = zext i16 %v50 to i64
+  %v68 = or i64 %v67, %v66
+  %v69 = tail call i64 @llvm.hexagon.S2.vtrunewh(i64 %v64, i64 %v68)
+  %v70 = getelementptr inbounds i64, i64* %v8, i32 2
+  store i64 %v69, i64* %v42, align 8, !tbaa !0
+  %v71 = add nsw i32 %v5, 1
+  %v72 = icmp ult i32 %v71, %v0
+  br i1 %v72, label %b2, label %b3
+
+b3:                                               ; preds = %b2, %b0
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.S2.vzxthw(i32) #1
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.hexagon.S2.vtrunewh(i64, i64) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" }
+attributes #1 = { nounwind readnone }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2, i64 0}
+!2 = !{!"Simple C/C++ TBAA"}
+!3 = !{!4, !4, i64 0}
+!4 = !{!"short", !1, i64 0}
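
For context, the CHECK-NOT in the reg_seq.ll test above guards against output in which
an i64 value is built by first zero-extending one 32-bit half into a register pair,
i.e. assembly roughly of the shape below (registers illustrative only):

    r1:0 = combine(#0,r2)      // materialize a zero just to widen a 32-bit half

When the halves are instead tied together with a REG_SEQUENCE, the two 32-bit values
are placed directly into the subregisters of the 64-bit pair, so no combine with an
immediate zero should appear in the generated code.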

Added: llvm/trunk/test/CodeGen/Hexagon/registerscav-missing-spill-slot.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/registerscav-missing-spill-slot.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/registerscav-missing-spill-slot.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/registerscav-missing-spill-slot.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,182 @@
+; RUN: llc -march=hexagon -machine-sink-split=0 < %s
+; REQUIRES: asserts
+; Used to fail with: Assertion `ScavengingFrameIndex >= 0 && "Cannot scavenge register without an emergency spill slot!"' failed.
+
+target triple = "hexagon-unknown-linux-gnu"
+
+%s.0 = type { double, double, double, double, double, double, i32, double, double, double, double, i8*, i8, [9 x i8], double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, [200 x i8*], [32 x i8*], [32 x i8], i32 }
+
+; Function Attrs: nounwind
+define void @f0() #0 {
+b0:
+  %v0 = call i8* @f2()
+  br i1 undef, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  ret void
+
+b2:                                               ; preds = %b0
+  br i1 undef, label %b3, label %b4
+
+b3:                                               ; preds = %b2
+  unreachable
+
+b4:                                               ; preds = %b2
+  br i1 undef, label %b5, label %b6
+
+b5:                                               ; preds = %b4
+  unreachable
+
+b6:                                               ; preds = %b4
+  %v1 = call i32 bitcast (i32 (...)* @f1 to i32 ()*)() #0
+  br i1 undef, label %b7, label %b20
+
+b7:                                               ; preds = %b6
+  switch i32 undef, label %b8 [
+    i32 6, label %b9
+    i32 1, label %b14
+    i32 2, label %b13
+    i32 3, label %b12
+    i32 4, label %b11
+    i32 5, label %b10
+  ]
+
+b8:                                               ; preds = %b7
+  br label %b9
+
+b9:                                               ; preds = %b8, %b7
+  unreachable
+
+b10:                                              ; preds = %b7
+  unreachable
+
+b11:                                              ; preds = %b7
+  unreachable
+
+b12:                                              ; preds = %b7
+  unreachable
+
+b13:                                              ; preds = %b7
+  unreachable
+
+b14:                                              ; preds = %b7
+  %v2 = call %s.0* bitcast (%s.0* (...)* @f3 to %s.0* (i32)*)(i32 0) #0
+  br label %b15
+
+b15:                                              ; preds = %b15, %b14
+  %v3 = bitcast i8* undef to double*
+  %v4 = fadd double undef, undef
+  br i1 undef, label %b16, label %b15
+
+b16:                                              ; preds = %b15
+  switch i32 undef, label %b18 [
+    i32 0, label %b19
+    i32 2, label %b17
+  ]
+
+b17:                                              ; preds = %b16
+  %v5 = getelementptr i8, i8* %v0, i32 0
+  %v6 = bitcast i8* %v5 to double*
+  %v7 = or i32 0, 16
+  %v8 = getelementptr i8, i8* %v0, i32 %v7
+  %v9 = bitcast i8* %v8 to double*
+  %v10 = load double, double* undef, align 8, !tbaa !0
+  %v11 = fcmp olt double -1.000000e+11, %v10
+  %v12 = select i1 %v11, double %v10, double -1.000000e+11
+  %v13 = load double, double* %v6, align 8, !tbaa !0
+  %v14 = fcmp olt double -1.000000e+11, %v13
+  %v15 = select i1 %v14, double %v13, double -1.000000e+11
+  %v16 = load double, double* %v9, align 8, !tbaa !0
+  %v17 = fcmp olt double -1.000000e+11, %v16
+  %v18 = select i1 %v17, double %v16, double -1.000000e+11
+  %v19 = fcmp ogt double 1.000000e+11, %v13
+  %v20 = select i1 %v19, double %v13, double 1.000000e+11
+  %v21 = fcmp ogt double 1.000000e+11, %v16
+  %v22 = select i1 %v21, double %v16, double 1.000000e+11
+  br label %b18
+
+b18:                                              ; preds = %b17, %b16
+  %v23 = phi double [ %v12, %b17 ], [ -1.000000e+11, %b16 ]
+  %v24 = phi double [ %v15, %b17 ], [ -1.000000e+11, %b16 ]
+  %v25 = phi double [ %v20, %b17 ], [ 1.000000e+11, %b16 ]
+  %v26 = phi double [ %v18, %b17 ], [ -1.000000e+11, %b16 ]
+  %v27 = phi double [ %v22, %b17 ], [ 1.000000e+11, %b16 ]
+  %v28 = load double, double* undef, align 8, !tbaa !0
+  %v29 = select i1 undef, double %v28, double %v23
+  %v30 = load double, double* null, align 8, !tbaa !0
+  %v31 = select i1 undef, double %v30, double %v24
+  %v32 = load double, double* undef, align 8, !tbaa !0
+  %v33 = select i1 undef, double %v32, double %v26
+  %v34 = select i1 undef, double %v30, double %v25
+  %v35 = select i1 undef, double %v32, double %v27
+  br i1 false, label %b20, label %b19
+
+b19:                                              ; preds = %b19, %b18, %b16
+  %v36 = phi double [ %v75, %b19 ], [ -1.000000e+11, %b16 ], [ %v29, %b18 ]
+  %v37 = phi double [ %v81, %b19 ], [ 1.000000e+11, %b16 ], [ undef, %b18 ]
+  %v38 = phi double [ %v78, %b19 ], [ -1.000000e+11, %b16 ], [ %v31, %b18 ]
+  %v39 = phi double [ %v82, %b19 ], [ 1.000000e+11, %b16 ], [ %v34, %b18 ]
+  %v40 = phi double [ %v80, %b19 ], [ -1.000000e+11, %b16 ], [ %v33, %b18 ]
+  %v41 = phi double [ %v84, %b19 ], [ 1.000000e+11, %b16 ], [ %v35, %b18 ]
+  %v42 = getelementptr i8, i8* %v0, i32 0
+  %v43 = bitcast i8* %v42 to double*
+  %v44 = load double, double* null, align 8, !tbaa !0
+  %v45 = select i1 undef, double %v44, double %v36
+  %v46 = load double, double* %v43, align 8, !tbaa !0
+  %v47 = select i1 undef, double %v46, double %v38
+  %v48 = load double, double* undef, align 8, !tbaa !0
+  %v49 = select i1 undef, double %v48, double %v40
+  %v50 = select i1 undef, double %v44, double %v37
+  %v51 = fcmp ogt double %v39, %v46
+  %v52 = select i1 %v51, double %v46, double %v39
+  %v53 = select i1 undef, double %v48, double %v41
+  %v54 = load double, double* null, align 8, !tbaa !0
+  %v55 = select i1 undef, double %v54, double %v45
+  %v56 = load double, double* undef, align 8, !tbaa !0
+  %v57 = select i1 undef, double %v56, double %v47
+  %v58 = load double, double* undef, align 8, !tbaa !0
+  %v59 = select i1 undef, double %v58, double %v49
+  %v60 = select i1 undef, double %v54, double %v50
+  %v61 = select i1 undef, double %v56, double %v52
+  %v62 = select i1 false, double %v58, double %v53
+  %v63 = load double, double* undef, align 8, !tbaa !0
+  %v64 = select i1 undef, double %v63, double %v55
+  %v65 = load double, double* undef, align 8, !tbaa !0
+  %v66 = select i1 undef, double %v65, double %v57
+  %v67 = load double, double* null, align 8, !tbaa !0
+  %v68 = select i1 undef, double %v67, double %v59
+  %v69 = fcmp ogt double %v60, %v63
+  %v70 = select i1 %v69, double %v63, double %v60
+  %v71 = select i1 false, double %v65, double %v61
+  %v72 = select i1 false, double %v67, double %v62
+  %v73 = load double, double* null, align 8, !tbaa !0
+  %v74 = fcmp olt double %v64, %v73
+  %v75 = select i1 %v74, double %v73, double %v64
+  %v76 = load double, double* null, align 8, !tbaa !0
+  %v77 = fcmp olt double %v66, %v76
+  %v78 = select i1 %v77, double %v76, double %v66
+  %v79 = fcmp olt double %v68, 0.000000e+00
+  %v80 = select i1 %v79, double 0.000000e+00, double %v68
+  %v81 = select i1 undef, double %v73, double %v70
+  %v82 = select i1 undef, double %v76, double %v71
+  %v83 = fcmp ogt double %v72, 0.000000e+00
+  %v84 = select i1 %v83, double 0.000000e+00, double %v72
+  br i1 false, label %b20, label %b19
+
+b20:                                              ; preds = %b19, %b18, %b6
+  unreachable
+}
+
+declare i32 @f1(...)
+
+; Function Attrs: nounwind
+declare noalias i8* @f2() #0
+
+declare %s.0* @f3(...)
+
+attributes #0 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"double", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/registerscavenger-fail1.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/registerscavenger-fail1.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/registerscavenger-fail1.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/registerscavenger-fail1.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,296 @@
+; RUN: llc -march=hexagon -machine-sink-split=0 < %s
+; REQUIRES: asserts
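+; Reduced test for a register scavenger failure (per the file name); it is a
+; compile-only test, so it passes as long as llc does not crash or assert.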
+
+target triple = "hexagon-unknown-linux-gnu"
+
+%s.0 = type { double, double, double, double, double, double, i32, double, double, double, double, i8*, i8, [9 x i8], double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, double, [200 x i8*], [32 x i8*], [32 x i8], i32 }
+
+@g0 = external unnamed_addr constant [6 x i8], align 8
+
+; Function Attrs: nounwind
+define i32 @f0(double %a0) #0 {
+b0:
+  %v0 = call double bitcast (double (...)* @f1 to double (i8*)*)(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @g0, i32 0, i32 0)) #0
+  %v1 = call i32 bitcast (i32 (...)* @f2 to i32 ()*)() #0
+  %v2 = call i8* @f3(i32 undef)
+  br i1 undef, label %b1, label %b2
+
+b1:                                               ; preds = %b0
+  unreachable
+
+b2:                                               ; preds = %b0
+  br i1 undef, label %b3, label %b4
+
+b3:                                               ; preds = %b2
+  unreachable
+
+b4:                                               ; preds = %b2
+  %v3 = mul i32 %v1, 12
+  br i1 undef, label %b5, label %b6
+
+b5:                                               ; preds = %b4
+  ret i32 0
+
+b6:                                               ; preds = %b4
+  %v4 = call i32 bitcast (i32 (...)* @f2 to i32 ()*)() #0
+  br i1 undef, label %b7, label %b24
+
+b7:                                               ; preds = %b6
+  switch i32 undef, label %b8 [
+    i32 0, label %b15
+    i32 1, label %b14
+    i32 2, label %b13
+    i32 3, label %b12
+    i32 4, label %b11
+    i32 5, label %b10
+    i32 6, label %b9
+  ]
+
+b8:                                               ; preds = %b7
+  unreachable
+
+b9:                                               ; preds = %b7
+  br label %b10
+
+b10:                                              ; preds = %b9, %b7
+  unreachable
+
+b11:                                              ; preds = %b7
+  unreachable
+
+b12:                                              ; preds = %b7
+  br label %b13
+
+b13:                                              ; preds = %b12, %b7
+  unreachable
+
+b14:                                              ; preds = %b7
+  %v5 = call %s.0* bitcast (%s.0* (...)* @f4 to %s.0* (i32)*)(i32 0) #0
+  %v6 = icmp ult i32 %v4, 8
+  br i1 %v6, label %b16, label %b15
+
+b15:                                              ; preds = %b14, %b7
+  unreachable
+
+b16:                                              ; preds = %b14
+  %v7 = and i32 %v4, 3
+  br i1 undef, label %b17, label %b18
+
+b17:                                              ; preds = %b16
+  br i1 undef, label %b19, label %b18
+
+b18:                                              ; preds = %b18, %b17, %b16
+  %v8 = phi i32 [ %v10, %b18 ], [ 0, %b16 ], [ undef, %b17 ]
+  %v9 = shl i32 %v8, 5
+  %v10 = add nsw i32 %v8, 4
+  %v11 = icmp eq i32 %v10, %v4
+  br i1 %v11, label %b19, label %b18
+
+b19:                                              ; preds = %b18, %b17
+  br i1 undef, label %b20, label %b23
+
+b20:                                              ; preds = %b19
+  %v12 = icmp eq i32 %v7, 2
+  br i1 %v12, label %b21, label %b22
+
+b21:                                              ; preds = %b20
+  %v13 = getelementptr i8, i8* %v2, i32 0
+  %v14 = bitcast i8* %v13 to double*
+  %v15 = or i32 0, 16
+  %v16 = getelementptr i8, i8* %v2, i32 %v15
+  %v17 = bitcast i8* %v16 to double*
+  %v18 = load double, double* undef, align 8, !tbaa !0
+  %v19 = fcmp olt double -1.000000e+11, %v18
+  %v20 = select i1 %v19, double %v18, double -1.000000e+11
+  %v21 = load double, double* %v14, align 8, !tbaa !0
+  %v22 = fcmp olt double -1.000000e+11, %v21
+  %v23 = select i1 %v22, double %v21, double -1.000000e+11
+  %v24 = load double, double* %v17, align 8, !tbaa !0
+  %v25 = fcmp olt double -1.000000e+11, %v24
+  %v26 = select i1 %v25, double %v24, double -1.000000e+11
+  %v27 = fcmp ogt double 1.000000e+11, %v18
+  %v28 = select i1 %v27, double %v18, double 1.000000e+11
+  %v29 = fcmp ogt double 1.000000e+11, %v21
+  %v30 = select i1 %v29, double %v21, double 1.000000e+11
+  %v31 = fcmp ogt double 1.000000e+11, %v24
+  %v32 = select i1 %v31, double %v24, double 1.000000e+11
+  %v33 = add i32 0, 1
+  %v34 = getelementptr i8, i8* %v2, i32 32
+  br label %b22
+
+b22:                                              ; preds = %b21, %b20
+  %v35 = phi double [ %v20, %b21 ], [ -1.000000e+11, %b20 ]
+  %v36 = phi double [ %v28, %b21 ], [ 1.000000e+11, %b20 ]
+  %v37 = phi double [ %v23, %b21 ], [ -1.000000e+11, %b20 ]
+  %v38 = phi double [ %v30, %b21 ], [ 1.000000e+11, %b20 ]
+  %v39 = phi double [ %v26, %b21 ], [ -1.000000e+11, %b20 ]
+  %v40 = phi double [ %v32, %b21 ], [ 1.000000e+11, %b20 ]
+  %v41 = phi i8* [ %v34, %b21 ], [ %v2, %b20 ]
+  %v42 = phi i32 [ %v33, %b21 ], [ 0, %b20 ]
+  %v43 = shl nsw i32 %v42, 5
+  %v44 = bitcast i8* %v41 to double*
+  %v45 = or i32 %v43, 8
+  %v46 = getelementptr i8, i8* %v2, i32 %v45
+  %v47 = bitcast i8* %v46 to double*
+  %v48 = load double, double* %v44, align 8, !tbaa !0
+  %v49 = select i1 undef, double %v48, double %v35
+  %v50 = load double, double* %v47, align 8, !tbaa !0
+  %v51 = fcmp olt double %v37, %v50
+  %v52 = select i1 %v51, double %v50, double %v37
+  %v53 = load double, double* undef, align 8, !tbaa !0
+  %v54 = fcmp olt double %v39, %v53
+  %v55 = select i1 %v54, double %v53, double %v39
+  %v56 = fcmp ogt double %v36, %v48
+  %v57 = select i1 %v56, double %v48, double %v36
+  %v58 = fcmp ogt double %v38, %v50
+  %v59 = select i1 %v58, double %v50, double %v38
+  %v60 = select i1 undef, double %v53, double %v40
+  %v61 = add i32 %v42, 1
+  br i1 undef, label %b24, label %b23
+
+b23:                                              ; preds = %b23, %b22, %b19
+  %v62 = phi double [ %v79, %b23 ], [ 1.000000e+11, %b19 ], [ %v57, %b22 ]
+  %v63 = phi double [ %v81, %b23 ], [ 1.000000e+11, %b19 ], [ %v59, %b22 ]
+  %v64 = phi i32 [ %v82, %b23 ], [ 0, %b19 ], [ %v61, %b22 ]
+  %v65 = shl i32 %v64, 5
+  %v66 = load double, double* undef, align 8, !tbaa !0
+  %v67 = load double, double* undef, align 8, !tbaa !0
+  %v68 = select i1 undef, double %v66, double %v62
+  %v69 = select i1 undef, double %v67, double %v63
+  %v70 = load double, double* undef, align 8, !tbaa !0
+  %v71 = select i1 false, double 0.000000e+00, double %v68
+  %v72 = select i1 undef, double %v70, double %v69
+  %v73 = bitcast i8* undef to double*
+  %v74 = load double, double* undef, align 8, !tbaa !0
+  %v75 = fcmp ogt double %v71, 0.000000e+00
+  %v76 = select i1 %v75, double 0.000000e+00, double %v71
+  %v77 = select i1 undef, double %v74, double %v72
+  %v78 = load double, double* undef, align 8, !tbaa !0
+  %v79 = select i1 undef, double %v78, double %v76
+  %v80 = fcmp ogt double %v77, 0.000000e+00
+  %v81 = select i1 %v80, double 0.000000e+00, double %v77
+  %v82 = add i32 %v64, 4
+  %v83 = icmp eq i32 %v82, %v4
+  br i1 %v83, label %b24, label %b23
+
+b24:                                              ; preds = %b23, %b22, %b6
+  %v84 = phi double [ -1.000000e+11, %b6 ], [ %v49, %b22 ], [ undef, %b23 ]
+  %v85 = phi double [ -1.000000e+11, %b6 ], [ %v52, %b22 ], [ 0.000000e+00, %b23 ]
+  %v86 = phi double [ -1.000000e+11, %b6 ], [ %v55, %b22 ], [ 0.000000e+00, %b23 ]
+  %v87 = phi double [ 1.000000e+11, %b6 ], [ %v60, %b22 ], [ undef, %b23 ]
+  %v88 = fsub double %v84, undef
+  %v89 = fsub double %v85, undef
+  %v90 = fadd double undef, 1.000000e+00
+  %v91 = fptosi double %v90 to i32
+  %v92 = fsub double %v86, %v87
+  %v93 = fdiv double %v92, %v0
+  %v94 = fadd double %v93, 1.000000e+00
+  %v95 = fptosi double %v94 to i32
+  br i1 undef, label %b25, label %b27
+
+b25:                                              ; preds = %b24
+  %v96 = fdiv double %v88, 0.000000e+00
+  %v97 = fadd double %v96, 1.000000e+00
+  %v98 = fptosi double %v97 to i32
+  %v99 = fdiv double %v89, 0.000000e+00
+  %v100 = fadd double %v99, 1.000000e+00
+  %v101 = fptosi double %v100 to i32
+  %v102 = fadd double undef, 1.000000e+00
+  %v103 = fptosi double %v102 to i32
+  %v104 = call i8* @f3(i32 undef)
+  br i1 false, label %b26, label %b27
+
+b26:                                              ; preds = %b25
+  unreachable
+
+b27:                                              ; preds = %b25, %b24
+  %v105 = phi i8* [ %v104, %b25 ], [ undef, %b24 ]
+  %v106 = phi i32 [ %v103, %b25 ], [ %v95, %b24 ]
+  %v107 = phi i32 [ %v101, %b25 ], [ %v91, %b24 ]
+  %v108 = phi i32 [ %v98, %b25 ], [ undef, %b24 ]
+  %v109 = phi double [ 0.000000e+00, %b25 ], [ %v0, %b24 ]
+  %v110 = mul i32 %v108, 232
+  %v111 = icmp sgt i32 %v106, 0
+  %v112 = mul i32 %v107, 232
+  %v113 = mul i32 %v112, %v108
+  %v114 = fmul double %v109, 5.000000e-01
+  %v115 = and i32 %v106, 3
+  %v116 = icmp ult i32 %v106, 4
+  br label %b28
+
+b28:                                              ; preds = %b35, %b27
+  %v117 = phi i32 [ %v146, %b35 ], [ 0, %b27 ]
+  %v118 = mul i32 %v117, 232
+  br i1 undef, label %b29, label %b35
+
+b29:                                              ; preds = %b28
+  %v119 = add i32 %v118, 8
+  %v120 = add i32 %v118, 16
+  br i1 %v111, label %b30, label %b35
+
+b30:                                              ; preds = %b34, %b29
+  %v121 = phi i32 [ %v144, %b34 ], [ 0, %b29 ]
+  %v122 = mul i32 %v110, %v121
+  %v123 = add i32 %v119, %v122
+  %v124 = add i32 %v120, %v122
+  %v125 = sitofp i32 %v121 to double
+  %v126 = fmul double %v125, %v109
+  %v127 = fadd double %v126, %v114
+  %v128 = fadd double %v127, undef
+  switch i32 %v115, label %b33 [
+    i32 2, label %b31
+    i32 1, label %b32
+  ]
+
+b31:                                              ; preds = %b30
+  %v129 = add i32 %v123, 0
+  %v130 = getelementptr i8, i8* %v105, i32 %v129
+  %v131 = bitcast i8* %v130 to double*
+  store double %v128, double* %v131, align 8, !tbaa !0
+  br label %b32
+
+b32:                                              ; preds = %b31, %b30
+  %v132 = add nsw i32 0, 1
+  br i1 %v116, label %b34, label %b33
+
+b33:                                              ; preds = %b33, %b32, %b30
+  %v133 = phi i32 [ %v142, %b33 ], [ 0, %b30 ], [ %v132, %b32 ]
+  %v134 = mul i32 %v113, %v133
+  %v135 = add i32 %v124, %v134
+  %v136 = getelementptr i8, i8* %v105, i32 %v135
+  %v137 = bitcast i8* %v136 to double*
+  %v138 = sitofp i32 %v133 to double
+  store double undef, double* %v137, align 8, !tbaa !0
+  %v139 = fmul double undef, %v109
+  %v140 = fadd double %v139, %v114
+  %v141 = fadd double %v140, %v87
+  store double %v141, double* undef, align 8, !tbaa !0
+  %v142 = add nsw i32 %v133, 4
+  %v143 = icmp eq i32 %v142, %v106
+  br i1 %v143, label %b34, label %b33
+
+b34:                                              ; preds = %b33, %b32
+  %v144 = add i32 %v121, 1
+  %v145 = icmp eq i32 %v144, %v107
+  br i1 %v145, label %b35, label %b30
+
+b35:                                              ; preds = %b34, %b29, %b28
+  %v146 = add i32 %v117, 1
+  br label %b28
+}
+
+declare double @f1(...)
+
+declare i32 @f2(...)
+
+; Function Attrs: nounwind
+declare noalias i8* @f3(i32) #0
+
+declare %s.0* @f4(...)
+
+attributes #0 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"double", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/regp-underflow.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/regp-underflow.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/regp-underflow.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/regp-underflow.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,102 @@
+; RUN: llc -march=hexagon -O2 < %s
+; REQUIRES: asserts
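+; Compile-only test; the unrolled loop of inline-asm atomic updates below is
+; presumably what triggered the original register-pressure problem that the
+; file name ("regp-underflow") refers to.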
+
+target triple = "hexagon-unknown--elf"
+
+@g0 = global i32 0, align 4
+@g1 = global i32 0, align 4
+@g2 = global i32 0, align 4
+@g3 = global i32 0, align 4
+@g4 = common global [100 x i32] zeroinitializer, align 8
+@g5 = common global [100 x i32] zeroinitializer, align 8
+@g6 = private unnamed_addr constant [13 x i8] c"ping started\00"
+@g7 = private unnamed_addr constant [13 x i8] c"pong started\00"
+
+; Function Attrs: nounwind
+define void @f0(i8* nocapture readnone %a0) #0 {
+b0:
+  tail call void @f1(i8* %a0, i32 0)
+  ret void
+}
+
+; Function Attrs: nounwind
+define internal void @f1(i8* nocapture readnone %a0, i32 %a1) #0 {
+b0:
+  %v0 = icmp eq i32 %a1, 1
+  br i1 %v0, label %b2, label %b1
+
+b1:                                               ; preds = %b0
+  %v1 = tail call i32 @f3(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @g6, i32 0, i32 0))
+  store volatile i32 1, i32* @g0, align 4, !tbaa !0
+  br label %b3
+
+b2:                                               ; preds = %b0
+  %v2 = tail call i32 @f3(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @g7, i32 0, i32 0))
+  store volatile i32 1, i32* @g1, align 4, !tbaa !0
+  br label %b3
+
+b3:                                               ; preds = %b3, %b2, %b1
+  %v3 = load volatile i32, i32* @g2, align 4, !tbaa !0
+  %v4 = icmp eq i32 %v3, 0
+  br i1 %v4, label %b3, label %b4
+
+b4:                                               ; preds = %b3
+  %v5 = select i1 %v0, i32* getelementptr inbounds ([100 x i32], [100 x i32]* @g5, i32 0, i32 0), i32* getelementptr inbounds ([100 x i32], [100 x i32]* @g4, i32 0, i32 0)
+  br label %b5
+
+b5:                                               ; preds = %b5, %b4
+  %v6 = phi i32* [ %v5, %b4 ], [ %v29, %b5 ]
+  %v7 = phi i32 [ 0, %b4 ], [ %v27, %b5 ]
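+  ; Each inline-asm block below is a load-locked/store-conditional retry loop
+  ; that atomically increments @g3 (clobbering predicate p0).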
+  %v8 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* @g3, i32* @g3, i32 1, i32* @g3), !srcloc !4
+  store i32 %v8, i32* %v6, align 4, !tbaa !0
+  %v9 = getelementptr i32, i32* %v6, i32 1
+  %v10 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* @g3, i32* @g3, i32 1, i32* @g3), !srcloc !4
+  store i32 %v10, i32* %v9, align 4, !tbaa !0
+  %v11 = getelementptr i32, i32* %v6, i32 2
+  %v12 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* @g3, i32* @g3, i32 1, i32* @g3), !srcloc !4
+  store i32 %v12, i32* %v11, align 4, !tbaa !0
+  %v13 = getelementptr i32, i32* %v6, i32 3
+  %v14 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* @g3, i32* @g3, i32 1, i32* @g3), !srcloc !4
+  store i32 %v14, i32* %v13, align 4, !tbaa !0
+  %v15 = getelementptr i32, i32* %v6, i32 4
+  %v16 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* @g3, i32* @g3, i32 1, i32* @g3), !srcloc !4
+  store i32 %v16, i32* %v15, align 4, !tbaa !0
+  %v17 = getelementptr i32, i32* %v6, i32 5
+  %v18 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* @g3, i32* @g3, i32 1, i32* @g3), !srcloc !4
+  store i32 %v18, i32* %v17, align 4, !tbaa !0
+  %v19 = getelementptr i32, i32* %v6, i32 6
+  %v20 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* @g3, i32* @g3, i32 1, i32* @g3), !srcloc !4
+  store i32 %v20, i32* %v19, align 4, !tbaa !0
+  %v21 = getelementptr i32, i32* %v6, i32 7
+  %v22 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* @g3, i32* @g3, i32 1, i32* @g3), !srcloc !4
+  store i32 %v22, i32* %v21, align 4, !tbaa !0
+  %v23 = getelementptr i32, i32* %v6, i32 8
+  %v24 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* @g3, i32* @g3, i32 1, i32* @g3), !srcloc !4
+  store i32 %v24, i32* %v23, align 4, !tbaa !0
+  %v25 = getelementptr i32, i32* %v6, i32 9
+  %v26 = tail call i32 asm sideeffect "1:     $0 = memw_locked($2)\0A       $0 = add($0, $3)\0A       memw_locked($2, p0) = $0\0A       if !p0 jump 1b\0A", "=&r,=*m,r,r,*m,~{p0}"(i32* @g3, i32* @g3, i32 1, i32* @g3), !srcloc !4
+  store i32 %v26, i32* %v25, align 4, !tbaa !0
+  %v27 = add nsw i32 %v7, 10
+  %v28 = icmp eq i32 %v27, 100
+  %v29 = getelementptr i32, i32* %v6, i32 10
+  br i1 %v28, label %b6, label %b5
+
+b6:                                               ; preds = %b5
+  tail call void @f2(i32 0) #1
+  ret void
+}
+
+; Function Attrs: nounwind
+declare void @f2(i32) #1
+
+; Function Attrs: nounwind
+declare i32 @f3(i8* nocapture readonly) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length64b" }
+attributes #1 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{i32 12730, i32 12771, i32 12807, i32 12851}

Added: llvm/trunk/test/CodeGen/Hexagon/regscav-wrong-super-sub-regs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/regscav-wrong-super-sub-regs.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/regscav-wrong-super-sub-regs.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/regscav-wrong-super-sub-regs.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,199 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+; Test that register scavenging does not assert because of wrong
+; bits being set for the Kill and Def bit vectors in replaceSuperBySubRegs.
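+; (replaceSuperBySubRegs refers to an internal helper, not anything in this
+; IR; the test itself only requires that llc compiles the function without
+; hitting the scavenger assertion.)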
+
+%s.0 = type { i32, i32*, [0 x i32], [0 x i32], [1 x i32] }
+%s.1 = type { %s.2, %s.4, %s.5 }
+%s.2 = type { %s.3 }
+%s.3 = type { i32 }
+%s.4 = type { i32 }
+%s.5 = type { [0 x i32], [0 x i32 (i32*, i32*, i32*, i32*, i32*, i32, i32*)*] }
+
+@g0 = common global i32 0, align 4
+@g1 = common global %s.0 zeroinitializer, align 4
+@g2 = common global i32 0, align 4
+@g3 = common global i32 0, align 4
+@g4 = common global i32* null, align 4
+@g5 = common global i32 0, align 4
+@g6 = common global i32 0, align 4
+
+; Function Attrs: nounwind
+define i32 @f0(%s.1* nocapture readonly %a0) #0 {
+b0:
+  %v0 = alloca [0 x i32], align 4
+  %v1 = load i32, i32* @g0, align 4, !tbaa !0
+  %v2 = getelementptr inbounds %s.1, %s.1* %a0, i32 0, i32 0, i32 0, i32 0
+  %v3 = load i32, i32* %v2, align 4, !tbaa !0
+  %v4 = load i32*, i32** getelementptr inbounds (%s.0, %s.0* @g1, i32 0, i32 1), align 4, !tbaa !4
+  %v5 = load i32, i32* @g2, align 4, !tbaa !0
+  %v6 = sub i32 0, %v5
+  %v7 = getelementptr inbounds i32, i32* %v4, i32 %v6
+  %v8 = getelementptr inbounds %s.1, %s.1* %a0, i32 0, i32 1, i32 0
+  %v9 = load i32, i32* %v8, align 4, !tbaa !0
+  switch i32 %v9, label %b17 [
+    i32 0, label %b1
+    i32 1, label %b2
+  ]
+
+b1:                                               ; preds = %b0
+  store i32 0, i32* @g3, align 4, !tbaa !0
+  br label %b2
+
+b2:                                               ; preds = %b1, %b0
+  %v10 = icmp eq i32 %v1, 0
+  %v11 = icmp sgt i32 %v3, 0
+  %v12 = getelementptr inbounds [0 x i32], [0 x i32]* %v0, i32 0, i32 0
+  %v13 = sdiv i32 %v3, 2
+  %v14 = add i32 %v13, -1
+  %v15 = getelementptr inbounds [0 x i32], [0 x i32]* %v0, i32 0, i32 1
+  %v16 = getelementptr inbounds [0 x i32], [0 x i32]* %v0, i32 0, i32 2
+  %v17 = getelementptr inbounds %s.1, %s.1* %a0, i32 0, i32 2, i32 1, i32 %v1
+  %v18 = getelementptr inbounds %s.1, %s.1* %a0, i32 0, i32 2, i32 1, i32 0
+  %v19 = sub i32 1, %v5
+  %v20 = getelementptr inbounds i32, i32* %v4, i32 %v19
+  %v21 = sdiv i32 %v3, 4
+  %v22 = icmp slt i32 %v3, -3
+  %v23 = add i32 %v3, -1
+  %v24 = lshr i32 %v23, 2
+  %v25 = mul i32 %v24, 4
+  %v26 = add i32 %v25, 4
+  %v27 = add i32 %v13, -2
+  %v28 = icmp slt i32 %v26, 0
+  %v29 = add i32 %v21, 1
+  %v30 = select i1 %v22, i32 1, i32 %v29
+  br label %b4
+
+b3:                                               ; preds = %b16
+  store i32 %v30, i32* @g3, align 4, !tbaa !0
+  br label %b4
+
+b4:                                               ; preds = %b13, %b3, %b2
+  %v31 = phi i32 [ undef, %b2 ], [ %v87, %b3 ], [ %v87, %b13 ]
+  %v32 = phi i32 [ undef, %b2 ], [ %v86, %b3 ], [ %v86, %b13 ]
+  %v33 = phi i32 [ undef, %b2 ], [ %v35, %b3 ], [ %v35, %b13 ]
+  %v34 = phi i32 [ undef, %b2 ], [ %v89, %b3 ], [ %v89, %b13 ]
+  %v35 = phi i32 [ undef, %b2 ], [ %v94, %b3 ], [ %v65, %b13 ]
+  br i1 %v10, label %b6, label %b5
+
+b5:                                               ; preds = %b5, %b4
+  br label %b5
+
+b6:                                               ; preds = %b4
+  br i1 %v11, label %b8, label %b7
+
+b7:                                               ; preds = %b6
+  store i32 0, i32* @g3, align 4, !tbaa !0
+  br label %b11
+
+b8:                                               ; preds = %b6
+  store i32 %v26, i32* @g3, align 4, !tbaa !0
+  br i1 %v28, label %b9, label %b11
+
+b9:                                               ; preds = %b8
+  %v36 = load i32*, i32** @g4, align 4, !tbaa !7
+  br label %b10
+
+b10:                                              ; preds = %b10, %b9
+  %v37 = phi i32 [ %v26, %b9 ], [ %v45, %b10 ]
+  %v38 = phi i32 [ %v34, %b9 ], [ %v44, %b10 ]
+  %v39 = add nsw i32 %v37, %v33
+  %v40 = shl i32 %v39, 1
+  %v41 = getelementptr inbounds i32, i32* %v36, i32 %v40
+  %v42 = load i32, i32* %v41, align 4, !tbaa !0
+  %v43 = icmp slt i32 %v42, %v31
+  %v44 = select i1 %v43, i32 0, i32 %v38
+  %v45 = add nsw i32 %v37, 1
+  store i32 %v45, i32* @g3, align 4, !tbaa !0
+  %v46 = icmp slt i32 %v45, 0
+  br i1 %v46, label %b10, label %b11
+
+b11:                                              ; preds = %b10, %b8, %b7
+  %v47 = phi i32 [ %v26, %b8 ], [ 0, %b7 ], [ 0, %b10 ]
+  %v48 = phi i32 [ %v34, %b8 ], [ %v34, %b7 ], [ %v44, %b10 ]
+  %v49 = load i32, i32* @g5, align 4, !tbaa !0
+  %v50 = icmp slt i32 %v13, %v49
+  %v51 = icmp slt i32 %v47, %v14
+  %v52 = and i1 %v50, %v51
+  br i1 %v52, label %b12, label %b13
+
+b12:                                              ; preds = %b11
+  %v53 = sub i32 %v27, %v47
+  %v54 = lshr i32 %v53, 1
+  %v55 = mul i32 %v54, 2
+  %v56 = add i32 %v47, 2
+  %v57 = add i32 %v56, %v55
+  store i32 %v57, i32* @g3, align 4, !tbaa !0
+  br label %b13
+
+b13:                                              ; preds = %b12, %b11
+  %v58 = shl i32 %v35, 2
+  %v59 = load i32*, i32** @g4, align 4, !tbaa !7
+  %v60 = getelementptr inbounds i32, i32* %v59, i32 %v58
+  %v61 = load i32, i32* %v60, align 4, !tbaa !0
+  %v62 = load i32, i32* %v7, align 4, !tbaa !0
+  %v63 = add nsw i32 %v62, %v61
+  %v64 = add nsw i32 %v63, %v32
+  store i32 %v64, i32* %v15, align 4, !tbaa !0
+  %v65 = add i32 %v35, -1
+  %v66 = getelementptr inbounds i32, i32* %v59, i32 %v65
+  %v67 = load i32, i32* %v66, align 4, !tbaa !0
+  %v68 = sub i32 %v49, %v5
+  %v69 = getelementptr inbounds i32, i32* %v4, i32 %v68
+  %v70 = load i32, i32* %v69, align 4, !tbaa !0
+  %v71 = add nsw i32 %v70, %v67
+  %v72 = load i32, i32* %v16, align 4, !tbaa !0
+  %v73 = add nsw i32 %v71, %v72
+  store i32 %v73, i32* %v16, align 4, !tbaa !0
+  %v74 = load i32, i32* @g6, align 4, !tbaa !0
+  %v75 = load i32 (i32*, i32*, i32*, i32*, i32*, i32, i32*)*, i32 (i32*, i32*, i32*, i32*, i32*, i32, i32*)** %v17, align 4, !tbaa !7
+  %v76 = load i32, i32* getelementptr inbounds (%s.0, %s.0* @g1, i32 0, i32 4, i32 0), align 4, !tbaa !0
+  %v77 = call i32 %v75(i32* getelementptr inbounds (%s.0, %s.0* @g1, i32 0, i32 4, i32 0), i32* null, i32* null, i32* null, i32* null, i32 %v76, i32* null) #0
+  %v78 = load i32 (i32*, i32*, i32*, i32*, i32*, i32, i32*)*, i32 (i32*, i32*, i32*, i32*, i32*, i32, i32*)** %v18, align 4, !tbaa !7
+  %v79 = inttoptr i32 %v74 to i32*
+  %v80 = load i32, i32* getelementptr inbounds (%s.0, %s.0* @g1, i32 0, i32 4, i32 0), align 4, !tbaa !0
+  %v81 = call i32 %v78(i32* getelementptr inbounds (%s.0, %s.0* @g1, i32 0, i32 4, i32 0), i32* null, i32* null, i32* null, i32* %v79, i32 %v80, i32* %v12) #0
+  %v82 = load i32*, i32** @g4, align 4, !tbaa !7
+  %v83 = getelementptr inbounds i32, i32* %v82, i32 %v58
+  %v84 = load i32, i32* %v83, align 4, !tbaa !0
+  %v85 = load i32, i32* %v20, align 4, !tbaa !0
+  %v86 = add nsw i32 %v85, %v84
+  store i32 %v86, i32* %v15, align 4, !tbaa !0
+  %v87 = load i32, i32* %v12, align 4, !tbaa !0
+  %v88 = icmp eq i32 %v87, 0
+  %v89 = select i1 %v88, i32 %v48, i32 1
+  store i32 %v89, i32* @g5, align 4, !tbaa !0
+  store i32 0, i32* @g3, align 4, !tbaa !0
+  br i1 %v22, label %b4, label %b14
+
+b14:                                              ; preds = %b16, %b13
+  %v90 = phi i32 [ %v95, %b16 ], [ 0, %b13 ]
+  %v91 = phi i32 [ %v94, %b16 ], [ %v65, %b13 ]
+  br i1 %v88, label %b16, label %b15
+
+b15:                                              ; preds = %b14
+  %v92 = mul i32 %v90, -4
+  %v93 = add nsw i32 %v92, 1
+  br label %b16
+
+b16:                                              ; preds = %b15, %b14
+  %v94 = phi i32 [ %v93, %b15 ], [ %v91, %b14 ]
+  %v95 = add nsw i32 %v90, 1
+  %v96 = icmp slt i32 %v90, %v21
+  br i1 %v96, label %b14, label %b3
+
+b17:                                              ; preds = %b0
+  ret i32 undef
+}
+
+attributes #0 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2, i64 0}
+!2 = !{!"omnipotent char", !3, i64 0}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!5, !6, i64 4}
+!5 = !{!"", !1, i64 0, !6, i64 4, !2, i64 8, !2, i64 8, !2, i64 8}
+!6 = !{!"any pointer", !2, i64 0}
+!7 = !{!6, !6, i64 0}

Added: llvm/trunk/test/CodeGen/Hexagon/regscavenger_fail_hwloop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/regscavenger_fail_hwloop.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/regscavenger_fail_hwloop.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/regscavenger_fail_hwloop.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,367 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+; This test used to fail with:
+;  Assertion `ScavengingFrameIndex >= 0 && "Cannot scavenge register without an
+;             emergency spill slot!"' failed.
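+; The nested loops below (a hardware-loop candidate, per the file name) keep
+; many addresses and accumulators live at once, which presumably forced the
+; scavenger to look for an emergency spill slot that had not been reserved.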
+
+target triple = "hexagon-unknown-linux-gnu"
+
+; Function Attrs: nounwind
+define hidden fastcc void @f0(i8* nocapture %a0, i32 %a1, i32 %a2, i32 %a3, i32 %a4, i8* nocapture %a5) #0 {
+b0:
+  %v0 = add i32 %a3, -4
+  %v1 = icmp ult i32 %v0, %a1
+  %v2 = add i32 %a2, -2
+  br i1 %v1, label %b2, label %b1
+
+b1:                                               ; preds = %b0
+  %v3 = add i32 %a4, -9
+  %v4 = icmp ugt i32 %v2, %v3
+  br i1 %v4, label %b2, label %b3
+
+b2:                                               ; preds = %b1, %b0
+  %v5 = add nsw i32 %a4, -1
+  %v6 = add nsw i32 %a3, -1
+  %v7 = add i32 %a2, 1
+  %v8 = add i32 %a2, 2
+  %v9 = icmp slt i32 %v7, 0
+  %v10 = icmp slt i32 %v7, %a4
+  %v11 = select i1 %v10, i32 %v7, i32 %v5
+  %v12 = select i1 %v9, i32 0, i32 %v11
+  %v13 = mul i32 %v12, %a3
+  %v14 = add i32 %a2, -1
+  %v15 = icmp slt i32 %v2, 0
+  %v16 = icmp slt i32 %v2, %a4
+  %v17 = select i1 %v16, i32 %v2, i32 %v5
+  %v18 = select i1 %v15, i32 0, i32 %v17
+  %v19 = mul i32 %v18, %a3
+  %v20 = icmp slt i32 %v14, 0
+  %v21 = icmp slt i32 %v14, %a4
+  %v22 = select i1 %v21, i32 %v14, i32 %v5
+  %v23 = select i1 %v20, i32 0, i32 %v22
+  %v24 = mul i32 %v23, %a3
+  %v25 = icmp slt i32 %a2, 0
+  %v26 = icmp slt i32 %a2, %a4
+  %v27 = select i1 %v26, i32 %a2, i32 %v5
+  %v28 = select i1 %v25, i32 0, i32 %v27
+  %v29 = mul i32 %v28, %a3
+  %v30 = add i32 %a2, 3
+  %v31 = icmp slt i32 %v8, 0
+  %v32 = icmp slt i32 %v8, %a4
+  %v33 = select i1 %v32, i32 %v8, i32 %v5
+  %v34 = select i1 %v31, i32 0, i32 %v33
+  %v35 = mul i32 %v34, %a3
+  %v36 = icmp slt i32 %v30, 0
+  %v37 = icmp slt i32 %v30, %a4
+  %v38 = select i1 %v37, i32 %v30, i32 %v5
+  %v39 = select i1 %v36, i32 0, i32 %v38
+  %v40 = mul i32 %v39, %a3
+  %v41 = add i32 %a2, 4
+  %v42 = icmp slt i32 %v41, 0
+  %v43 = icmp slt i32 %v41, %a4
+  %v44 = select i1 %v43, i32 %v41, i32 %v5
+  %v45 = select i1 %v42, i32 0, i32 %v44
+  %v46 = mul i32 %v45, %a3
+  %v47 = add i32 %a2, 5
+  %v48 = icmp slt i32 %v47, 0
+  %v49 = icmp slt i32 %v47, %a4
+  %v50 = select i1 %v49, i32 %v47, i32 %v5
+  %v51 = select i1 %v48, i32 0, i32 %v50
+  %v52 = mul i32 %v51, %a3
+  %v53 = add i32 %a2, 6
+  %v54 = icmp slt i32 %v53, 0
+  %v55 = icmp slt i32 %v53, %a4
+  %v56 = select i1 %v55, i32 %v53, i32 %v5
+  %v57 = select i1 %v54, i32 0, i32 %v56
+  %v58 = mul i32 %v57, %a3
+  br label %b5
+
+b3:                                               ; preds = %b1
+  %v59 = mul i32 %a3, %a2
+  %v60 = add i32 %v59, %a1
+  %v61 = getelementptr inbounds i8, i8* %a5, i32 %v60
+  %v62 = shl i32 %a3, 1
+  %v63 = sub i32 0, %v62
+  %v64 = sub i32 %a3, %v62
+  %v65 = add i32 %v64, %a3
+  %v66 = add i32 %v65, %a3
+  %v67 = add i32 %v66, %a3
+  %v68 = add i32 %v67, %a3
+  %v69 = add i32 %v68, %a3
+  %v70 = add i32 %v69, %a3
+  %v71 = add i32 %v70, %a3
+  br label %b4
+
+b4:                                               ; preds = %b4, %b3
+  %v72 = phi i8* [ %a0, %b3 ], [ %v165, %b4 ]
+  %v73 = phi i8* [ %v61, %b3 ], [ %v164, %b4 ]
+  %v74 = phi i32 [ 4, %b3 ], [ %v166, %b4 ]
+  %v75 = getelementptr inbounds i8, i8* %v73, i32 %v63
+  %v76 = load i8, i8* %v75, align 1, !tbaa !0
+  %v77 = zext i8 %v76 to i32
+  %v78 = getelementptr inbounds i8, i8* %v73, i32 %v64
+  %v79 = load i8, i8* %v78, align 1, !tbaa !0
+  %v80 = zext i8 %v79 to i32
+  %v81 = load i8, i8* %v73, align 1, !tbaa !0
+  %v82 = zext i8 %v81 to i32
+  %v83 = getelementptr inbounds i8, i8* %v73, i32 %v66
+  %v84 = load i8, i8* %v83, align 1, !tbaa !0
+  %v85 = zext i8 %v84 to i32
+  %v86 = getelementptr inbounds i8, i8* %v73, i32 %v67
+  %v87 = load i8, i8* %v86, align 1, !tbaa !0
+  %v88 = zext i8 %v87 to i32
+  %v89 = getelementptr inbounds i8, i8* %v73, i32 %v68
+  %v90 = load i8, i8* %v89, align 1, !tbaa !0
+  %v91 = zext i8 %v90 to i32
+  %v92 = getelementptr inbounds i8, i8* %v73, i32 %v69
+  %v93 = load i8, i8* %v92, align 1, !tbaa !0
+  %v94 = zext i8 %v93 to i32
+  %v95 = getelementptr inbounds i8, i8* %v73, i32 %v70
+  %v96 = load i8, i8* %v95, align 1, !tbaa !0
+  %v97 = zext i8 %v96 to i32
+  %v98 = getelementptr inbounds i8, i8* %v73, i32 %v71
+  %v99 = load i8, i8* %v98, align 1, !tbaa !0
+  %v100 = zext i8 %v99 to i32
+  %v101 = add nsw i32 %v88, %v80
+  %v102 = mul i32 %v101, -5
+  %v103 = add nsw i32 %v85, %v82
+  %v104 = mul nsw i32 %v103, 20
+  %v105 = add i32 %v77, 16
+  %v106 = add i32 %v105, %v104
+  %v107 = add i32 %v106, %v91
+  %v108 = add i32 %v107, %v102
+  %v109 = ashr i32 %v108, 5
+  %v110 = and i32 %v109, 256
+  %v111 = icmp ne i32 %v110, 0
+  %v112 = lshr i32 %v108, 31
+  %v113 = add i32 %v112, 255
+  %v114 = select i1 %v111, i32 %v113, i32 %v109
+  %v115 = trunc i32 %v114 to i8
+  store i8 %v115, i8* %v72, align 1, !tbaa !0
+  %v116 = add nsw i32 %v91, %v82
+  %v117 = mul i32 %v116, -5
+  %v118 = add nsw i32 %v88, %v85
+  %v119 = mul nsw i32 %v118, 20
+  %v120 = add i32 %v80, 16
+  %v121 = add i32 %v120, %v119
+  %v122 = add i32 %v121, %v94
+  %v123 = add i32 %v122, %v117
+  %v124 = ashr i32 %v123, 5
+  %v125 = and i32 %v124, 256
+  %v126 = icmp ne i32 %v125, 0
+  %v127 = lshr i32 %v123, 31
+  %v128 = add i32 %v127, 255
+  %v129 = select i1 %v126, i32 %v128, i32 %v124
+  %v130 = trunc i32 %v129 to i8
+  %v131 = getelementptr inbounds i8, i8* %v72, i32 4
+  store i8 %v130, i8* %v131, align 1, !tbaa !0
+  %v132 = add nsw i32 %v94, %v85
+  %v133 = mul i32 %v132, -5
+  %v134 = add nsw i32 %v91, %v88
+  %v135 = mul nsw i32 %v134, 20
+  %v136 = add i32 %v82, 16
+  %v137 = add i32 %v136, %v135
+  %v138 = add i32 %v137, %v97
+  %v139 = add i32 %v138, %v133
+  %v140 = ashr i32 %v139, 5
+  %v141 = and i32 %v140, 256
+  %v142 = icmp ne i32 %v141, 0
+  %v143 = lshr i32 %v139, 31
+  %v144 = add i32 %v143, 255
+  %v145 = select i1 %v142, i32 %v144, i32 %v140
+  %v146 = trunc i32 %v145 to i8
+  %v147 = getelementptr inbounds i8, i8* %v72, i32 8
+  store i8 %v146, i8* %v147, align 1, !tbaa !0
+  %v148 = add nsw i32 %v97, %v88
+  %v149 = mul i32 %v148, -5
+  %v150 = add nsw i32 %v94, %v91
+  %v151 = mul nsw i32 %v150, 20
+  %v152 = add i32 %v85, 16
+  %v153 = add i32 %v152, %v151
+  %v154 = add i32 %v153, %v100
+  %v155 = add i32 %v154, %v149
+  %v156 = ashr i32 %v155, 5
+  %v157 = and i32 %v156, 256
+  %v158 = icmp ne i32 %v157, 0
+  %v159 = lshr i32 %v155, 31
+  %v160 = add i32 %v159, 255
+  %v161 = select i1 %v158, i32 %v160, i32 %v156
+  %v162 = trunc i32 %v161 to i8
+  %v163 = getelementptr inbounds i8, i8* %v72, i32 12
+  store i8 %v162, i8* %v163, align 1, !tbaa !0
+  %v164 = getelementptr inbounds i8, i8* %v73, i32 1
+  %v165 = getelementptr inbounds i8, i8* %v72, i32 1
+  %v166 = add i32 %v74, -1
+  %v167 = icmp eq i32 %v166, 0
+  br i1 %v167, label %b7, label %b4
+
+b5:                                               ; preds = %b5, %b2
+  %v168 = phi i8* [ %a0, %b2 ], [ %v312, %b5 ]
+  %v169 = phi i32 [ 0, %b2 ], [ %v313, %b5 ]
+  %v170 = add i32 %v169, %a1
+  %v171 = icmp slt i32 %v170, 0
+  %v172 = icmp slt i32 %v170, %a3
+  %v173 = select i1 %v172, i32 %v170, i32 %v6
+  %v174 = select i1 %v171, i32 0, i32 %v173
+  %v175 = add i32 %v19, %v174
+  %v176 = getelementptr inbounds i8, i8* %a5, i32 %v175
+  %v177 = load i8, i8* %v176, align 1, !tbaa !0
+  %v178 = zext i8 %v177 to i32
+  %v179 = add i32 %v24, %v174
+  %v180 = getelementptr inbounds i8, i8* %a5, i32 %v179
+  %v181 = load i8, i8* %v180, align 1, !tbaa !0
+  %v182 = zext i8 %v181 to i32
+  %v183 = mul nsw i32 %v182, -5
+  %v184 = add nsw i32 %v183, %v178
+  %v185 = add i32 %v29, %v174
+  %v186 = getelementptr inbounds i8, i8* %a5, i32 %v185
+  %v187 = load i8, i8* %v186, align 1, !tbaa !0
+  %v188 = zext i8 %v187 to i32
+  %v189 = mul nsw i32 %v188, 20
+  %v190 = add nsw i32 %v189, %v184
+  %v191 = add i32 %v13, %v174
+  %v192 = getelementptr inbounds i8, i8* %a5, i32 %v191
+  %v193 = load i8, i8* %v192, align 1, !tbaa !0
+  %v194 = zext i8 %v193 to i32
+  %v195 = mul nsw i32 %v194, 20
+  %v196 = add nsw i32 %v195, %v190
+  %v197 = add i32 %v35, %v174
+  %v198 = getelementptr inbounds i8, i8* %a5, i32 %v197
+  %v199 = load i8, i8* %v198, align 1, !tbaa !0
+  %v200 = zext i8 %v199 to i32
+  %v201 = mul nsw i32 %v200, -5
+  %v202 = add nsw i32 %v201, %v196
+  %v203 = add i32 %v40, %v174
+  %v204 = getelementptr inbounds i8, i8* %a5, i32 %v203
+  %v205 = load i8, i8* %v204, align 1, !tbaa !0
+  %v206 = zext i8 %v205 to i32
+  %v207 = add nsw i32 %v206, %v202
+  %v208 = add nsw i32 %v207, 16
+  %v209 = ashr i32 %v208, 5
+  %v210 = and i32 %v209, 256
+  %v211 = icmp ne i32 %v210, 0
+  %v212 = lshr i32 %v208, 31
+  %v213 = add i32 %v212, 255
+  %v214 = select i1 %v211, i32 %v213, i32 %v209
+  %v215 = trunc i32 %v214 to i8
+  store i8 %v215, i8* %v168, align 1, !tbaa !0
+  %v216 = getelementptr inbounds i8, i8* %v168, i32 4
+  %v217 = load i8, i8* %v180, align 1, !tbaa !0
+  %v218 = zext i8 %v217 to i32
+  %v219 = load i8, i8* %v186, align 1, !tbaa !0
+  %v220 = zext i8 %v219 to i32
+  %v221 = mul nsw i32 %v220, -5
+  %v222 = add nsw i32 %v221, %v218
+  %v223 = load i8, i8* %v192, align 1, !tbaa !0
+  %v224 = zext i8 %v223 to i32
+  %v225 = mul nsw i32 %v224, 20
+  %v226 = add nsw i32 %v225, %v222
+  %v227 = load i8, i8* %v198, align 1, !tbaa !0
+  %v228 = zext i8 %v227 to i32
+  %v229 = mul nsw i32 %v228, 20
+  %v230 = add nsw i32 %v229, %v226
+  %v231 = load i8, i8* %v204, align 1, !tbaa !0
+  %v232 = zext i8 %v231 to i32
+  %v233 = mul nsw i32 %v232, -5
+  %v234 = add nsw i32 %v233, %v230
+  %v235 = add i32 %v46, %v174
+  %v236 = getelementptr inbounds i8, i8* %a5, i32 %v235
+  %v237 = load i8, i8* %v236, align 1, !tbaa !0
+  %v238 = zext i8 %v237 to i32
+  %v239 = add nsw i32 %v238, %v234
+  %v240 = add nsw i32 %v239, 16
+  %v241 = ashr i32 %v240, 5
+  %v242 = and i32 %v241, 256
+  %v243 = icmp ne i32 %v242, 0
+  %v244 = lshr i32 %v240, 31
+  %v245 = add i32 %v244, 255
+  %v246 = select i1 %v243, i32 %v245, i32 %v241
+  %v247 = trunc i32 %v246 to i8
+  store i8 %v247, i8* %v216, align 1, !tbaa !0
+  %v248 = getelementptr inbounds i8, i8* %v168, i32 8
+  %v249 = load i8, i8* %v186, align 1, !tbaa !0
+  %v250 = zext i8 %v249 to i32
+  %v251 = load i8, i8* %v192, align 1, !tbaa !0
+  %v252 = zext i8 %v251 to i32
+  %v253 = mul nsw i32 %v252, -5
+  %v254 = add nsw i32 %v253, %v250
+  %v255 = load i8, i8* %v198, align 1, !tbaa !0
+  %v256 = zext i8 %v255 to i32
+  %v257 = mul nsw i32 %v256, 20
+  %v258 = add nsw i32 %v257, %v254
+  %v259 = load i8, i8* %v204, align 1, !tbaa !0
+  %v260 = zext i8 %v259 to i32
+  %v261 = mul nsw i32 %v260, 20
+  %v262 = add nsw i32 %v261, %v258
+  %v263 = load i8, i8* %v236, align 1, !tbaa !0
+  %v264 = zext i8 %v263 to i32
+  %v265 = mul nsw i32 %v264, -5
+  %v266 = add nsw i32 %v265, %v262
+  %v267 = add i32 %v52, %v174
+  %v268 = getelementptr inbounds i8, i8* %a5, i32 %v267
+  %v269 = load i8, i8* %v268, align 1, !tbaa !0
+  %v270 = zext i8 %v269 to i32
+  %v271 = add nsw i32 %v270, %v266
+  %v272 = add nsw i32 %v271, 16
+  %v273 = ashr i32 %v272, 5
+  %v274 = and i32 %v273, 256
+  %v275 = icmp ne i32 %v274, 0
+  %v276 = lshr i32 %v272, 31
+  %v277 = add i32 %v276, 255
+  %v278 = select i1 %v275, i32 %v277, i32 %v273
+  %v279 = trunc i32 %v278 to i8
+  store i8 %v279, i8* %v248, align 1, !tbaa !0
+  %v280 = getelementptr inbounds i8, i8* %v168, i32 12
+  %v281 = load i8, i8* %v192, align 1, !tbaa !0
+  %v282 = zext i8 %v281 to i32
+  %v283 = load i8, i8* %v198, align 1, !tbaa !0
+  %v284 = zext i8 %v283 to i32
+  %v285 = mul nsw i32 %v284, -5
+  %v286 = add nsw i32 %v285, %v282
+  %v287 = load i8, i8* %v204, align 1, !tbaa !0
+  %v288 = zext i8 %v287 to i32
+  %v289 = mul nsw i32 %v288, 20
+  %v290 = add nsw i32 %v289, %v286
+  %v291 = load i8, i8* %v236, align 1, !tbaa !0
+  %v292 = zext i8 %v291 to i32
+  %v293 = mul nsw i32 %v292, 20
+  %v294 = add nsw i32 %v293, %v290
+  %v295 = load i8, i8* %v268, align 1, !tbaa !0
+  %v296 = zext i8 %v295 to i32
+  %v297 = mul nsw i32 %v296, -5
+  %v298 = add nsw i32 %v297, %v294
+  %v299 = add i32 %v58, %v174
+  %v300 = getelementptr inbounds i8, i8* %a5, i32 %v299
+  %v301 = load i8, i8* %v300, align 1, !tbaa !0
+  %v302 = zext i8 %v301 to i32
+  %v303 = add nsw i32 %v302, %v298
+  %v304 = add nsw i32 %v303, 16
+  %v305 = ashr i32 %v304, 5
+  %v306 = and i32 %v305, 256
+  %v307 = icmp ne i32 %v306, 0
+  %v308 = lshr i32 %v304, 31
+  %v309 = add i32 %v308, 255
+  %v310 = select i1 %v307, i32 %v309, i32 %v305
+  %v311 = trunc i32 %v310 to i8
+  store i8 %v311, i8* %v280, align 1, !tbaa !0
+  %v312 = getelementptr inbounds i8, i8* %v168, i32 1
+  %v313 = add i32 %v169, 1
+  %v314 = icmp eq i32 %v313, 4
+  br i1 %v314, label %b6, label %b5
+
+b6:                                               ; preds = %b5
+  br label %b8
+
+b7:                                               ; preds = %b4
+  br label %b8
+
+b8:                                               ; preds = %b7, %b6
+  ret void
+}
+
+attributes #0 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"omnipotent char", !2}
+!2 = !{!"Simple C/C++ TBAA"}

Added: llvm/trunk/test/CodeGen/Hexagon/regscavengerbug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/regscavengerbug.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/regscavengerbug.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/regscavengerbug.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,357 @@
+; RUN: llc -march=hexagon -O3 < %s
+; REQUIRES: asserts
+
+; This used to assert in the register scavenger.
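+; The function copies 24-byte aggregates and loads/stores doubles into deeply
+; nested arrays; compiling it at -O3 should now complete without asserting.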
+
+target triple = "hexagon-unknown-linux-gnu"
+
+%0 = type { %1 }
+%1 = type { %2 }
+%2 = type { [4 x [4 x double]] }
+%3 = type { [3 x double] }
+%4 = type { %5, %0, %0, %5*, %3, %3 }
+%5 = type { i32 (...)** }
+%6 = type { %3, %3 }
+
+declare void @f0(%3* sret, %0*, %3*)
+
+; Function Attrs: nounwind
+define void @f1(%4* %a0, %0* nocapture %a1, %0* nocapture %a2) #0 align 2 {
+b0:
+  %v0 = alloca %6, align 8
+  %v1 = alloca [2 x [2 x [2 x %3]]], align 8
+  %v2 = alloca %3, align 8
+  %v3 = getelementptr inbounds %4, %4* %a0, i32 0, i32 1
+  %v4 = bitcast %0* %v3 to i8*
+  %v5 = bitcast %0* %a1 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v4, i8* align 8 %v5, i32 128, i1 false)
+  %v6 = getelementptr inbounds %4, %4* %a0, i32 0, i32 2
+  %v7 = bitcast %0* %v6 to i8*
+  %v8 = bitcast %0* %a2 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v7, i8* align 8 %v8, i32 128, i1 false)
+  %v9 = bitcast %6* %v0 to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 8 %v9, i8 0, i64 48, i1 false)
+  %v10 = getelementptr inbounds %4, %4* %a0, i32 0, i32 3
+  %v11 = load %5*, %5** %v10, align 4, !tbaa !0
+  %v12 = bitcast %5* %v11 to i32 (%5*, double, double, %6*)***
+  %v13 = load i32 (%5*, double, double, %6*)**, i32 (%5*, double, double, %6*)*** %v12, align 4, !tbaa !4
+  %v14 = getelementptr inbounds i32 (%5*, double, double, %6*)*, i32 (%5*, double, double, %6*)** %v13, i32 3
+  %v15 = load i32 (%5*, double, double, %6*)*, i32 (%5*, double, double, %6*)** %v14, align 4
+  %v16 = call i32 %v15(%5* %v11, double 0.000000e+00, double 0.000000e+00, %6* %v0)
+  %v17 = icmp eq i32 %v16, 0
+  br i1 %v17, label %b1, label %b3
+
+b1:                                               ; preds = %b0
+  %v18 = getelementptr inbounds %4, %4* %a0, i32 0, i32 4, i32 0, i32 0
+  store double -1.000000e+06, double* %v18, align 8, !tbaa !6
+  %v19 = getelementptr inbounds %4, %4* %a0, i32 0, i32 4, i32 0, i32 1
+  store double -1.000000e+06, double* %v19, align 8, !tbaa !6
+  %v20 = getelementptr inbounds %4, %4* %a0, i32 0, i32 4, i32 0, i32 2
+  store double -1.000000e+06, double* %v20, align 8, !tbaa !6
+  %v21 = getelementptr inbounds %4, %4* %a0, i32 0, i32 5, i32 0, i32 0
+  store double 1.000000e+06, double* %v21, align 8, !tbaa !6
+  %v22 = getelementptr inbounds %4, %4* %a0, i32 0, i32 5, i32 0, i32 1
+  store double 1.000000e+06, double* %v22, align 8, !tbaa !6
+  %v23 = getelementptr inbounds %4, %4* %a0, i32 0, i32 5, i32 0, i32 2
+  store double 1.000000e+06, double* %v23, align 8, !tbaa !6
+  br label %b2
+
+b2:                                               ; preds = %b3, %b1
+  ret void
+
+b3:                                               ; preds = %b0
+  %v24 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 0
+  %v25 = bitcast [2 x [2 x [2 x %3]]]* %v1 to i8*
+  %v26 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 2
+  %v27 = bitcast %3* %v26 to i8*
+  %v28 = bitcast [2 x [2 x [2 x %3]]]* %v1 to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 8 %v28, i8 0, i64 48, i1 false)
+  call void @llvm.memset.p0i8.i64(i8* align 8 %v27, i8 0, i64 24, i1 false)
+  %v29 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 3
+  %v30 = bitcast %3* %v29 to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 8 %v30, i8 0, i64 24, i1 false)
+  %v31 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 4
+  %v32 = bitcast %3* %v31 to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 8 %v32, i8 0, i64 24, i1 false)
+  %v33 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 5
+  %v34 = bitcast %3* %v33 to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 8 %v34, i8 0, i64 24, i1 false)
+  %v35 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 6
+  %v36 = bitcast %3* %v35 to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 8 %v36, i8 0, i64 24, i1 false)
+  %v37 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 7
+  %v38 = bitcast %3* %v37 to i8*
+  call void @llvm.memset.p0i8.i64(i8* align 8 %v38, i8 0, i64 24, i1 false)
+  %v39 = getelementptr inbounds %6, %6* %v0, i32 0, i32 0, i32 0, i32 0
+  %v40 = getelementptr inbounds %6, %6* %v0, i32 0, i32 0, i32 0, i32 1
+  %v41 = getelementptr inbounds %6, %6* %v0, i32 0, i32 0, i32 0, i32 2
+  %v42 = bitcast %3* %v2 to i8*
+  %v43 = getelementptr inbounds %6, %6* %v0, i32 0, i32 1, i32 0, i32 2
+  %v44 = getelementptr inbounds %6, %6* %v0, i32 0, i32 1, i32 0, i32 1
+  %v45 = getelementptr inbounds %6, %6* %v0, i32 0, i32 1, i32 0, i32 0
+  %v46 = load double, double* %v39, align 8, !tbaa !6
+  %v47 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0
+  store double %v46, double* %v47, align 8, !tbaa !6
+  %v48 = load double, double* %v40, align 8, !tbaa !6
+  %v49 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1
+  store double %v48, double* %v49, align 8, !tbaa !6
+  %v50 = load double, double* %v41, align 8, !tbaa !6
+  %v51 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 2
+  store double %v50, double* %v51, align 8, !tbaa !6
+  call void @f0(%3* sret %v2, %0* %v3, %3* %v24)
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v25, i8* align 8 %v42, i32 24, i1 false)
+  %v52 = load double, double* %v39, align 8, !tbaa !6
+  %v53 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0
+  store double %v52, double* %v53, align 8, !tbaa !6
+  %v54 = load double, double* %v40, align 8, !tbaa !6
+  %v55 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 1
+  store double %v54, double* %v55, align 8, !tbaa !6
+  %v56 = load double, double* %v43, align 8, !tbaa !6
+  %v57 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 2
+  store double %v56, double* %v57, align 8, !tbaa !6
+  %v58 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 1
+  call void @f0(%3* sret %v2, %0* %v3, %3* %v58)
+  %v59 = bitcast %3* %v58 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v59, i8* align 8 %v42, i32 24, i1 false)
+  %v60 = load double, double* %v39, align 8, !tbaa !6
+  %v61 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0
+  store double %v60, double* %v61, align 8, !tbaa !6
+  %v62 = load double, double* %v44, align 8, !tbaa !6
+  %v63 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 1
+  store double %v62, double* %v63, align 8, !tbaa !6
+  %v64 = load double, double* %v41, align 8, !tbaa !6
+  %v65 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 2
+  store double %v64, double* %v65, align 8, !tbaa !6
+  %v66 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 0
+  call void @f0(%3* sret %v2, %0* %v3, %3* %v66)
+  %v67 = bitcast %3* %v66 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v67, i8* align 8 %v42, i32 24, i1 false)
+  %v68 = load double, double* %v39, align 8, !tbaa !6
+  %v69 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 1, i32 0, i32 0
+  store double %v68, double* %v69, align 8, !tbaa !6
+  %v70 = load double, double* %v44, align 8, !tbaa !6
+  %v71 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 1, i32 0, i32 1
+  store double %v70, double* %v71, align 8, !tbaa !6
+  %v72 = load double, double* %v43, align 8, !tbaa !6
+  %v73 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 1, i32 0, i32 2
+  store double %v72, double* %v73, align 8, !tbaa !6
+  %v74 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 1
+  call void @f0(%3* sret %v2, %0* %v3, %3* %v74)
+  %v75 = bitcast %3* %v74 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v75, i8* align 8 %v42, i32 24, i1 false)
+  %v76 = load double, double* %v45, align 8, !tbaa !6
+  %v77 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0
+  store double %v76, double* %v77, align 8, !tbaa !6
+  %v78 = load double, double* %v40, align 8, !tbaa !6
+  %v79 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 0, i32 0, i32 1
+  store double %v78, double* %v79, align 8, !tbaa !6
+  %v80 = load double, double* %v41, align 8, !tbaa !6
+  %v81 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 0, i32 0, i32 2
+  store double %v80, double* %v81, align 8, !tbaa !6
+  %v82 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 0
+  call void @f0(%3* sret %v2, %0* %v3, %3* %v82)
+  %v83 = bitcast %3* %v82 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v83, i8* align 8 %v42, i32 24, i1 false)
+  %v84 = load double, double* %v45, align 8, !tbaa !6
+  %v85 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0
+  store double %v84, double* %v85, align 8, !tbaa !6
+  %v86 = load double, double* %v40, align 8, !tbaa !6
+  %v87 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1
+  store double %v86, double* %v87, align 8, !tbaa !6
+  %v88 = load double, double* %v43, align 8, !tbaa !6
+  %v89 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 2
+  store double %v88, double* %v89, align 8, !tbaa !6
+  %v90 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 1
+  call void @f0(%3* sret %v2, %0* %v3, %3* %v90)
+  %v91 = bitcast %3* %v90 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v91, i8* align 8 %v42, i32 24, i1 false)
+  %v92 = load double, double* %v45, align 8, !tbaa !6
+  %v93 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 0
+  store double %v92, double* %v93, align 8, !tbaa !6
+  %v94 = load double, double* %v44, align 8, !tbaa !6
+  %v95 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1
+  store double %v94, double* %v95, align 8, !tbaa !6
+  %v96 = load double, double* %v41, align 8, !tbaa !6
+  %v97 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 2
+  store double %v96, double* %v97, align 8, !tbaa !6
+  %v98 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 0
+  call void @f0(%3* sret %v2, %0* %v3, %3* %v98)
+  %v99 = bitcast %3* %v98 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v99, i8* align 8 %v42, i32 24, i1 false)
+  %v100 = load double, double* %v45, align 8, !tbaa !6
+  %v101 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 1, i32 0, i32 0
+  store double %v100, double* %v101, align 8, !tbaa !6
+  %v102 = load double, double* %v44, align 8, !tbaa !6
+  %v103 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 1, i32 0, i32 1
+  store double %v102, double* %v103, align 8, !tbaa !6
+  %v104 = load double, double* %v43, align 8, !tbaa !6
+  %v105 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 1, i32 0, i32 2
+  store double %v104, double* %v105, align 8, !tbaa !6
+  %v106 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 1
+  call void @f0(%3* sret %v2, %0* %v3, %3* %v106)
+  %v107 = bitcast %3* %v106 to i8*
+  call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 8 %v107, i8* align 8 %v42, i32 24, i1 false)
+  %v108 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0
+  %v109 = load double, double* %v108, align 8, !tbaa !6
+  %v110 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 1
+  %v111 = load double, double* %v110, align 8, !tbaa !6
+  %v112 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 2
+  %v113 = load double, double* %v112, align 8, !tbaa !6
+  %v114 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 0
+  %v115 = load double, double* %v114, align 8, !tbaa !6
+  %v116 = fcmp olt double %v115, %v109
+  %v117 = select i1 %v116, double %v115, double %v109
+  %v118 = fcmp ogt double %v115, %v109
+  %v119 = select i1 %v118, double %v115, double %v109
+  %v120 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 1
+  %v121 = load double, double* %v120, align 8, !tbaa !6
+  %v122 = fcmp olt double %v121, %v111
+  %v123 = select i1 %v122, double %v121, double %v111
+  %v124 = fcmp ogt double %v121, %v111
+  %v125 = select i1 %v124, double %v121, double %v111
+  %v126 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 0, i32 1, i32 0, i32 2
+  %v127 = load double, double* %v126, align 8, !tbaa !6
+  %v128 = fcmp olt double %v127, %v113
+  %v129 = select i1 %v128, double %v127, double %v113
+  %v130 = fcmp ogt double %v127, %v113
+  %v131 = select i1 %v130, double %v127, double %v113
+  %v132 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 0
+  %v133 = load double, double* %v132, align 8, !tbaa !6
+  %v134 = fcmp olt double %v133, %v117
+  %v135 = select i1 %v134, double %v133, double %v117
+  %v136 = fcmp ogt double %v133, %v119
+  %v137 = select i1 %v136, double %v133, double %v119
+  %v138 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 1
+  %v139 = load double, double* %v138, align 8, !tbaa !6
+  %v140 = fcmp olt double %v139, %v123
+  %v141 = select i1 %v140, double %v139, double %v123
+  %v142 = fcmp ogt double %v139, %v125
+  %v143 = select i1 %v142, double %v139, double %v125
+  %v144 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 0, i32 0, i32 2
+  %v145 = load double, double* %v144, align 8, !tbaa !6
+  %v146 = fcmp olt double %v145, %v129
+  %v147 = select i1 %v146, double %v145, double %v129
+  %v148 = fcmp ogt double %v145, %v131
+  %v149 = select i1 %v148, double %v145, double %v131
+  %v150 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 1, i32 0, i32 0
+  %v151 = load double, double* %v150, align 8, !tbaa !6
+  %v152 = fcmp olt double %v151, %v135
+  %v153 = select i1 %v152, double %v151, double %v135
+  %v154 = fcmp ogt double %v151, %v137
+  %v155 = select i1 %v154, double %v151, double %v137
+  %v156 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 1, i32 0, i32 1
+  %v157 = load double, double* %v156, align 8, !tbaa !6
+  %v158 = fcmp olt double %v157, %v141
+  %v159 = select i1 %v158, double %v157, double %v141
+  %v160 = fcmp ogt double %v157, %v143
+  %v161 = select i1 %v160, double %v157, double %v143
+  %v162 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 0, i32 1, i32 1, i32 0, i32 2
+  %v163 = load double, double* %v162, align 8, !tbaa !6
+  %v164 = fcmp olt double %v163, %v147
+  %v165 = select i1 %v164, double %v163, double %v147
+  %v166 = fcmp ogt double %v163, %v149
+  %v167 = select i1 %v166, double %v163, double %v149
+  %v168 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 0, i32 0, i32 0
+  %v169 = load double, double* %v168, align 8, !tbaa !6
+  %v170 = fcmp olt double %v169, %v153
+  %v171 = select i1 %v170, double %v169, double %v153
+  %v172 = fcmp ogt double %v169, %v155
+  %v173 = select i1 %v172, double %v169, double %v155
+  %v174 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 0, i32 0, i32 1
+  %v175 = load double, double* %v174, align 8, !tbaa !6
+  %v176 = fcmp olt double %v175, %v159
+  %v177 = select i1 %v176, double %v175, double %v159
+  %v178 = fcmp ogt double %v175, %v161
+  %v179 = select i1 %v178, double %v175, double %v161
+  %v180 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 0, i32 0, i32 2
+  %v181 = load double, double* %v180, align 8, !tbaa !6
+  %v182 = fcmp olt double %v181, %v165
+  %v183 = select i1 %v182, double %v181, double %v165
+  %v184 = fcmp ogt double %v181, %v167
+  %v185 = select i1 %v184, double %v181, double %v167
+  %v186 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 0
+  %v187 = load double, double* %v186, align 8, !tbaa !6
+  %v188 = fcmp olt double %v187, %v171
+  %v189 = select i1 %v188, double %v187, double %v171
+  %v190 = fcmp ogt double %v187, %v173
+  %v191 = select i1 %v190, double %v187, double %v173
+  %v192 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1
+  %v193 = load double, double* %v192, align 8, !tbaa !6
+  %v194 = fcmp olt double %v193, %v177
+  %v195 = select i1 %v194, double %v193, double %v177
+  %v196 = fcmp ogt double %v193, %v179
+  %v197 = select i1 %v196, double %v193, double %v179
+  %v198 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 2
+  %v199 = load double, double* %v198, align 8, !tbaa !6
+  %v200 = fcmp olt double %v199, %v183
+  %v201 = select i1 %v200, double %v199, double %v183
+  %v202 = fcmp ogt double %v199, %v185
+  %v203 = select i1 %v202, double %v199, double %v185
+  %v204 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 0
+  %v205 = load double, double* %v204, align 8, !tbaa !6
+  %v206 = fcmp olt double %v205, %v189
+  %v207 = select i1 %v206, double %v205, double %v189
+  %v208 = fcmp ogt double %v205, %v191
+  %v209 = select i1 %v208, double %v205, double %v191
+  %v210 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1
+  %v211 = load double, double* %v210, align 8, !tbaa !6
+  %v212 = fcmp olt double %v211, %v195
+  %v213 = select i1 %v212, double %v211, double %v195
+  %v214 = fcmp ogt double %v211, %v197
+  %v215 = select i1 %v214, double %v211, double %v197
+  %v216 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 2
+  %v217 = load double, double* %v216, align 8, !tbaa !6
+  %v218 = fcmp olt double %v217, %v201
+  %v219 = select i1 %v218, double %v217, double %v201
+  %v220 = fcmp ogt double %v217, %v203
+  %v221 = select i1 %v220, double %v217, double %v203
+  %v222 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 1, i32 0, i32 0
+  %v223 = load double, double* %v222, align 8, !tbaa !6
+  %v224 = fcmp olt double %v223, %v207
+  %v225 = select i1 %v224, double %v223, double %v207
+  %v226 = fcmp ogt double %v223, %v209
+  %v227 = select i1 %v226, double %v223, double %v209
+  %v228 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 1, i32 0, i32 1
+  %v229 = load double, double* %v228, align 8, !tbaa !6
+  %v230 = fcmp olt double %v229, %v213
+  %v231 = select i1 %v230, double %v229, double %v213
+  %v232 = fcmp ogt double %v229, %v215
+  %v233 = select i1 %v232, double %v229, double %v215
+  %v234 = getelementptr inbounds [2 x [2 x [2 x %3]]], [2 x [2 x [2 x %3]]]* %v1, i32 0, i32 1, i32 1, i32 1, i32 0, i32 2
+  %v235 = load double, double* %v234, align 8, !tbaa !6
+  %v236 = fcmp olt double %v235, %v219
+  %v237 = select i1 %v236, double %v235, double %v219
+  %v238 = fcmp ogt double %v235, %v221
+  %v239 = select i1 %v238, double %v235, double %v221
+  %v240 = getelementptr inbounds %4, %4* %a0, i32 0, i32 4, i32 0, i32 0
+  store double %v225, double* %v240, align 8
+  %v241 = getelementptr inbounds %4, %4* %a0, i32 0, i32 4, i32 0, i32 1
+  store double %v231, double* %v241, align 8
+  %v242 = getelementptr inbounds %4, %4* %a0, i32 0, i32 4, i32 0, i32 2
+  store double %v237, double* %v242, align 8
+  %v243 = getelementptr inbounds %4, %4* %a0, i32 0, i32 5, i32 0, i32 0
+  store double %v227, double* %v243, align 8
+  %v244 = getelementptr inbounds %4, %4* %a0, i32 0, i32 5, i32 0, i32 1
+  store double %v233, double* %v244, align 8
+  %v245 = getelementptr inbounds %4, %4* %a0, i32 0, i32 5, i32 0, i32 2
+  store double %v239, double* %v245, align 8
+  br label %b2
+}
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture writeonly, i8* nocapture readonly, i32, i1) #1
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { argmemonly nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"any pointer", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"vtable pointer", !3}
+!6 = !{!7, !7, i64 0}
+!7 = !{!"double", !2}

Added: llvm/trunk/test/CodeGen/Hexagon/rotl-i64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/rotl-i64.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/rotl-i64.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/rotl-i64.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,32 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: asl
+
+; Function Attrs: nounwind
+define fastcc void @f0() #0 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b1, %b0
+  br i1 undef, label %b2, label %b1
+
+b2:                                               ; preds = %b1
+  br label %b3
+
+b3:                                               ; preds = %b3, %b2
+  %v0 = load i64, i64* undef, align 8, !tbaa !0
+  %v1 = lshr i64 %v0, 8
+  %v2 = shl i64 %v0, 56
+  %v3 = or i64 %v2, %v1
+  %v4 = xor i64 %v3, 0
+  %v5 = xor i64 %v4, 0
+  %v6 = add i64 0, %v5
+  store i64 %v6, i64* undef, align 8, !tbaa !0
+  br label %b3
+}
+
+attributes #0 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"long long", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}
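
The shift/or sequence in b3 is the usual C rotate idiom; the CHECK line only looks for an asl shift in the lowered output. A hypothetical C equivalent of that pattern (illustrative only, not the original source of this reduction):

    /* (x >> 8) | (x << 56) rotates a 64-bit value right by 8
       (equivalently, left by 56); the test just checks that the
       generated Hexagon code uses an asl shift. */
    unsigned long long rot(unsigned long long x) {
      return (x >> 8) | (x << 56);
    }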

Added: llvm/trunk/test/CodeGen/Hexagon/save-kill-csr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/save-kill-csr.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/save-kill-csr.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/save-kill-csr.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,95 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+target triple = "hexagon"
+
+%s.0 = type { i8, i8, i8, i8 }
+%s.1 = type { %s.2 }
+%s.2 = type { %s.3 }
+%s.3 = type { i32 (...)** }
+%s.4 = type { i8, i8, i16, i8 }
+%s.5 = type { i8, %s.0* }
+
+@g0 = external hidden global [3 x %s.0], align 8
+@g1 = external hidden global [3 x %s.0], align 8
+@g2 = external hidden global [3 x %s.0], align 8
+@g3 = external hidden global [3 x %s.0], align 8
+@g4 = external hidden global [3 x %s.0], align 8
+@g5 = external hidden global [3 x %s.0], align 8
+@g6 = external hidden global [4 x %s.0], align 8
+@g7 = external hidden global [3 x %s.0], align 8
+@g8 = external hidden global [3 x %s.0], align 8
+@g9 = external hidden global [3 x %s.0], align 8
+@g10 = external hidden global [4 x %s.0], align 8
+@g11 = external hidden global [3 x %s.0], align 8
+@g12 = external hidden global [3 x %s.0], align 8
+@g13 = external hidden global [4 x %s.0], align 8
+@g14 = external hidden global [3 x %s.0], align 8
+@g15 = external hidden global [3 x %s.0], align 8
+@g16 = external hidden global [3 x %s.0], align 8
+@g17 = external hidden global [4 x %s.0], align 8
+@g18 = external hidden global [3 x %s.0], align 8
+
+; Function Attrs: norecurse nounwind optsize ssp
+define hidden zeroext i8 @f0(%s.1* nocapture readnone %a0, %s.4* readonly %a1, %s.5* %a2, i32 %a3) unnamed_addr #0 align 2 {
+b0:
+  br i1 undef, label %b4, label %b1
+
+b1:                                               ; preds = %b0
+  %v0 = icmp eq i32 %a3, 1
+  %v1 = select i1 %v0, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g18, i32 0, i32 0), %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g0, i32 0, i32 0)
+  %v2 = icmp eq i32 %a3, 2
+  %v3 = select i1 %v2, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g16, i32 0, i32 0), %s.0* %v1
+  %v4 = icmp eq i32 %a3, 3
+  %v5 = select i1 %v4, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g15, i32 0, i32 0), %s.0* %v3
+  %v6 = icmp eq i32 %a3, 4
+  %v7 = select i1 %v6, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g14, i32 0, i32 0), %s.0* %v5
+  %v8 = icmp eq i32 %a3, 5
+  %v9 = select i1 %v8, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g12, i32 0, i32 0), %s.0* %v7
+  %v10 = icmp eq i32 %a3, 6
+  %v11 = select i1 %v10, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g11, i32 0, i32 0), %s.0* %v9
+  %v12 = icmp eq i32 %a3, 7
+  %v13 = select i1 %v12, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g9, i32 0, i32 0), %s.0* %v11
+  %v14 = icmp eq i32 %a3, 8
+  %v15 = select i1 %v14, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g8, i32 0, i32 0), %s.0* %v13
+  %v16 = icmp eq i32 %a3, 9
+  %v17 = select i1 %v16, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g7, i32 0, i32 0), %s.0* %v15
+  %v18 = icmp eq i32 %a3, 10
+  %v19 = select i1 %v18, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g5, i32 0, i32 0), %s.0* %v17
+  %v20 = icmp eq i32 %a3, 11
+  %v21 = select i1 %v20, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g4, i32 0, i32 0), %s.0* %v19
+  %v22 = icmp eq i32 %a3, 12
+  %v23 = select i1 %v22, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g3, i32 0, i32 0), %s.0* %v21
+  %v24 = icmp eq i32 %a3, 13
+  %v25 = select i1 %v24, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g2, i32 0, i32 0), %s.0* %v23
+  %v26 = select i1 undef, %s.0* getelementptr inbounds ([3 x %s.0], [3 x %s.0]* @g1, i32 0, i32 0), %s.0* %v25
+  %v27 = select i1 undef, %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g17, i32 0, i32 0), %s.0* %v26
+  %v28 = icmp eq i32 %a3, 16
+  %v29 = select i1 %v28, %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g13, i32 0, i32 0), %s.0* %v27
+  %v30 = icmp eq i32 %a3, 17
+  %v31 = select i1 %v30, %s.0* null, %s.0* %v29
+  %v32 = select i1 undef, %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g10, i32 0, i32 0), %s.0* %v31
+  %v33 = select i1 undef, %s.0* getelementptr inbounds ([4 x %s.0], [4 x %s.0]* @g6, i32 0, i32 0), %s.0* %v32
+  %v34 = add i32 %a3, -15
+  %v35 = icmp ult i32 %v34, 2
+  %v36 = select i1 %v35, i8 4, i8 3
+  %v37 = select i1 undef, i8 0, i8 %v36
+  %v38 = select i1 undef, i8 4, i8 %v37
+  br i1 undef, label %b2, label %b3
+
+b2:                                               ; preds = %b3, %b1
+  %v39 = phi %s.0* [ undef, %b3 ], [ %v33, %b1 ]
+  %v40 = phi i8 [ undef, %b3 ], [ %v38, %b1 ]
+  %v41 = getelementptr inbounds %s.5, %s.5* %a2, i32 0, i32 1
+  store %s.0* %v39, %s.0** %v41, align 4
+  store i8 %v40, i8* undef, align 4
+  br label %b4
+
+b3:                                               ; preds = %b1
+  br label %b2
+
+b4:                                               ; preds = %b2, %b0
+  ret i8 undef
+}
+
+attributes #0 = { norecurse nounwind optsize ssp "target-cpu"="hexagonv55" }

Added: llvm/trunk/test/CodeGen/Hexagon/save-regs-thresh.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/save-regs-thresh.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/save-regs-thresh.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/save-regs-thresh.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,112 @@
+; RUN: llc -march=hexagon -O2 -spill-func-threshold=4 < %s | FileCheck %s --check-prefix=NOSAVE
+; RUN: llc -march=hexagon -O2 -spill-func-threshold=2 < %s | FileCheck %s --check-prefix=SAVE
+; NOSAVE-NOT: call __save_r16_
+; SAVE: call __save_r16_
+
+target triple = "hexagon"
+
+%s.0 = type { %s.1, [50 x %s.2], i8, i32 }
+%s.1 = type { i8, i8, i8, i8, i8, i8, i8, i8, [2 x i8], [2 x i8], [4 x i8] }
+%s.2 = type { %s.3, [16 x i8] }
+%s.3 = type { %s.4, %s.5 }
+%s.4 = type { i8, i8, [2 x i8], [4 x i8] }
+%s.5 = type { i16, i16 }
+
+@g0 = private unnamed_addr constant [21 x i8] c"....................\00", align 1
+@g1 = internal unnamed_addr global [1 x %s.0*] zeroinitializer, align 4
+
+; Function Attrs: nounwind
+define void @f0(i8 zeroext %a0, %s.0** nocapture %a1) #0 {
+b0:
+  %v0 = tail call i8* @f1(i8 zeroext %a0, i32 1424, i8* getelementptr inbounds ([21 x i8], [21 x i8]* @g0, i32 0, i32 0), i32 118) #0
+  %v1 = bitcast i8* %v0 to %s.0*
+  %v2 = zext i8 %a0 to i32
+  %v3 = getelementptr inbounds [1 x %s.0*], [1 x %s.0*]* @g1, i32 0, i32 %v2
+  store %s.0* %v1, %s.0** %v3, align 4, !tbaa !0
+  store %s.0* %v1, %s.0** %a1, align 4, !tbaa !0
+  ret void
+}
+
+declare i8* @f1(i8 zeroext, i32, i8*, i32)
+
+; Function Attrs: nounwind
+define void @f2(i8 zeroext %a0) #0 {
+b0:
+  %v0 = zext i8 %a0 to i32
+  %v1 = getelementptr inbounds [1 x %s.0*], [1 x %s.0*]* @g1, i32 0, i32 %v0
+  %v2 = load %s.0*, %s.0** %v1, align 4, !tbaa !0
+  %v3 = getelementptr inbounds %s.0, %s.0* %v2, i32 0, i32 0, i32 0
+  tail call void @f3(i8 zeroext %a0, i8* %v3, i8* getelementptr inbounds ([21 x i8], [21 x i8]* @g0, i32 0, i32 0), i32 142) #0
+  store %s.0* null, %s.0** %v1, align 4, !tbaa !0
+  ret void
+}
+
+declare void @f3(i8 zeroext, i8*, i8*, i32)
+
+; Function Attrs: nounwind
+define void @f4(i8 zeroext %a0, i8 zeroext %a1, i8 zeroext %a2, i8 zeroext %a3, i8 zeroext %a4) #0 {
+b0:
+  %v0 = alloca [7 x i32], align 4
+  %v1 = zext i8 %a0 to i32
+  %v2 = getelementptr inbounds [1 x %s.0*], [1 x %s.0*]* @g1, i32 0, i32 %v1
+  %v3 = load %s.0*, %s.0** %v2, align 4, !tbaa !0
+  %v4 = getelementptr inbounds %s.0, %s.0* %v3, i32 0, i32 3
+  %v5 = load i32, i32* %v4, align 4, !tbaa !4
+  %v6 = and i32 %v5, 8
+  %v7 = icmp eq i32 %v6, 0
+  br i1 %v7, label %b2, label %b1
+
+b1:                                               ; preds = %b0
+  %v8 = getelementptr inbounds [7 x i32], [7 x i32]* %v0, i32 0, i32 0
+  %v9 = bitcast [7 x i32]* %v0 to %s.2*
+  %v10 = call i32 @f5() #0
+  %v11 = getelementptr [7 x i32], [7 x i32]* %v0, i32 0, i32 1
+  store i32 %v10, i32* %v11, align 4
+  %v12 = call zeroext i16 @f6(i8 zeroext %a0) #0
+  %v13 = zext i16 %v12 to i32
+  %v14 = shl nuw i32 %v13, 16
+  %v15 = or i32 %v14, 260
+  store i32 %v15, i32* %v8, align 4
+  %v16 = zext i8 %a1 to i32
+  %v17 = getelementptr [7 x i32], [7 x i32]* %v0, i32 0, i32 2
+  %v18 = zext i8 %a2 to i32
+  %v19 = shl nuw nsw i32 %v18, 12
+  %v20 = zext i8 %a3 to i32
+  %v21 = shl nuw nsw i32 %v20, 16
+  %v22 = and i32 %v21, 458752
+  %v23 = and i32 %v19, 61440
+  %v24 = zext i8 %a4 to i32
+  %v25 = shl nuw nsw i32 %v24, 19
+  %v26 = and i32 %v25, 3670016
+  %v27 = or i32 %v23, %v16
+  %v28 = or i32 %v27, %v22
+  %v29 = or i32 %v28, %v26
+  %v30 = call zeroext i8 @f7(i8 zeroext %a0, i8 zeroext %a1) #0
+  %v31 = zext i8 %v30 to i32
+  %v32 = shl nuw nsw i32 %v31, 8
+  %v33 = and i32 %v32, 3840
+  %v34 = or i32 %v33, %v29
+  store i32 %v34, i32* %v17, align 4
+  %v35 = call i32 bitcast (i32 (...)* @f8 to i32 (i32, %s.2*)*)(i32 %v1, %s.2* %v9) #0
+  br label %b2
+
+b2:                                               ; preds = %b1, %b0
+  ret void
+}
+
+declare i32 @f5()
+
+declare zeroext i16 @f6(i8 zeroext)
+
+declare zeroext i8 @f7(i8 zeroext, i8 zeroext)
+
+declare i32 @f8(...)
+
+attributes #0 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"any pointer", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}
+!4 = !{!5, !5, i64 0}
+!5 = !{!"long", !2}

Added: llvm/trunk/test/CodeGen/Hexagon/sdata-expand-const.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/sdata-expand-const.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/sdata-expand-const.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/sdata-expand-const.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,19 @@
+; RUN: llc -march=hexagon -hexagon-small-data-threshold=0 < %s | FileCheck %s
+; CHECK-NOT: CONST
+
+target triple = "hexagon"
+
+; Function Attrs: nounwind
+define i32 @f0(i64 %a0) #0 {
+b0:
+  %v0 = alloca i64, align 8
+  store i64 %a0, i64* %v0, align 8
+  %v1 = call i32 @llvm.hexagon.S2.ct0p(i64 4222189076152335)
+  ret i32 %v1
+}
+
+; Function Attrs: nounwind readnone
+declare i32 @llvm.hexagon.S2.ct0p(i64) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/sdata-opaque-type.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/sdata-opaque-type.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/sdata-opaque-type.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/sdata-opaque-type.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,17 @@
+; RUN: llc -march=hexagon -O2 < %s
+; REQUIRES: asserts
+; This should compile cleanly.
+
+target triple = "hexagon"
+
+%s.0 = type opaque
+
+@g0 = external global %s.0
+
+; Function Attrs: nounwind
+define %s.0* @f0() #0 {
+b0:
+  ret %s.0* @g0
+}
+
+attributes #0 = { nounwind }

Added: llvm/trunk/test/CodeGen/Hexagon/sdata-stack-guard.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/sdata-stack-guard.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/sdata-stack-guard.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/sdata-stack-guard.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,52 @@
+; Check that the __stack_chk_guard was placed in small data.
+; RUN: llc -march=hexagon -O2 -hexagon-small-data-threshold=4 < %s | FileCheck -check-prefix=GPREL %s
+; GPREL: memw(gp+#__stack_chk_guard)
+
+; For threshold less than 4 (size of address), the variable is not placed in small-data
+; RUN: llc -march=hexagon -O2 -hexagon-small-data-threshold=0 < %s | FileCheck -check-prefix=ABS %s
+; ABS: memw(##__stack_chk_guard)
+
+@g0 = private unnamed_addr constant [37 x i8] c"This string is longer than 16 bytes\0A\00", align 1
+@g1 = private unnamed_addr constant [15 x i8] c"\0AChar 20 = %c\0A\00", align 1
+
+; Function Attrs: noinline nounwind ssp
+define zeroext i8 @f0(i32 %a0) #0 {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = alloca [64 x i8], align 8
+  %v2 = alloca i8*, align 4
+  store i32 %a0, i32* %v0, align 4
+  store i8* getelementptr inbounds ([37 x i8], [37 x i8]* @g0, i32 0, i32 0), i8** %v2, align 4
+  %v3 = getelementptr inbounds [64 x i8], [64 x i8]* %v1, i32 0, i32 0
+  %v4 = load i8*, i8** %v2, align 4
+  %v5 = call i8* @f1(i8* %v3, i8* %v4) #2
+  %v6 = load i32, i32* %v0, align 4
+  %v7 = getelementptr inbounds [64 x i8], [64 x i8]* %v1, i32 0, i32 %v6
+  %v8 = load i8, i8* %v7, align 1
+  ret i8 %v8
+}
+
+; Function Attrs: nounwind
+declare i8* @f1(i8*, i8*) #1
+
+; Function Attrs: noinline nounwind ssp
+define i32 @f2(i32 %a0, i8** %a1) #0 {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = alloca i32, align 4
+  %v2 = alloca i8**, align 4
+  store i32 0, i32* %v0, align 4
+  store i32 %a0, i32* %v1, align 4
+  store i8** %a1, i8*** %v2, align 4
+  %v3 = call zeroext i8 @f0(i32 20)
+  %v4 = zext i8 %v3 to i32
+  %v5 = call i32 (i8*, ...) @f3(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @g1, i32 0, i32 0), i32 %v4) #2
+  ret i32 0
+}
+
+; Function Attrs: nounwind
+declare i32 @f3(i8*, ...) #1
+
+attributes #0 = { noinline nounwind ssp "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length64b" }
+attributes #1 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvx,+hvx-length64b" }
+attributes #2 = { nounwind }
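
For context, the functions above read like a reduction of a small stack-protector program; a hypothetical C source that would produce an equivalent ssp-guarded frame (names and flags are illustrative, not taken from the commit) is:

    /* e.g. clang --target=hexagon -O2 -fstack-protector -S guard.c
       The 64-byte local buffer makes f0 an ssp function, so the code
       references __stack_chk_guard; the RUN lines then check whether
       that reference is GP-relative (small data) or absolute. */
    #include <stdio.h>
    #include <string.h>

    unsigned char f0(int a0) {
      char buf[64];
      strcpy(buf, "This string is longer than 16 bytes\n");
      return buf[a0];
    }

    int f2(int argc, char **argv) {
      printf("\nChar 20 = %c\n", f0(20));
      return 0;
    }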

Added: llvm/trunk/test/CodeGen/Hexagon/setmemrefs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/setmemrefs.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/setmemrefs.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/setmemrefs.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,31 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+
+; This test checks to see if, after lowering the two loads below, we set up the
+; memrefs of the resulting load MIs correctly, so that they are packetized
+; together.
+
+; CHECK: {
+; CHECK:       r{{[0-9]*}} = memw(r1{{[678]}}+#0)
+; CHECK-NEXT:  r{{[0-9]*}} = memw(r1{{[678]}}+#0)
+
+; Function Attrs: nounwind
+define i64 @f0(i32* nocapture %a0, i32* nocapture %a1, i32* nocapture %a2) #0 {
+b0:
+  %v0 = tail call i32 bitcast (i32 (...)* @f1 to i32 ()*)() #0
+  store i32 %v0, i32* %a2, align 4, !tbaa !0
+  %v1 = load i32, i32* %a0, align 4, !tbaa !0
+  %v2 = sext i32 %v1 to i64
+  %v3 = load i32, i32* %a1, align 4, !tbaa !0
+  %v4 = sext i32 %v3 to i64
+  %v5 = mul nsw i64 %v4, %v2
+  ret i64 %v5
+}
+
+declare i32 @f1(...)
+
+attributes #0 = { nounwind }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"int", !2}
+!2 = !{!"omnipotent char", !3}
+!3 = !{!"Simple C/C++ TBAA"}
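
As the comment in the test says, the point is that the memory operands survive lowering so the packetizer can place the two loads in one packet. A rough C equivalent of f0 (an illustrative reconstruction, not the original source):

    extern int f1(void);

    /* The call and the store come first; the two int loads feed the
       widening multiply and are expected to share a packet. */
    long long f0(int *a0, int *a1, int *a2) {
      *a2 = f1();
      return (long long)*a0 * (long long)*a1;
    }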

Added: llvm/trunk/test/CodeGen/Hexagon/sfmin_dce.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/sfmin_dce.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/sfmin_dce.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/sfmin_dce.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,18 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: f0
+; CHECK-NOT: sfmin
+
+; Function Attrs: nounwind
+define void @f0(i32 %a0, i32 %a1) #0 {
+b0:
+  %v0 = bitcast i32 %a0 to float
+  %v1 = bitcast i32 %a1 to float
+  %v2 = tail call float @llvm.hexagon.F2.sfmin(float %v0, float %v1) #0
+  ret void
+}
+
+; Function Attrs: readnone
+declare float @llvm.hexagon.F2.sfmin(float, float) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/sfmpyacc_scale.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/sfmpyacc_scale.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/sfmpyacc_scale.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/sfmpyacc_scale.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,28 @@
+; RUN: llc -march=hexagon < %s | FileCheck %s
+; CHECK: r{{[0-9]*}} += sfmpy(r{{[0-9]*}},r{{[0-9]*}},p{{[0-3]}}):scale
+
+target triple = "hexagon"
+
+@g0 = private unnamed_addr constant [65 x i8] c"%f :  Q6_R_sfmpyacc_RRp_scale(FLT_MIN,FLT_MIN,FLT_MIN,CHAR_MIN)\0A\00", align 1
+
+; Function Attrs: nounwind
+declare i32 @f0(i8*, ...) #0
+
+; Function Attrs: nounwind
+define i32 @f1() #0 {
+b0:
+  %v0 = alloca i32, align 4
+  %v1 = alloca i32, align 4
+  store i32 0, i32* %v0
+  store i32 0, i32* %v1, align 4
+  %v2 = call float @llvm.hexagon.F2.sffma.sc(float 0x3810000000000000, float 0x3810000000000000, float 0x3810000000000000, i32 0)
+  %v3 = fpext float %v2 to double
+  %v4 = call i32 (i8*, ...) @f0(i8* getelementptr inbounds ([65 x i8], [65 x i8]* @g0, i32 0, i32 0), double %v3) #0
+  ret i32 0
+}
+
+; Function Attrs: readnone
+declare float @llvm.hexagon.F2.sffma.sc(float, float, float, i32) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv55" }
+attributes #1 = { readnone }

Added: llvm/trunk/test/CodeGen/Hexagon/split-vecpred.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Hexagon/split-vecpred.ll?rev=327271&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Hexagon/split-vecpred.ll (added)
+++ llvm/trunk/test/CodeGen/Hexagon/split-vecpred.ll Mon Mar 12 07:01:28 2018
@@ -0,0 +1,106 @@
+; RUN: llc -march=hexagon < %s
+; REQUIRES: asserts
+
+; Test that the splitVecPredRegs pass in the Hexagon Peephole pass does not
+; move a vector predicate definition illegally, which ends up causing an assert
+; later. The assert occurs because there is a use of a register that does not
+; have a correct definition.
+
+define void @f0() local_unnamed_addr #0 {
+b0:
+  br label %b1
+
+b1:                                               ; preds = %b0
+  br i1 undef, label %b2, label %b3
+
+b2:                                               ; preds = %b1
+  unreachable
+
+b3:                                               ; preds = %b1
+  br label %b4
+
+b4:                                               ; preds = %b3
+  br label %b5
+
+b5:                                               ; preds = %b4
+  br i1 undef, label %b13, label %b6
+
+b6:                                               ; preds = %b5
+  br label %b7
+
+b7:                                               ; preds = %b6
+  br label %b8
+
+b8:                                               ; preds = %b7
+  %v0 = tail call <512 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> undef, i32 -1)
+  br i1 undef, label %b9, label %b11
+
+b9:                                               ; preds = %b8
+  br label %b12
+
+b10:                                              ; preds = %b12
+  br label %b11
+
+b11:                                              ; preds = %b10, %b8
+  %v1 = phi <512 x i1> [ %v0, %b8 ], [ undef, %b10 ]
+  %v2 = tail call <512 x i1> @llvm.hexagon.V6.pred.and(<512 x i1> %v1, <512 x i1> undef)
+  %v3 = tail call <16 x i32> @llvm.hexagon.V6.vaddbq(<512 x i1> %v2, <16 x i32> undef, <16 x i32> undef)
+  %v4 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> undef, <16 x i32> %v3, i32 undef)
+  %v5 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v4, <16 x i32> undef, i32 undef)
+  %v6 = tail call <16 x i32> @llvm.hexagon.V6.vand(<16 x i32> %v5, <16 x i32> undef)
+  %v7 = tail call <16 x i32> @llvm.hexagon.V6.vor(<16 x i32> %v6, <16 x i32> undef)
+  %v8 = tail call <16 x i32> @llvm.hexagon.V6.vsatwh(<16 x i32> %v7, <16 x i32> undef)
+  %v9 = tail call <32 x i32> @llvm.hexagon.V6.vshufoeb(<16 x i32> undef, <16 x i32> %v8)
+  %v10 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %v9)
+  %v11 = tail call <16 x i32> @llvm.hexagon.V6.vor(<16 x i32> %v10, <16 x i32> undef)
+  %v12 = tail call <512 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32> %v11, i32 -1)
+  %v13 = tail call <16 x i32> @llvm.hexagon.V6.vandqrt(<512 x i1> %v12, i32 undef)
+  tail call void @llvm.hexagon.V6.vmaskedstoreq(<512 x i1> undef, i8* undef, <16 x i32> %v13)
+  unreachable
+
+b12:                                              ; preds = %b12, %b9
+  %v14 = phi i32 [ %v15, %b12 ], [ 0, %b9 ]
+  %v15 = add nuw nsw i32 %v14, 1
+  %v16 = icmp slt i32 %v15, undef
+  br i1 %v16, label %b12, label %b10
+
+b13:                                              ; preds = %b5
+  ret void
+}
+
+; Function Attrs: nounwind readnone
+declare <512 x i1> @llvm.hexagon.V6.vandvrt(<16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <512 x i1> @llvm.hexagon.V6.pred.and(<512 x i1>, <512 x i1>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32>, <16 x i32>, i32) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vandqrt(<512 x i1>, i32) #1
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.hexagon.V6.vmaskedstoreq(<512 x i1>, i8*, <16 x i32>) #2
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vaddbq(<512 x i1>, <16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vor(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vand(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.vsatwh(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <32 x i32> @llvm.hexagon.V6.vshufoeb(<16 x i32>, <16 x i32>) #1
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.hi(<32 x i32>) #1
+
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
+attributes #1 = { nounwind readnone }
+attributes #2 = { argmemonly nounwind }



